import collections
import re
import warnings
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref, Subquery
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, NotSupportedError
from django.utils.deprecation import RemovedInDjango31Warning
FORCE = object()
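# Sentinel passed as compile(..., select_format=FORCE) to apply
# output_field.select_format() even when compiling a subquery.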
class SQLCompiler:
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by
# QuerySet.iterator(); these are set as a side-effect of executing the
# query. Note that we calculate a separate list of extra select columns
# needed for grammatical correctness of the query, but those columns are
# not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
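# For example, searching '"name" ASC' captures '"name"' as group(1); the
# direction is stripped so get_order_by() and get_extra_select() can
# deduplicate columns regardless of ordering direction.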
self._meta_ordering = None
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't ever be restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values() calls that
# refer to some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
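# A sketch of the SQL for the first example above (identifiers
# illustrative):
#   SELECT ..., COUNT("somemodel"."somecol") FROM "somemodel"
#   GROUP BY "somemodel"."id", "somemodel"."name", ...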
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the GROUP BY clause in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
# Skip references to the select clause, as all expressions in the
# select clause are already part of the group by.
if not expr.contains_aggregate and not is_ref:
expressions.extend(expr.get_source_expressions())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
if isinstance(expr, Subquery) and not sql.startswith('('):
# A Subquery expression from the HAVING clause may lack its
# wrapping parentheses because they could be removed when a
# subquery is the "rhs" in an expression (see Subquery._prepare()).
sql = '(%s)' % sql
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to the query's base table primary key? If the
# expression isn't Col-like, then skip it.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.base_table):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key
}
expressions = [pk] + [
expr for expr in expressions
if expr in having or (
getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key and expr.target.model._meta.managed
}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
- related_klass_infos: [f, klass_info] to descend into
The annotations value is a dictionary of {'attname': column position}
entries.
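A sketch of the klass_info shape (names and indexes illustrative):
{'model': SomeModel, 'select_fields': [0, 1, 2], 'related_klass_infos': [...]}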
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
cols = self.get_default_columns()
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col, select_format=True)
except EmptyResultSet:
# Select a predicate that's always False.
sql, params = '0', ()
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif self.query.get_meta().ordering:
ordering = self.query.get_meta().ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for field in ordering:
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = order == 'DESC'
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT clause
order_by.append((
OrderBy(self.query.annotations[col], descending=descending),
False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query.extra or col not in self.query.extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator:
src = resolved.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias:
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())])
break
else:
raise DatabaseError('ORDER BY term does not match any column in the result set.')
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
if (without_ordering, tuple(params)) in seen:
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
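# For example, quote_name_unless_alias('auth_user') returns the quoted
# table name (e.g. "auth_user"), while a generated join alias such as T3
# comes back unquoted.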
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format is FORCE or (select_format and not self.query.subquery):
return node.output_field.select_format(self, sql, params)
return sql, params
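# For example, an expression that defines as_oracle() is compiled with
# that method when connection.vendor == 'oracle'; otherwise its plain
# as_sql() is used.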
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection)
for query in self.query.combined_queries if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.')
if compiler.get_order_by():
raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.')
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query.set_values((
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
))
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = 'SELECT * FROM ({})'.format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif not features.supports_slicing_ordering_in_compound:
part_sql = '({})'.format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == 'union' or (combinator == 'difference' and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == 'union':
combinator_sql += ' ALL'
braces = '({})' if features.supports_slicing_ordering_in_compound else '{}'
sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts))
result = [' {} '.format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
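# For example (sketch), qs1.union(qs2) compiles each side separately and
# joins them as 'SELECT ... UNION SELECT ...', concatenating the
# parameter lists in the same order.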
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, 'supports_select_{}'.format(combinator)):
raise NotSupportedError('{} is not supported on this database backend.'.format(combinator))
result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
result = ['SELECT']
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result += [', '.join(out_cols), 'FROM', *from_]
params.extend(f_params)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with '
'select_for_update on this database backend.'
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
# If it's a NOWAIT/SKIP LOCKED/OF query but the backend
# doesn't support it, raise NotSupportedError to prevent a
# possible deadlock.
if nowait and not self.connection.features.has_select_for_update_nowait:
raise NotSupportedError('NOWAIT is not supported on this database backend.')
elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
raise NotSupportedError('SKIP LOCKED is not supported on this database backend.')
elif of and not self.connection.features.has_select_for_update_of:
raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.')
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
)
if for_update_part and self.connection.features.for_update_after_from:
result.append(for_update_part)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
order_by = order_by or self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if self._meta_ordering:
# When the deprecation ends, replace with:
# order_by = None
warnings.warn(
"%s QuerySet won't use Meta.ordering in Django 3.1. "
"Add .order_by('%s') to retain the current query." % (
self.query.model.__name__,
"', '".join(self._meta_ordering)
),
RemovedInDjango31Warning,
stacklevel=4,
)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if self.query.explain_query:
result.insert(0, self.connection.ops.explain_query_prefix(
self.query.explain_format,
**self.query.explain_options
))
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limit_offset:
result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark))
if for_update_part and not self.connection.features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if not alias and with_col_aliases:
alias = 'col%d' % index
if alias:
sub_selects.append("%s.%s" % (
self.connection.ops.quote_name('subquery'),
self.connection.ops.quote_name(alias),
))
else:
select_clone = select.relabeled_clone({select.alias: 'subquery'})
subselect, subparams = select_clone.as_sql(self, self.connection)
sub_selects.append(subselect)
sub_params.extend(subparams)
return 'SELECT %s FROM (%s) subquery' % (
', '.join(sub_selects),
' '.join(result),
), tuple(sub_params + params)
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Return a list of column expressions suitable for inclusion in the
SELECT clause, setting up the joins needed to reach parent models
along the way.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == 'DESC'
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
get_order_by() and get_distinct() must produce the same target
columns for the same input, as their join prefixes must match.
Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list of any extra parameters
that need to be included. Subclasses can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices, self.query._filtered_relations)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'local_setter': f.set_cached_value,
'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins([related_field_name], opts, root_alias)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': f.remote_field.set_cached_value,
'remote_setter': f.set_cached_value,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias)
model = join_opts.model
alias = joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
def local_setter(obj, from_obj):
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(obj, from_obj):
setattr(from_obj, name, obj)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': local_setter,
'remote_setter': remote_setter,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select, opts=model._meta, root_alias=alias,
cur_depth=cur_depth + 1, requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield 'self'
else:
field = klass_info['field']
if klass_info['reverse']:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get('related_klass_infos', [])
)
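# For example (sketch), with select_related('author__publisher') the
# generator above yields 'self', 'author', 'author__publisher'.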
result = []
invalid_names = []
for name in self.query.select_for_update_of:
parts = [] if name == 'self' else name.split(LOOKUP_SEP)
klass_info = self.klass_info
for part in parts:
for related_klass_info in klass_info.get('related_klass_infos', []):
field = related_klass_info['field']
if related_klass_info['reverse']:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
select_index = klass_info['select_fields'][0]
col = self.select[select_index][0]
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: %s.' % (
', '.join(invalid_names),
', '.join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to a mapping of
table names to sets of column names that are to be loaded. Return
the dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
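# Each converter collected here is later applied by apply_converters()
# as converter(value, expression, connection); e.g. a backend converter
# may parse a stored string back into a datetime (sketch).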
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1-item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
for row in result[0]:
if not isinstance(row, str):
yield ' '.join(str(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
return_id = False
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
if value.contains_over_clause:
raise FieldError('Window expressions are not allowed in this query.')
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple of ([sqls], [param lists]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts)
result = ['%s %s' % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.return_id and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql(
ignore_conflicts=self.query.ignore_conflicts
)
if self.return_id and self.connection.features.can_return_columns_from_insert:
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += [r_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, return_id=False):
assert not (
return_id and len(self.query.objs) != 1 and
not self.connection.features.can_return_rows_from_bulk_insert
)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not return_id:
return
if self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1:
return self.connection.ops.fetch_returned_insert_ids(cursor)
if self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(
cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column
)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.base_table)]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
if val.contains_over_clause:
raise FieldError('Window expressions are not allowed in this query.')
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
table = self.query.base_table
result = [
'UPDATE %s SET' % qn(table),
', '.join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(True)
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation, select_format=FORCE)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.utils import InterfaceError
class DatabaseFeatures(BaseDatabaseFeatures):
interprets_empty_strings_as_nulls = True
has_select_for_update = True
has_select_for_update_nowait = True
has_select_for_update_skip_locked = True
has_select_for_update_of = True
select_for_update_of_column = True
can_return_columns_from_insert = True
can_introspect_autofield = True
supports_subqueries_in_group_by = False
supports_transactions = True
supports_timezones = False
has_native_duration_field = True
can_defer_constraint_checks = True
supports_partially_nullable_unique_constraints = False
truncates_names = True
supports_tablespaces = True
supports_sequence_reset = False
can_introspect_materialized_views = True
can_introspect_time_field = False
atomic_transactions = False
supports_combined_alters = False
nulls_order_largest = True
requires_literal_defaults = True
closed_cursor_error_class = InterfaceError
bare_select_suffix = " FROM DUAL"
# select for update with limit can be achieved on Oracle, but not with the current backend.
supports_select_for_update_with_limit = False
supports_temporal_subtraction = True
# Oracle doesn't ignore the case of quoted identifiers, but the current
# backend does by uppercasing all identifiers.
ignores_table_name_case = True
supports_index_on_text_field = False
has_case_insensitive_like = False
create_test_procedure_without_params_sql = """
CREATE PROCEDURE "TEST_PROCEDURE" AS
V_I INTEGER;
BEGIN
V_I := 1;
END;
"""
create_test_procedure_with_int_param_sql = """
CREATE PROCEDURE "TEST_PROCEDURE" (P_I INTEGER) AS
V_I INTEGER;
BEGIN
V_I := P_I;
END;
"""
supports_callproc_kwargs = True
supports_over_clause = True
supports_ignore_conflicts = False
max_query_params = 2**16 - 1
supports_partial_indexes = False
supports_slicing_ordering_in_compound = True
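# These flags are consulted at runtime via connection.features; e.g.
# (sketch) the compiler checks supports_select_for_update_with_limit
# before combining FOR UPDATE with LIMIT/OFFSET.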
"""
Oracle database backend for Django.
Requires cx_Oracle: https://oracle.github.io/python-cx_Oracle/
"""
import datetime
import decimal
import os
import platform
from contextlib import contextmanager
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils.encoding import force_bytes, force_str
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.AL32UTF8'),
# This prevents Unicode from getting mangled by being encoded into the
# potentially non-Unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import Oracle_datetime # NOQA isort:skip
from .validation import DatabaseValidation # NOQA isort:skip
@contextmanager
def wrap_oracle_errors():
try:
yield
except Database.DatabaseError as e:
# cx_Oracle raises a cx_Oracle.DatabaseError exception with the
# following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# Convert that case to Django's IntegrityError exception.
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') and x.code == 2091 and 'ORA-02291' in x.message:
raise utils.IntegrityError(*tuple(e.args))
raise
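# Typical usage (sketch):
#
#     with wrap_oracle_errors():
#         cursor.execute(sql, params)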
class _UninitializedOperatorsDescriptor:
def __get__(self, instance, cls=None):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
display_name = 'Oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed).
data_types = {
'AutoField': 'NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY',
'BigAutoField': 'NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '%(qn_column)s IN (0,1)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
# Oracle doesn't support a database index on these columns.
_limited_data_types = ('clob', 'nclob', 'blob')
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = {
**_standard_operators,
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, %, _)
# should be escaped on the database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
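    # How the pieces above compose (an illustrative sketch; `rhs_sql` stands
    # for an already-rendered right-hand-side expression and is an assumption
    # of the example). pattern_esc escapes the LIKE wildcards inside the
    # expression, then a pattern op wraps it in the full LIKE clause:
    #
    #   escaped = DatabaseWrapper.pattern_esc.format('rhs_sql')
    #   clause = DatabaseWrapper._standard_pattern_ops['contains'].format(escaped)
    #   # -> "LIKE TRANSLATE( '%%' || REPLACE(REPLACE(REPLACE(rhs_sql, ...) ||
    #   #    '%%' USING NCHAR_CS) ESCAPE TRANSLATE('\' USING NCHAR_CS)"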
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_columns_from_insert = use_returning_into
def _dsn(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT']:
return Database.makedsn(settings_dict['HOST'], int(settings_dict['PORT']), settings_dict['NAME'])
return settings_dict['NAME']
def _connect_string(self):
return '%s/\\"%s\\"@%s' % (self.settings_dict['USER'], self.settings_dict['PASSWORD'], self._dsn())
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
return Database.connect(
user=self.settings_dict['USER'],
password=self.settings_dict['PASSWORD'],
dsn=self._dsn(),
**conn_params,
)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
# these are set in single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" +
(" TIME_ZONE = 'UTC'" if settings.USE_TZ else '')
)
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except Database.DatabaseError:
self.operators = self._likec_operators
self.pattern_ops = self._likec_pattern_ops
else:
self.operators = self._standard_operators
self.pattern_ops = self._standard_pattern_ops
cursor.close()
self.connection.stmtcachesize = 20
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
def create_cursor(self, name=None):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
with wrap_oracle_errors():
return self.connection.commit()
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append({
'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
'time': '0.000',
})
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def oracle_version(self):
with self.temporary_connection():
return tuple(int(x) for x in self.connection.version.split('.'))
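# Illustrative value of the cached property above: cx_Oracle reports the
# server version as a dotted string, so the tuple looks like, e.g.,
#
#   connection.oracle_version  # -> (12, 2, 0, 1, 0) on an Oracle 12.2 server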
class OracleParam:
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
param = Oracle_datetime.from_datetime(param)
string_size = 0
# Oracle doesn't recognize True and False correctly.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, (Database.Binary, datetime.timedelta)):
self.force_bytes = param
else:
            # To transmit to the database, we need Unicode if supported.
            # To get the size right, we must consider bytes.
self.force_bytes = force_str(param, cursor.charset, strings_only)
if isinstance(self.force_bytes, str):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
elif isinstance(param, datetime.datetime):
self.input_size = Database.TIMESTAMP
else:
self.input_size = None
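# Illustration of OracleParam sizing (a sketch; `cursor` is assumed to be a
# FormatStylePlaceholderCursor instance): short strings get no explicit input
# size, while anything whose byte length exceeds 4000 is bound as a CLOB.
#
#   OracleParam('abc', cursor).input_size       # -> None
#   OracleParam('x' * 4001, cursor).input_size  # -> Database.CLOB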
class VariableWrapper:
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class FormatStylePlaceholderCursor:
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
self.cursor.outputtypehandler = self._output_type_handler
@staticmethod
def _output_number_converter(value):
return decimal.Decimal(value) if '.' in value else int(value)
@staticmethod
def _get_decimal_converter(precision, scale):
if scale == 0:
return int
context = decimal.Context(prec=precision)
quantize_value = decimal.Decimal(1).scaleb(-scale)
return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context)
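    # The two converters above are pure functions; illustrative calls:
    #
    #   FormatStylePlaceholderCursor._output_number_converter('42')    # -> 42
    #   FormatStylePlaceholderCursor._output_number_converter('3.14')  # -> Decimal('3.14')
    #   FormatStylePlaceholderCursor._get_decimal_converter(5, 2)('3.14159')
    #   # -> Decimal('3.14')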
@staticmethod
def _output_type_handler(cursor, name, defaultType, length, precision, scale):
"""
Called for each db column fetched from cursors. Return numbers as the
appropriate Python type.
"""
if defaultType == Database.NUMBER:
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point.
# This will normally be an integer from a sequence,
# but it could be a decimal value.
outconverter = FormatStylePlaceholderCursor._output_number_converter
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
outconverter = float
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntegerField and DecimalField columns.
outconverter = FormatStylePlaceholderCursor._get_decimal_converter(precision, scale)
else:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
outconverter = FormatStylePlaceholderCursor._output_number_converter
return cursor.var(
Database.STRING,
size=255,
arraysize=cursor.arraysize,
outconverter=outconverter,
)
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
        # Handle a list of dicts if possible; otherwise treat it as a list of sequences.
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
if sizes:
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
if sizes:
self.setinputsizes(*sizes)
def _param_generator(self, params):
        # Handle params as a dict if possible; otherwise treat it as a sequence.
if hasattr(params, 'items'):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params, unify_by_values=False):
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
elif hasattr(params, 'keys'):
# Handle params as dict
args = {k: ":%s" % k for k in params}
query = query % args
elif unify_by_values and params:
# Handle params as a dict with unified query parameters by their
# values. It can be used only in single query execute() because
# executemany() shares the formatted query with each of the params
# list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75]
# params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'}
# args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0']
# params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'}
params_dict = {param: ':arg%d' % i for i, param in enumerate(set(params))}
args = [params_dict[param] for param in params]
params = {value: key for key, value in params_dict.items()}
query = query % tuple(args)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = query % tuple(args)
return query, self._format_params(params)
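    # Illustration of the plain-sequence case above (a sketch): format-style
    # placeholders become numbered Oracle bind names and every parameter is
    # wrapped in an OracleParam:
    #
    #   self._fix_for_params('SELECT %s, %s FROM DUAL', [1, 2])
    #   # -> ('SELECT :arg0, :arg1 FROM DUAL', (OracleParam(1, ...), OracleParam(2, ...)))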
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params, unify_by_values=True)
self._guess_input_sizes([params])
with wrap_oracle_errors():
return self.cursor.execute(query, self._param_generator(params))
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
with wrap_oracle_errors():
return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)

import datetime
import re
import uuid
from functools import lru_cache
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import strip_quotes, truncate_name
from django.db.utils import DatabaseError
from django.utils import timezone
from django.utils.encoding import force_bytes
from django.utils.functional import cached_property
from .base import Database
from .utils import BulkInsertMapper, InsertIdVar, Oracle_datetime
class DatabaseOperations(BaseDatabaseOperations):
# Oracle uses NUMBER(11) and NUMBER(19) for integer fields.
integer_field_ranges = {
'SmallIntegerField': (-99999999999, 99999999999),
'IntegerField': (-99999999999, 99999999999),
'BigIntegerField': (-9999999999999999999, 9999999999999999999),
'PositiveSmallIntegerField': (0, 99999999999),
'PositiveIntegerField': (0, 99999999999),
}
set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'}
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
_sequence_reset_sql = """
DECLARE
table_value integer;
seq_value integer;
seq_name user_tab_identity_cols.sequence_name%%TYPE;
BEGIN
BEGIN
SELECT sequence_name INTO seq_name FROM user_tab_identity_cols
WHERE table_name = '%(table_name)s' AND
column_name = '%(column_name)s';
EXCEPTION WHEN NO_DATA_FOUND THEN
seq_name := '%(no_autofield_sequence_name)s';
END;
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = seq_name;
WHILE table_value > seq_value LOOP
EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL'
INTO seq_value;
END LOOP;
END;
/"""
# Oracle doesn't support string without precision; use the max string size.
cast_char_field_without_max_length = 'NVARCHAR2(2000)'
cast_data_types = {
'AutoField': 'NUMBER(11)',
'BigAutoField': 'NUMBER(19)',
'TextField': cast_char_field_without_max_length,
}
def cache_key_culling_sql(self):
return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY'
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
elif lookup_type == 'week':
# IW = ISO week number
return "TO_CHAR(%s, 'IW')" % field_name
elif lookup_type == 'quarter':
return "TO_CHAR(%s, 'Q')" % field_name
elif lookup_type == 'iso_year':
return "TO_CHAR(%s, 'IYYY')" % field_name
else:
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/EXTRACT-datetime.html
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'quarter':
return "TRUNC(%s, 'Q')" % field_name
elif lookup_type == 'week':
return "TRUNC(%s, 'IW')" % field_name
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
    # if the time zone name is passed as a query parameter. Use interpolation
    # instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = re.compile(r'^[\w/:+-]+$')
def _convert_field_to_tz(self, field_name, tzname):
if not settings.USE_TZ:
return field_name
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE
# and cast it back to TIMESTAMP to strip the TIME ZONE details.
return "CAST((FROM_TZ(%s, '0:00') AT TIME ZONE '%s') AS TIMESTAMP)" % (field_name, tzname)
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return 'TRUNC(%s)' % field_name
def datetime_cast_time_sql(self, field_name, tzname):
        # Since `TimeField` values are stored as TIMESTAMP whose date part is
        # ignored, convert the field to the specified timezone.
return self._convert_field_to_tz(field_name, tzname)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return self.date_extract_sql(lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'quarter':
sql = "TRUNC(%s, 'Q')" % field_name
elif lookup_type == 'week':
sql = "TRUNC(%s, 'IW')" % field_name
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql
def time_trunc_sql(self, lookup_type, field_name):
# The implementation is similar to `datetime_trunc_sql` as both
# `DateTimeField` and `TimeField` are stored as TIMESTAMP where
        # the date part of the latter is ignored.
if lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
elif lookup_type == 'second':
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type == 'BinaryField':
converters.append(self.convert_binaryfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
if settings.USE_TZ:
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
# Oracle stores empty strings as null. If the field accepts the empty
# string, undo this to adhere to the Django convention of using
# the empty string instead of null.
if expression.field.empty_strings_allowed:
converters.append(
self.convert_empty_bytes
if internal_type == 'BinaryField' else
self.convert_empty_string
)
return converters
def convert_textfield_value(self, value, expression, connection):
if isinstance(value, Database.LOB):
value = value.read()
return value
def convert_binaryfield_value(self, value, expression, connection):
if isinstance(value, Database.LOB):
value = force_bytes(value.read())
return value
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime.
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection):
if isinstance(value, Database.Timestamp):
value = value.date()
return value
def convert_timefield_value(self, value, expression, connection):
if isinstance(value, Database.Timestamp):
value = value.time()
return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
@staticmethod
def convert_empty_string(value, expression, connection):
return '' if value is None else value
@staticmethod
def convert_empty_bytes(value, expression, connection):
return b'' if value is None else value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def fetch_returned_insert_id(self, cursor):
value = cursor._insert_id_var.getvalue()
if value is None or value == []:
# cx_Oracle < 6.3 returns None, >= 6.3 returns empty list.
raise DatabaseError(
'The database did not return a new row id. Probably "ORA-1403: '
'no data found" was raised internally but was hidden by the '
'Oracle OCI library (see https://code.djangoproject.com/ticket/28859).'
)
# cx_Oracle < 7 returns value, >= 7 returns list with single value.
return value[0] if isinstance(value, list) else value
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def no_limit_value(self):
return None
def limit_offset_sql(self, low_mark, high_mark):
fetch, offset = self._get_limit_offset_params(low_mark, high_mark)
return '%s%s' % (
(' OFFSET %d ROWS' % offset) if offset else '',
(' FETCH FIRST %d ROWS ONLY' % fetch) if fetch else '',
)
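    # Illustrative output for a queryset slice [5:10]:
    #
    #   self.limit_offset_sql(5, 10)  # -> ' OFFSET 5 ROWS FETCH FIRST 5 ROWS ONLY'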
def last_executed_query(self, cursor, sql, params):
# https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
        # Unlike Psycopg's `query` and MySQLdb's `_executed`, cx_Oracle's
# `statement` doesn't contain the query parameters. refs #20010.
return super().last_executed_query(cursor, statement, params)
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
cursor.execute('"%s".currval' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def pk_default_value(self):
return "NULL"
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return value.read()
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%', '%%')
return name.upper()
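    # Illustrative outputs:
    #
    #   self.quote_name('django_content_type')  # -> '"DJANGO_CONTENT_TYPE"'
    #   self.quote_name('"Already_Quoted"')     # -> '"ALREADY_QUOTED"'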
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def return_insert_id(self):
return "RETURNING %s INTO %%s", (InsertIdVar(),)
def __foreign_key_constraints(self, table_name, recursive):
with self.connection.cursor() as cursor:
if recursive:
cursor.execute("""
SELECT
user_tables.table_name, rcons.constraint_name
FROM
user_tables
JOIN
user_constraints cons
ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U'))
LEFT JOIN
user_constraints rcons
ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R')
START WITH user_tables.table_name = UPPER(%s)
CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name
GROUP BY
user_tables.table_name, rcons.constraint_name
HAVING user_tables.table_name != UPPER(%s)
ORDER BY MAX(level) DESC
""", (table_name, table_name))
else:
cursor.execute("""
SELECT
cons.table_name, cons.constraint_name
FROM
user_constraints cons
WHERE
cons.constraint_type = 'R'
AND cons.table_name = UPPER(%s)
""", (table_name,))
return cursor.fetchall()
@cached_property
def _foreign_key_constraints(self):
# 512 is large enough to fit the ~330 tables (as of this writing) in
# Django's test suite.
return lru_cache(maxsize=512)(self.__foreign_key_constraints)
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
truncated_tables = {table.upper() for table in tables}
constraints = set()
# Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE
# foreign keys which Django doesn't define. Emulate the
# PostgreSQL behavior which truncates all dependent tables by
# manually retrieving all foreign key constraints and resolving
# dependencies.
for table in tables:
for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade):
if allow_cascade:
truncated_tables.add(foreign_table)
constraints.add((foreign_table, constraint))
sql = [
"%s %s %s %s %s %s %s %s;" % (
style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_FIELD(self.quote_name(table)),
style.SQL_KEYWORD('DISABLE'),
style.SQL_KEYWORD('CONSTRAINT'),
style.SQL_FIELD(self.quote_name(constraint)),
style.SQL_KEYWORD('KEEP'),
style.SQL_KEYWORD('INDEX'),
) for table, constraint in constraints
] + [
"%s %s %s;" % (
style.SQL_KEYWORD('TRUNCATE'),
style.SQL_KEYWORD('TABLE'),
style.SQL_FIELD(self.quote_name(table)),
) for table in truncated_tables
] + [
"%s %s %s %s %s %s;" % (
style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_FIELD(self.quote_name(table)),
style.SQL_KEYWORD('ENABLE'),
style.SQL_KEYWORD('CONSTRAINT'),
style.SQL_FIELD(self.quote_name(constraint)),
) for table, constraint in constraints
]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
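    # Shape of the statements produced above for two related tables with
    # allow_cascade=True (a sketch; the table and constraint names are
    # hypothetical):
    #
    #   ALTER TABLE "CHILD" DISABLE CONSTRAINT "CHILD_PARENT_FK" KEEP INDEX;
    #   TRUNCATE TABLE "PARENT";
    #   TRUNCATE TABLE "CHILD";
    #   ALTER TABLE "CHILD" ENABLE CONSTRAINT "CHILD_PARENT_FK";
    #   ...followed by the sequence-reset PL/SQL blocks.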
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table'])
table = self.quote_name(sequence_info['table'])
column = self.quote_name(sequence_info['column'] or 'id')
query = self._sequence_reset_sql % {
'no_autofield_sequence_name': no_autofield_sequence_name,
'table': table,
'column': column,
'table_name': strip_quotes(table),
'column_name': strip_quotes(column),
}
sql.append(query)
return sql
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = self._sequence_reset_sql
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table)
table = self.quote_name(model._meta.db_table)
column = self.quote_name(f.column)
output.append(query % {
'no_autofield_sequence_name': no_autofield_sequence_name,
'table': table,
'column': column,
'table_name': strip_quotes(table),
'column_name': strip_quotes(column),
})
# Only one AutoField is allowed per model, so don't
# continue to loop
break
for f in model._meta.many_to_many:
if not f.remote_field.through:
no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table())
table = self.quote_name(f.m2m_db_table())
column = self.quote_name('id')
output.append(query % {
'no_autofield_sequence_name': no_autofield_sequence_name,
'table': table,
'column': column,
'table_name': strip_quotes(table),
'column_name': 'ID',
})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
The default implementation transforms the date to text, but that is not
necessary for Oracle.
"""
return value
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
        If a naive datetime is passed, assume it is in UTC. Normally Django's
        models.DateTimeField ensures that, when USE_TZ is True, the datetime
        passed in is timezone-aware.
"""
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
return Oracle_datetime.from_datetime(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
if isinstance(value, str):
return datetime.datetime.strptime(value, '%H:%M:%S')
# Oracle doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("Oracle backend does not support timezone-aware times.")
return Oracle_datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def combine_expression(self, connector, sub_expressions):
lhs, rhs = sub_expressions
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs}
elif connector == '<<':
return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
elif connector == '>>':
return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
elif connector == '^':
return 'POWER(%s)' % ','.join(sub_expressions)
return super().combine_expression(connector, sub_expressions)
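    # Illustrative outputs for operators Oracle has no native syntax for:
    #
    #   self.combine_expression('&', ['a', 'b'])   # -> BITAND(a,b)
    #   self.combine_expression('^', ['a', 'b'])   # -> POWER(a,b)
    #   self.combine_expression('<<', ['a', 'b'])  # -> (a * POWER(2, b))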
def _get_no_autofield_sequence_name(self, table):
"""
Manually created sequence name to keep backward compatibility for
AutoFields that aren't Oracle identity columns.
"""
name_length = self.max_name_length() - 3
return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper()
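    # Illustrative output (the table name is hypothetical); the name is
    # truncated so the 3-character '_SQ' suffix fits within Oracle's
    # 30-character identifier limit:
    #
    #   self._get_no_autofield_sequence_name('"blog_post"')  # -> 'BLOG_POST_SQ'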
def _get_sequence_name(self, cursor, table, pk_name):
cursor.execute("""
SELECT sequence_name
FROM user_tab_identity_cols
WHERE table_name = UPPER(%s)
AND column_name = UPPER(%s)""", [table, pk_name])
row = cursor.fetchone()
return self._get_no_autofield_sequence_name(table) if row is None else row[0]
def bulk_insert_sql(self, fields, placeholder_rows):
query = []
for row in placeholder_rows:
select = []
for i, placeholder in enumerate(row):
# A model without any fields has fields=[None].
if fields[i]:
internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type()
placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder
# Add columns aliases to the first select to avoid "ORA-00918:
# column ambiguously defined" when two or more columns in the
# first select have the same value.
if not query:
placeholder = '%s col_%s' % (placeholder, i)
select.append(placeholder)
query.append('SELECT %s FROM DUAL' % ', '.join(select))
# Bulk insert to tables with Oracle identity columns causes Oracle to
# add sequence.nextval to it. Sequence.nextval cannot be used with the
# UNION operator. To prevent incorrect SQL, move UNION to a subquery.
return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query)
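    # Shape of the generated statement for two rows of two columns (a sketch;
    # shown after FormatStylePlaceholderCursor has converted the '%s'
    # placeholders into ':argN' bind names):
    #
    #   SELECT * FROM (
    #       SELECT :arg0 col_0, :arg1 col_1 FROM DUAL
    #       UNION ALL
    #       SELECT :arg2, :arg3 FROM DUAL
    #   )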
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == 'DateField':
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return super().subtract_temporals(internal_type, lhs, rhs)
def bulk_batch_size(self, fields, objs):
"""Oracle restricts the number of parameters in a query."""
if fields:
return self.connection.features.max_query_params // len(fields)
return len(objs)
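    # Illustrative sizing (assuming the Oracle features class sets
    # max_query_params = 2 ** 16 - 1): inserting objects with 10 fields each
    # yields batches of 65535 // 10 == 6553 objects.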

import copy
import datetime
import re
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.utils import DatabaseError
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_alter_column_null = "MODIFY %(column)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s NOT NULL"
sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s"
sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
sql_create_column_inline_fk = 'CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s'
sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
def quote_value(self, value):
if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
return "'%s'" % value
elif isinstance(value, str):
return "'%s'" % value.replace("\'", "\'\'")
elif isinstance(value, (bytes, bytearray, memoryview)):
return "'%s'" % value.hex()
elif isinstance(value, bool):
return "1" if value else "0"
else:
return str(value)
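    # Illustrative outputs:
    #
    #   self.quote_value("O'Reilly")                  # -> "'O''Reilly'"
    #   self.quote_value(True)                        # -> '1'
    #   self.quote_value(b'\x00\xff')                 # -> "'00ff'"
    #   self.quote_value(datetime.date(2019, 1, 1))   # -> "'2019-01-01'"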
def remove_field(self, model, field):
# If the column is an identity column, drop the identity before
# removing the field.
if self._is_identity_column(model._meta.db_table, field.column):
self._drop_identity(model._meta.db_table, field.column)
super().remove_field(model, field)
def delete_model(self, model):
# Run superclass action
super().delete_model(model)
# Clean up manually created sequence.
self.execute("""
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(1) INTO i FROM USER_SEQUENCES
WHERE SEQUENCE_NAME = '%(sq_name)s';
IF i = 1 THEN
EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % {'sq_name': self.connection.ops._get_no_autofield_sequence_name(model._meta.db_table)})
def alter_field(self, model, old_field, new_field, strict=False):
try:
super().alter_field(model, old_field, new_field, strict)
except DatabaseError as e:
description = str(e)
# If we're changing type to an unsupported type we need a
# SQLite-ish workaround
if 'ORA-22858' in description or 'ORA-22859' in description:
self._alter_field_type_workaround(model, old_field, new_field)
# If an identity column is changing to a non-numeric type, drop the
# identity first.
elif 'ORA-30675' in description:
self._drop_identity(model._meta.db_table, old_field.column)
self.alter_field(model, old_field, new_field, strict)
# If a primary key column is changing to an identity column, drop
# the primary key first.
elif 'ORA-30673' in description and old_field.primary_key:
self._delete_primary_key(model, strict=True)
self._alter_field_type_workaround(model, old_field, new_field)
else:
raise
def _alter_field_type_workaround(self, model, old_field, new_field):
"""
        Oracle refuses to change a column directly from certain types to
        certain other types.
What we need to do instead is:
- Add a nullable version of the desired field with a temporary name. If
the new column is an auto field, then the temporary column can't be
nullable.
- Update the table to transfer values from old to new
- Drop old column
- Rename the new column and possibly drop the nullable property
"""
# Make a new field that's like the new one but with a temporary
# column name.
new_temp_field = copy.deepcopy(new_field)
new_temp_field.null = (new_field.get_internal_type() not in ('AutoField', 'BigAutoField'))
new_temp_field.column = self._generate_temp_name(new_field.column)
# Add it
self.add_field(model, new_temp_field)
# Explicit data type conversion
# https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf
# /Data-Type-Comparison-Rules.html#GUID-D0C5A47E-6F93-4C2D-9E49-4F2B86B359DD
new_value = self.quote_name(old_field.column)
old_type = old_field.db_type(self.connection)
if re.match('^N?CLOB', old_type):
new_value = "TO_CHAR(%s)" % new_value
old_type = 'VARCHAR2'
if re.match('^N?VARCHAR2', old_type):
new_internal_type = new_field.get_internal_type()
if new_internal_type == 'DateField':
new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value
elif new_internal_type == 'DateTimeField':
new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
elif new_internal_type == 'TimeField':
                # TimeField values are stored as TIMESTAMP with a 1900-01-01 date part.
new_value = "TO_TIMESTAMP(CONCAT('1900-01-01 ', %s), 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
# Transfer values across
self.execute("UPDATE %s set %s=%s" % (
self.quote_name(model._meta.db_table),
self.quote_name(new_temp_field.column),
new_value,
))
# Drop the old field
self.remove_field(model, old_field)
# Rename and possibly make the new field NOT NULL
super().alter_field(model, new_temp_field, new_field)
def normalize_name(self, name):
"""
Get the properly shortened and uppercased identifier as returned by
quote_name() but without the quotes.
"""
nn = self.quote_name(name)
if nn[0] == '"' and nn[-1] == '"':
nn = nn[1:-1]
return nn
def _generate_temp_name(self, for_name):
"""Generate temporary names for workarounds that need temp columns."""
suffix = hex(hash(for_name)).upper()[1:]
return self.normalize_name(for_name + "_" + suffix)
def prepare_default(self, value):
return self.quote_value(value)
def _field_should_be_indexed(self, model, field):
create_index = super()._field_should_be_indexed(model, field)
db_type = field.db_type(self.connection)
if db_type is not None and db_type.lower() in self.connection._limited_data_types:
return False
return create_index
def _unique_should_be_added(self, old_field, new_field):
return (
super()._unique_should_be_added(old_field, new_field) and
not self._field_became_primary_key(old_field, new_field)
)
def _is_identity_column(self, table_name, column_name):
with self.connection.cursor() as cursor:
cursor.execute("""
SELECT
CASE WHEN identity_column = 'YES' THEN 1 ELSE 0 END
FROM user_tab_cols
WHERE table_name = %s AND
column_name = %s
""", [self.normalize_name(table_name), self.normalize_name(column_name)])
row = cursor.fetchone()
return row[0] if row else False
def _drop_identity(self, table_name, column_name):
self.execute('ALTER TABLE %(table)s MODIFY %(column)s DROP IDENTITY' % {
'table': self.quote_name(table_name),
'column': self.quote_name(column_name),
})

from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures:
gis_enabled = False
allows_group_by_pk = False
allows_group_by_selected_pks = False
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate NULL rows in a nullable
# unique field? All core backends implement this correctly, but other
# databases such as SQL Server do not.
supports_nullable_unique_constraints = True
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists and some fields are nullable but not all of them?
supports_partially_nullable_unique_constraints = True
can_use_chunked_reads = True
can_return_columns_from_insert = False
can_return_rows_from_bulk_insert = False
has_bulk_insert = True
uses_savepoints = True
can_release_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries_with_in = True
has_select_for_update = False
has_select_for_update_nowait = False
has_select_for_update_skip_locked = False
has_select_for_update_of = False
# Does the database's SELECT FOR UPDATE OF syntax require a column rather
# than a table?
select_for_update_of_column = False
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does the backend truncate names properly when they are too long?
truncates_names = False
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
# Is there a true datatype for uuid?
has_native_uuid_field = False
# Is there a true datatype for timedeltas?
has_native_duration_field = False
    # Does the database driver support subtraction of temporal data of the
    # same type, returning the type used to store a duration field?
supports_temporal_subtraction = False
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# The database's limit on the number of query parameters.
max_query_params = None
# Can an object have an autoincrement primary key of 0? MySQL says No.
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
    # deferred?
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Can the backend introspect the default value of a column?
can_introspect_default = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Can the backend introspect an AutoField, instead of an IntegerField?
can_introspect_autofield = False
# Can the backend introspect a BigIntegerField, instead of an IntegerField?
can_introspect_big_integer_field = True
    # Can the backend introspect a BinaryField, instead of a TextField?
    can_introspect_binary_field = True
    # Can the backend introspect a DecimalField, instead of a FloatField?
    can_introspect_decimal_field = True
    # Can the backend introspect a DurationField, instead of a BigIntegerField?
    can_introspect_duration_field = True
    # Can the backend introspect an IPAddressField, instead of a CharField?
    can_introspect_ip_address_field = False
# Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
can_introspect_positive_integer_field = False
# Can the backend introspect a SmallIntegerField, instead of an IntegerField?
can_introspect_small_integer_field = False
# Can the backend introspect a TimeField, instead of a DateTimeField?
can_introspect_time_field = True
# Some backends may not be able to differentiate BigAutoField from other
# fields such as AutoField.
introspected_big_auto_field_type = 'BigAutoField'
# Some backends may not be able to differentiate BooleanField from other
# fields such as IntegerField.
introspected_boolean_field_type = 'BooleanField'
# Can the backend introspect the column order (ASC/DESC) for indexes?
supports_index_column_ordering = True
# Does the backend support introspection of materialized views?
can_introspect_materialized_views = False
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Does it support operations requiring references rename in a transaction?
supports_atomic_references_rename = True
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Can it create foreign key constraints inline when adding columns?
can_create_inline_fk = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
supports_table_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterized ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = True
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ''
# If NULL is implied on columns without needing to be explicitly specified
implied_column_null = False
# Does the backend support "select for update" queries with limit (and offset)?
supports_select_for_update_with_limit = True
# Does the backend ignore null expressions in GREATEST and LEAST queries unless
# every expression is null?
greatest_least_ignores_nulls = False
# Can the backend clone databases for parallel test execution?
# Defaults to False to allow third-party backends to opt-in.
can_clone_databases = False
# Does the backend consider table names with different casing to
# be equal?
ignores_table_name_case = False
# Place FOR UPDATE right after FROM clause. Used on MSSQL.
for_update_after_from = False
# Combinatorial flags
supports_select_union = True
supports_select_intersection = True
supports_select_difference = True
supports_slicing_ordering_in_compound = False
supports_parentheses_in_compound = True
# Does the database support SQL 2003 FILTER (WHERE ...) in aggregate
# expressions?
supports_aggregate_filter_clause = False
# Does the backend support indexing a TextField?
supports_index_on_text_field = True
# Does the backend support window expressions (expression OVER (...))?
supports_over_clause = False
# Does the backend support CAST with precision?
supports_cast_with_precision = True
# How many second decimals does the database return when casting a value to
# a type with time?
time_cast_precision = 6
# SQL to create a procedure for use by the Django test suite. The
# functionality of the procedure isn't important.
create_test_procedure_without_params_sql = None
create_test_procedure_with_int_param_sql = None
# Does the backend support keyword parameters for cursor.callproc()?
supports_callproc_kwargs = False
# Convert CharField results from bytes to str in database functions.
db_functions_convert_bytes_to_str = False
# What formats does the backend EXPLAIN syntax support?
supported_explain_formats = set()
# Does DatabaseOperations.explain_query_prefix() raise ValueError if
# unknown kwargs are passed to QuerySet.explain()?
validates_explain_options = True
# Does the backend support the default parameter in lead() and lag()?
supports_default_in_lead_lag = True
# Does the backend support ignoring constraint or uniqueness errors during
# INSERT?
supports_ignore_conflicts = True
# Does this backend require casting the results of CASE expressions used
# in UPDATE statements to ensure the expression has the correct type?
requires_casted_case_in_updates = False
# Does the backend support partial indexes (CREATE INDEX ... WHERE ...)?
supports_partial_indexes = True
supports_functions_in_partial_indexes = True
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_explaining_query_execution(self):
"""Does this backend support explaining query execution?"""
return self.connection.ops.explain_prefix is not None
@cached_property
def supports_transactions(self):
"""Confirm support for transactions."""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.set_autocommit(False)
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection.rollback()
self.connection.set_autocommit(True)
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
return count == 0

import datetime
import decimal
from importlib import import_module
import sqlparse
from django.conf import settings
from django.db import NotSupportedError, transaction
from django.db.backends import utils
from django.utils import timezone
from django.utils.encoding import force_str
class BaseDatabaseOperations:
"""
Encapsulate backend-specific differences, such as the way a backend
performs ordering or calculates the ID of a recently-inserted row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
set_operators = {
'union': 'UNION',
'intersection': 'INTERSECT',
'difference': 'EXCEPT',
}
# Mapping of Field.get_internal_type() (typically the model field's class
# name) to the data type to use for the Cast() function, if different from
# DatabaseWrapper.data_types.
cast_data_types = {}
# CharField data type if the max_length argument isn't provided.
cast_char_field_without_max_length = None
# Start and end points for window expressions.
PRECEDING = 'PRECEDING'
FOLLOWING = 'FOLLOWING'
UNBOUNDED_PRECEDING = 'UNBOUNDED ' + PRECEDING
UNBOUNDED_FOLLOWING = 'UNBOUNDED ' + FOLLOWING
CURRENT_ROW = 'CURRENT ROW'
    # Prefix for EXPLAIN queries, or None if EXPLAIN isn't supported.
explain_prefix = None
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Return any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Return the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Return an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, return the SQL that casts the result of a union
to that type. The resulting string should contain a '%s' placeholder
for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, timedelta):
"""
Implement the date interval functionality for expressions.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method.')
def datetime_cast_date_sql(self, field_name, tzname):
"""
Return the SQL to cast a datetime value to date value.
"""
raise NotImplementedError(
'subclasses of BaseDatabaseOperations may require a '
'datetime_cast_date_sql() method.'
)
def datetime_cast_time_sql(self, field_name, tzname):
"""
Return the SQL to cast a datetime value to time value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method')
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or
'second', return the SQL that extracts a value from the given
datetime field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or
'second', return the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def time_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', return the SQL
that truncates the given time field field_name to a time object with
only the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method')
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute', or 'second', return the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Return the SQL to make a constraint "initially deferred" during a
CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields, params):
"""
Return an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only check the given fields for
duplicates.
"""
if fields:
raise NotSupportedError('DISTINCT ON fields is not supported by this database backend')
else:
return ['DISTINCT'], []
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, return the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type
(e.g. 'GenericIPAddressField'), return the SQL to cast it before using
it in a WHERE statement. The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Return a list used in the "ORDER BY" clause to force no ordering at
all. Return an empty list to include nothing in the ordering.
"""
return []
def for_update_sql(self, nowait=False, skip_locked=False, of=()):
"""
Return the FOR UPDATE SQL clause to lock rows for an update operation.
"""
return 'FOR UPDATE%s%s%s' % (
' OF %s' % ', '.join(of) if of else '',
' NOWAIT' if nowait else '',
' SKIP LOCKED' if skip_locked else '',
)
def _get_limit_offset_params(self, low_mark, high_mark):
offset = low_mark or 0
if high_mark is not None:
return (high_mark - offset), offset
elif offset:
return self.connection.ops.no_limit_value(), offset
return None, offset
def limit_offset_sql(self, low_mark, high_mark):
"""Return LIMIT/OFFSET SQL clause."""
limit, offset = self._get_limit_offset_params(low_mark, high_mark)
return '%s%s' % (
(' LIMIT %d' % limit) if limit else '',
(' OFFSET %d' % offset) if offset else '',
)
def last_executed_query(self, cursor, sql, params):
"""
Return a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain string values.
def to_string(s):
return force_str(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_string(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_string(k): to_string(v) for k, v in params.items()}
return "QUERY = %r - PARAMS = %r" % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, return the newly created ID.
`pk_name` is the name of the primary-key column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Return the string to use in a query when performing lookups
("contains", "like", etc.). It should contain a '%s' placeholder for
the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Return the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Return the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Return the value to use for the LIMIT when we want "LIMIT infinity".
        Return None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Return the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Take an SQL script that may contain multiple lines and return a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
return [
sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement
]
def process_clob(self, value):
"""
Return the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part of an
insert query, return the SQL and params to append to the INSERT query.
The returned fragment should contain a format string to hold the
appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Return the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Return a quoted version of the given table, index, or column name. Do
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""Return an SQL expression that returns a random value."""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Return the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). It should contain a '%s'
placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), raise
NotImplementedError.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Return the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Return the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Return the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Return the SQL that will set the connection's time zone.
Return '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Return a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves) and the SQL statements required to reset the sequences
passed in `sequences`.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
        to tables with foreign keys pointing to the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method')
def execute_sql_flush(self, using, sql_list):
"""Execute a list of SQL statements to flush the database."""
with transaction.atomic(using=using, savepoint=self.connection.features.can_rollback_ddl):
with self.connection.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def sequence_reset_by_name_sql(self, style, sequences):
"""
Return a list of the SQL statements required to reset sequences
passed in `sequences`.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Return a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""Return the SQL statement required to start a transaction."""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""Return the SQL statement required to end a transaction."""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Return the SQL that will be used in a query to define the tablespace.
Return '' if the backend doesn't support tablespaces.
If `inline` is True, append the SQL to a row; otherwise append it to
the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepare a value for use in a LIKE query."""
return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). Raise a ValueError if the value is
invalid, otherwise return the validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transform a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return str(value)
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return str(value)
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return str(value)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transform a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def year_lookup_bounds_for_date_field(self, value):
"""
        Return a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
def year_lookup_bounds_for_datetime_field(self, value):
"""
        Return a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Return a list of functions needed to convert field data.
        Some field types on some backends do not provide data in the correct
        format; this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection):
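        # `value` is a number of microseconds; timedelta's third positional
        # argument interprets it as such.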
if value is not None:
return datetime.timedelta(0, 0, value)
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotSupportedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""
Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions).
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def binary_placeholder_sql(self, value):
"""
Some backends require special syntax to insert binary content (MySQL
for example uses '_binary %s').
"""
return '%s'
def modify_insert_params(self, placeholder, params):
"""
Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
return a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
def subtract_temporals(self, internal_type, lhs, rhs):
if self.connection.features.supports_temporal_subtraction:
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
raise NotSupportedError("This backend does not support %s subtraction." % internal_type)
def window_frame_start(self, start):
if isinstance(start, int):
if start < 0:
return '%d %s' % (abs(start), self.PRECEDING)
elif start == 0:
return self.CURRENT_ROW
elif start is None:
return self.UNBOUNDED_PRECEDING
raise ValueError("start argument must be a negative integer, zero, or None, but got '%s'." % start)
def window_frame_end(self, end):
if isinstance(end, int):
if end == 0:
return self.CURRENT_ROW
elif end > 0:
return '%d %s' % (end, self.FOLLOWING)
elif end is None:
return self.UNBOUNDED_FOLLOWING
raise ValueError("end argument must be a positive integer, zero, or None, but got '%s'." % end)
def window_frame_rows_start_end(self, start=None, end=None):
"""
Return SQL for start and end points in an OVER clause window frame.
"""
if not self.connection.features.supports_over_clause:
raise NotSupportedError('This backend does not support window expressions.')
return self.window_frame_start(start), self.window_frame_end(end)
def window_frame_range_start_end(self, start=None, end=None):
return self.window_frame_rows_start_end(start, end)
def explain_query_prefix(self, format=None, **options):
if not self.connection.features.supports_explaining_query_execution:
raise NotSupportedError('This backend does not support explaining query execution.')
if format:
supported_formats = self.connection.features.supported_explain_formats
normalized_format = format.upper()
if normalized_format not in supported_formats:
msg = '%s is not a recognized format.' % normalized_format
if supported_formats:
msg += ' Allowed formats: %s' % ', '.join(sorted(supported_formats))
raise ValueError(msg)
if options:
raise ValueError('Unknown options: %s' % ', '.join(sorted(options.keys())))
return self.explain_prefix
def insert_statement(self, ignore_conflicts=False):
return 'INSERT INTO'
def ignore_conflicts_suffix_sql(self, ignore_conflicts=None):
return ''
|
2377235f9bdb64284b7000ae1a44e94ab97c85faef7bfe0160aac7e6be34f51e | import logging
from datetime import datetime
from django.db.backends.ddl_references import (
Columns, ForeignKeyName, IndexName, Statement, Table,
)
from django.db.backends.utils import names_digest, split_identifier
from django.db.models import Index
from django.db.transaction import TransactionManagementError, atomic
from django.utils import timezone
logger = logging.getLogger('django.db.backends.schema')
def _is_relevant_relation(relation, altered_field):
"""
When altering the given field, must constraints on its model from the given
relation be temporarily dropped?
"""
field = relation.field
if field.many_to_many:
# M2M reverse field
return False
if altered_field.primary_key and field.to_fields == [None]:
# Foreign key constraint on the primary key, which is being altered.
return True
# Is the constraint targeting the field being altered?
return altered_field.name in field.to_fields
def _related_non_m2m_objects(old_field, new_field):
# Filter out m2m objects from reverse relations.
# Return (old_relation, new_relation) tuples.
return zip(
(obj for obj in old_field.model._meta.related_objects if _is_relevant_relation(obj, old_field)),
(obj for obj in new_field.model._meta.related_objects if _is_relevant_relation(obj, new_field))
)
class BaseDatabaseSchemaEditor:
"""
This class and its subclasses are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
sql_unique_constraint = "UNIQUE (%(columns)s)"
sql_check_constraint = "CHECK (%(check)s)"
sql_delete_constraint = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_constraint = "CONSTRAINT %(name)s %(constraint)s"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = sql_delete_constraint
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = sql_delete_constraint
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s)%(deferrable)s"
)
sql_create_inline_fk = None
sql_create_column_inline_fk = None
sql_delete_fk = sql_delete_constraint
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s%(condition)s"
sql_create_unique_index = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(condition)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = sql_delete_constraint
sql_delete_procedure = 'DROP PROCEDURE %(procedure)s'
def __init__(self, connection, collect_sql=False, atomic=True):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
self.atomic_migration = self.connection.features.can_rollback_ddl and atomic
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.atomic_migration:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.atomic_migration:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=()):
"""Execute the given SQL statement, with optional parameters."""
# Don't perform the transactional DDL check if SQL is being collected
# as it's not going to be executed anyway.
if not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl:
raise TransactionManagementError(
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
# Account for non-string statement objects.
sql = str(sql)
# Log the command we're running, then run it
logger.debug("%s; (params %r)", sql, params, extra={'params': params, 'sql': sql})
if self.collect_sql:
ending = "" if sql.endswith(";") else ";"
if params is not None:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Take a field and return its column definition.
The field must already have had set_attributes_from_name() called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
include_default = include_default and not self.skip_default(field)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
if self.connection.features.requires_literal_defaults:
                    # Some databases (Oracle) can't take defaults as a
                    # parameter. If this is the case, the individual schema
                    # backend should implement prepare_default().
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null and not self.connection.features.implied_column_null:
sql += " NULL"
elif not null:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
def skip_default(self, field):
"""
        Some backends don't accept default values for certain column types
        (e.g. MySQL longtext and longblob).
"""
return False
def prepare_default(self, value):
"""
        Only used for backends which have the requires_literal_defaults feature.
"""
raise NotImplementedError(
'subclasses of BaseDatabaseSchemaEditor for backends which have '
'requires_literal_defaults must provide a prepare_default() method'
)
@staticmethod
def _effective_default(field):
# This method allows testing its logic without a connection.
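        # Resolution order: an explicit default, then ''/b'' for blank
        # non-null string-like fields, then now() for auto_now/auto_now_add
        # fields, otherwise None.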
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = bytes()
else:
default = str()
elif getattr(field, 'auto_now', False) or getattr(field, 'auto_now_add', False):
default = datetime.now()
internal_type = field.get_internal_type()
if internal_type == 'DateField':
default = default.date()
elif internal_type == 'TimeField':
default = default.time()
elif internal_type == 'DateTimeField':
default = timezone.now()
else:
default = None
return default
def effective_default(self, field):
"""Return a field's effective database default value."""
return field.get_db_prep_save(self._effective_default(field), self.connection)
def quote_value(self, value):
"""
Return a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Create a table and any accompanying indexes or unique constraints for
the given `model`.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " " + self.sql_check_constraint % db_params
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column
if self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
elif self.connection.features.supports_foreign_keys:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() in ("AutoField", "BigAutoField"):
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers (always deferred, as some fields might be
# created afterwards, like geometry fields with some backends)
for fields in model._meta.unique_together:
columns = [model._meta.get_field(field).column for field in fields]
self.deferred_sql.append(self._create_unique_sql(model, columns))
constraints = [constraint.constraint_sql(model, self) for constraint in model._meta.constraints]
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(constraint for constraint in (*column_sqls, *constraints) if constraint),
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
if tablespace_sql:
sql += ' ' + tablespace_sql
        # Prevent using [] as params, in case a literal '%' is used in the definition.
self.execute(sql, params or None)
        # Add any field indexes and index_together indexes (deferred, as SQLite _remake_table needs them)
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""Delete a model from the database."""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
# Remove all deferred statements referencing the deleted table.
for sql in list(self.deferred_sql):
if isinstance(sql, Statement) and sql.references_table(model._meta.db_table):
self.deferred_sql.remove(sql)
def add_index(self, model, index):
"""Add an index on a model."""
self.execute(index.create_sql(model, self), params=None)
def remove_index(self, model, index):
"""Remove an index from a model."""
self.execute(index.remove_sql(model, self))
def add_constraint(self, model, constraint):
"""Add a check constraint to a model."""
sql = constraint.create_sql(model, self)
if sql:
self.execute(sql)
def remove_constraint(self, model, constraint):
"""Remove a check constraint from a model."""
sql = constraint.remove_sql(model, self)
if sql:
self.execute(sql)
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deal with a model changing its unique_together. The input
unique_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = {tuple(fields) for fields in old_unique_together}
news = {tuple(fields) for fields in new_unique_together}
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field(field).column for field in fields]
self.execute(self._create_unique_sql(model, columns))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deal with a model changing its index_together. The input
index_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = {tuple(fields) for fields in old_index_together}
news = {tuple(fields) for fields in new_index_together}
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
columns = [model._meta.get_field(field).column for field in fields]
constraint_names = self._constraint_names(model, columns, **constraint_kwargs)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""Rename the table a model points to."""
if (old_db_table == new_db_table or
(self.connection.features.ignores_table_name_case and
old_db_table.lower() == new_db_table.lower())):
return
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
# Rename all references to the old table name.
for sql in self.deferred_sql:
if isinstance(sql, Statement):
sql.rename_table_references(old_db_table, new_db_table)
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""Move a model's table between tablespaces."""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Create a field on a model. Usually involves adding a column, but may
involve adding a table instead (for M2M fields).
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " " + self.sql_check_constraint % db_params
if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint:
constraint_suffix = '_fk_%(to_table)s_%(to_column)s'
# Add FK constraint inline, if supported.
if self.sql_create_column_inline_fk:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column
definition += " " + self.sql_create_column_inline_fk % {
'name': self._fk_constraint_name(model, field, constraint_suffix),
'column': self.quote_name(field.column),
'to_table': self.quote_name(to_table),
'to_column': self.quote_name(to_column),
'deferrable': self.connection.ops.deferrable_sql()
}
# Otherwise, add FK constraints later.
else:
self.deferred_sql.append(self._create_fk_sql(model, field, constraint_suffix))
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if not self.skip_default(field) and self.effective_default(field) is not None:
changes_sql, params = self._alter_column_default_sql(model, None, field, drop=True)
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": changes_sql,
}
self.execute(sql, params)
# Add an index, if required
self.deferred_sql.extend(self._field_indexes_sql(model, field))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_fk_sql(model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
# Remove all deferred statements referencing the deleted column.
for sql in list(self.deferred_sql):
if isinstance(sql, Statement) and sql.references_column(model._meta.db_table, field.column):
self.deferred_sql.remove(sql)
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allow a field's type, uniqueness, nullability, default, column,
constraints, etc. to be modified.
`old_field` is required to compute the necessary changes.
If `strict` is True, raise errors if the old column does not match
`old_field` precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if ((old_type is None and old_field.remote_field is None) or
(new_type is None and new_field.remote_field is None)):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using a badly-written custom field?)" %
(old_field, new_field),
)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
old_field.remote_field.through._meta.auto_created and
new_field.remote_field.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
not old_field.remote_field.through._meta.auto_created and
not new_field.remote_field.through._meta.auto_created):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.remote_field and old_field.db_constraint:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_fk_sql(model, fk_name))
# Has unique been removed?
if old_field.unique and (not new_field.unique or self._field_became_primary_key(old_field, new_field)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True, primary_key=False)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_unique_sql(model, constraint_name))
        # Drop incoming FK constraints if the field is a primary key or
        # unique, which might be a to_field target, and the column type is
        # changing.
drop_foreign_keys = (
(
(old_field.primary_key and new_field.primary_key) or
(old_field.unique and new_field.unique)
) and old_type != new_type
)
if drop_foreign_keys:
            # '_meta.related_objects' also contains M2M reverse fields; these
            # will be filtered out.
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_fk_sql(new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
# Remove indexes if db_index switched to False or a unique constraint
# will now be used in lieu of an index. The following lines from the
# truth table show all True cases; the rest are False:
#
# old_field.db_index | old_field.unique | new_field.db_index | new_field.unique
# ------------------------------------------------------------------------------
# True | False | False | False
# True | False | False | True
# True | False | True | True
if old_field.db_index and not old_field.unique and (not new_field.db_index or new_field.unique):
# Find the index for this field
meta_index_names = {index.name for index in model._meta.indexes}
# Retrieve only BTREE indexes since this is what's created with
# db_index=True.
index_names = self._constraint_names(model, [old_field.column], index=True, type_=Index.suffix)
for index_name in index_names:
if index_name not in meta_index_names:
# The only way to check if an index was created with
# db_index=True or with Index(['field'], name='foo')
# is to look at its name (refs #28053).
self.execute(self._delete_index_sql(model, index_name))
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_check_sql(model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
# Rename all references to the renamed column.
for sql in self.deferred_sql:
if isinstance(sql, Statement):
sql.rename_column_references(model._meta.db_table, old_field.column, new_field.column)
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(model, old_field, new_field, new_type)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
needs_database_default = (
old_field.null and
not new_field.null and
old_default != new_default and
new_default is not None and
not self.skip_default(new_field)
)
if needs_database_default:
actions.append(self._alter_column_default_sql(model, old_field, new_field))
# Nullability change?
if old_field.null != new_field.null:
fragment = self._alter_column_null_sql(model, old_field, new_field)
if fragment:
null_actions.append(fragment)
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = (
new_field.has_default() and
(old_field.null and not new_field.null)
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions = actions + null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
                # Since we didn't run a NOT NULL change before, we need to do
                # it now.
for sql, params in null_actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# If primary_key changed to False, delete the primary key constraint.
if old_field.primary_key and not new_field.primary_key:
self._delete_primary_key(model, strict)
# Added a unique?
if self._unique_should_be_added(old_field, new_field):
self.execute(self._create_unique_sql(model, [new_field.column]))
# Added an index? Add an index if db_index switched to True or a unique
# constraint will no longer be used in lieu of an index. The following
# lines from the truth table show all True cases; the rest are False:
#
# old_field.db_index | old_field.unique | new_field.db_index | new_field.unique
# ------------------------------------------------------------------------------
# False | False | True | False
# False | True | True | False
# True | True | True | False
if (not old_field.db_index or old_field.unique) and new_field.db_index and not new_field.unique:
self.execute(self._create_index_sql(model, [new_field]))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
if self._field_became_primary_key(old_field, new_field):
# Make the new one
self.execute(self._create_primary_key_sql(model, new_field))
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model, old_rel.field, new_rel.field, rel_type
)
self.execute(
self.sql_alter_column % {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (new_field.remote_field and
(fks_dropped or not old_field.remote_field or not old_field.db_constraint) and
new_field.db_constraint):
self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
# Rebuild FKs that pointed to us if we previously had to drop them
if drop_foreign_keys:
for rel in new_field.model._meta.related_objects:
if _is_relevant_relation(rel, new_field) and rel.field.db_constraint:
self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk"))
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
constraint_name = self._create_index_name(model._meta.db_table, [new_field.column], suffix='_check')
self.execute(self._create_check_sql(model, constraint_name, new_db_params['check']))
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
changes_sql, params = self._alter_column_default_sql(model, old_field, new_field, drop=True)
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": changes_sql,
}
self.execute(sql, params)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_null_sql(self, model, old_field, new_field):
"""
Hook to specialize column null alteration.
Return a (sql, params) fragment to set a column to null or non-null
as required by new_field, or None if no changes are required.
"""
if (self.connection.features.interprets_empty_strings_as_nulls and
new_field.get_internal_type() in ("CharField", "TextField")):
            # The field is nullable in the database anyway; leave it alone.
return
else:
new_db_params = new_field.db_parameters(connection=self.connection)
sql = self.sql_alter_column_null if new_field.null else self.sql_alter_column_not_null
return (
sql % {
'column': self.quote_name(new_field.column),
'type': new_db_params['type'],
},
[],
)
def _alter_column_default_sql(self, model, old_field, new_field, drop=False):
"""
Hook to specialize column default alteration.
Return a (sql, params) fragment to add or drop (depending on the drop
argument) a default to new_field's column.
"""
new_default = self.effective_default(new_field)
default = '%s'
params = [new_default]
if drop:
params = []
elif self.connection.features.requires_literal_defaults:
# Some databases (Oracle) can't take defaults as a parameter
# If this is the case, the SchemaEditor for that database should
# implement prepare_default().
default = self.prepare_default(new_default)
params = []
new_db_params = new_field.db_parameters(connection=self.connection)
sql = self.sql_alter_column_no_default if drop else self.sql_alter_column_default
return (
sql % {
'column': self.quote_name(new_field.column),
'type': new_db_params['type'],
'default': default,
},
params,
)
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Return a two-tuple of: an SQL fragment of (sql, params) to insert into
an ALTER TABLE statement and a list of extra (sql, params) tuples to
run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""Alter M2Ms to repoint their to= endpoints."""
# Rename the through table
if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table:
self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.remote_field.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)
self.alter_field(
new_field.remote_field.through,
# for self-referential models we need to alter field from the other end too
old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, table_name, column_names, suffix=""):
"""
Generate a unique name for an index/unique constraint.
The name is divided into 3 parts: the table name, the column names,
and a unique digest and suffix.
"""
_, table_name = split_identifier(table_name)
hash_suffix_part = '%s%s' % (names_digest(table_name, *column_names, length=8), suffix)
max_length = self.connection.ops.max_name_length() or 200
# If everything fits into max_length, use that name.
index_name = '%s_%s_%s' % (table_name, '_'.join(column_names), hash_suffix_part)
if len(index_name) <= max_length:
return index_name
# Shorten a long suffix.
if len(hash_suffix_part) > max_length / 3:
hash_suffix_part = hash_suffix_part[:max_length // 3]
other_length = (max_length - len(hash_suffix_part)) // 2 - 1
index_name = '%s_%s_%s' % (
table_name[:other_length],
'_'.join(column_names)[:other_length],
hash_suffix_part,
)
# Prepend D if needed to prevent the name from starting with an
# underscore or a number (not permitted on Oracle).
if index_name[0] == "_" or index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
def _get_index_tablespace_sql(self, model, fields, db_tablespace=None):
if db_tablespace is None:
if len(fields) == 1 and fields[0].db_tablespace:
db_tablespace = fields[0].db_tablespace
elif model._meta.db_tablespace:
db_tablespace = model._meta.db_tablespace
if db_tablespace is not None:
return ' ' + self.connection.ops.tablespace_sql(db_tablespace)
return ''
def _create_index_sql(self, model, fields, *, name=None, suffix='', using='',
db_tablespace=None, col_suffixes=(), sql=None, opclasses=(),
condition=None):
"""
Return the SQL statement to create the index for one or several fields.
`sql` can be specified if the syntax differs from the standard (GIS
indexes, ...).
"""
tablespace_sql = self._get_index_tablespace_sql(model, fields, db_tablespace=db_tablespace)
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
table = model._meta.db_table
def create_index_name(*args, **kwargs):
nonlocal name
if name is None:
name = self._create_index_name(*args, **kwargs)
return self.quote_name(name)
return Statement(
sql_create_index,
table=Table(table, self.quote_name),
name=IndexName(table, columns, suffix, create_index_name),
using=using,
columns=self._index_columns(table, columns, col_suffixes, opclasses),
extra=tablespace_sql,
condition=(' WHERE ' + condition) if condition else '',
)
def _delete_index_sql(self, model, name):
return Statement(
self.sql_delete_index,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(name),
)
def _index_columns(self, table, columns, col_suffixes, opclasses):
return Columns(table, columns, self.quote_name, col_suffixes=col_suffixes)
def _model_indexes_sql(self, model):
"""
Return a list of all index SQL statements (field indexes,
index_together, Meta.indexes) for the specified model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
output.extend(self._field_indexes_sql(model, field))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields, suffix="_idx"))
for index in model._meta.indexes:
output.append(index.create_sql(model, self))
return output
def _field_indexes_sql(self, model, field):
"""
Return a list of all index SQL statements for the specified field.
"""
output = []
if self._field_should_be_indexed(model, field):
output.append(self._create_index_sql(model, [field]))
return output
def _field_should_be_indexed(self, model, field):
return field.db_index and not field.unique
def _field_became_primary_key(self, old_field, new_field):
return not old_field.primary_key and new_field.primary_key
def _unique_should_be_added(self, old_field, new_field):
return (not old_field.unique and new_field.unique) or (
old_field.primary_key and not new_field.primary_key and new_field.unique
)
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
table = Table(model._meta.db_table, self.quote_name)
name = self._fk_constraint_name(model, field, suffix)
column = Columns(model._meta.db_table, [field.column], self.quote_name)
to_table = Table(field.target_field.model._meta.db_table, self.quote_name)
to_column = Columns(field.target_field.model._meta.db_table, [field.target_field.column], self.quote_name)
deferrable = self.connection.ops.deferrable_sql()
return Statement(
self.sql_create_fk,
table=table,
name=name,
column=column,
to_table=to_table,
to_column=to_column,
deferrable=deferrable,
)
def _fk_constraint_name(self, model, field, suffix):
def create_fk_name(*args, **kwargs):
return self.quote_name(self._create_index_name(*args, **kwargs))
return ForeignKeyName(
model._meta.db_table,
[field.column],
split_identifier(field.target_field.model._meta.db_table)[1],
[field.target_field.column],
suffix,
create_fk_name,
)
def _delete_fk_sql(self, model, name):
return self._delete_constraint_sql(self.sql_delete_fk, model, name)
def _unique_sql(self, model, fields, name, condition=None):
if condition:
# Databases support conditional unique constraints via a unique
# index.
sql = self._create_unique_sql(model, fields, name=name, condition=condition)
if sql:
self.deferred_sql.append(sql)
return None
constraint = self.sql_unique_constraint % {
'columns': ', '.join(map(self.quote_name, fields)),
}
return self.sql_constraint % {
'name': self.quote_name(name),
'constraint': constraint,
}
def _create_unique_sql(self, model, columns, name=None, condition=None):
def create_unique_name(*args, **kwargs):
return self.quote_name(self._create_index_name(*args, **kwargs))
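        # With a condition, a partial unique index is created (when the
        # backend supports it); otherwise a table-level UNIQUE constraint.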
table = Table(model._meta.db_table, self.quote_name)
if name is None:
name = IndexName(model._meta.db_table, columns, '_uniq', create_unique_name)
else:
name = self.quote_name(name)
columns = Columns(table, columns, self.quote_name)
if condition:
return Statement(
self.sql_create_unique_index,
table=table,
name=name,
columns=columns,
condition=' WHERE ' + condition,
) if self.connection.features.supports_partial_indexes else None
else:
return Statement(
self.sql_create_unique,
table=table,
name=name,
columns=columns,
)
def _delete_unique_sql(self, model, name, condition=None):
if condition:
return (
self._delete_constraint_sql(self.sql_delete_index, model, name)
if self.connection.features.supports_partial_indexes else None
)
return self._delete_constraint_sql(self.sql_delete_unique, model, name)
def _check_sql(self, name, check):
return self.sql_constraint % {
'name': self.quote_name(name),
'constraint': self.sql_check_constraint % {'check': check},
}
def _create_check_sql(self, model, name, check):
return Statement(
self.sql_create_check,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(name),
check=check,
)
def _delete_check_sql(self, model, name):
return self._delete_constraint_sql(self.sql_delete_check, model, name)
def _delete_constraint_sql(self, template, model, name):
return Statement(
template,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(name),
)
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None, type_=None):
"""Return all constraint names matching the columns and conditions."""
if column_names is not None:
column_names = [
self.connection.introspection.identifier_converter(name)
for name in column_names
]
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
if type_ is not None and infodict['type'] != type_:
continue
result.append(name)
return result
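# A hypothetical use: _constraint_names(model, ['author_id'], foreign_key=True)
# returns the names of every foreign key constraint declared on exactly that
# column, since each keyword filter above only narrows the introspected
# candidates further.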
def _delete_primary_key(self, model, strict=False):
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError('Found wrong number (%s) of PK constraints for %s' % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(self._delete_primary_key_sql(model, constraint_name))
def _create_primary_key_sql(self, model, field):
return Statement(
self.sql_create_pk,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(
self._create_index_name(model._meta.db_table, [field.column], suffix="_pk")
),
columns=Columns(model._meta.db_table, [field.column], self.quote_name),
)
def _delete_primary_key_sql(self, model, name):
return self._delete_constraint_sql(self.sql_delete_pk, model, name)
def remove_procedure(self, procedure_name, param_types=()):
sql = self.sql_delete_procedure % {
'procedure': self.quote_name(procedure_name),
'param_types': ','.join(param_types),
}
self.execute(sql)
|
0ce9c5257438342377045b516e8e89b26a3ddcd2d31365d84180af46e5a2df5a | from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
# No 'CASCADE' here: MySQL accepts it as a no-op, but that behavior is undocumented.
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_create_column_inline_fk = (
', ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) '
'REFERENCES %(to_table)s(%(to_column)s)'
)
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
sql_create_index = 'CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s'
def quote_value(self, value):
self.connection.ensure_connection()
quoted = self.connection.connection.escape(value, self.connection.connection.encoders)
if isinstance(value, str):
quoted = quoted.decode()
return quoted
def _is_limited_data_type(self, field):
db_type = field.db_type(self.connection)
return db_type is not None and db_type.lower() in self.connection._limited_data_types
def skip_default(self, field):
return self._is_limited_data_type(field)
def add_field(self, model, field):
super().add_field(model, field)
# Simulate the effect of a one-off default.
# field.default may be unhashable, so a set isn't used for "in" check.
if self.skip_default(field) and field.default not in (None, NOT_PROVIDED):
effective_default = self.effective_default(field)
self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
'table': self.quote_name(model._meta.db_table),
'column': self.quote_name(field.column),
}, [effective_default])
def _field_should_be_indexed(self, model, field):
create_index = super()._field_should_be_indexed(model, field)
storage = self.connection.introspection.get_storage_engine(
self.connection.cursor(), model._meta.db_table
)
# No need to create an index for ForeignKey fields except if
# db_constraint=False because the index from that constraint won't be
# created.
if (storage == "InnoDB" and
create_index and
field.get_internal_type() == 'ForeignKey' and
field.db_constraint):
return False
return not self._is_limited_data_type(field) and create_index
def _delete_composed_index(self, model, fields, *args):
"""
MySQL can remove an implicit FK index on a field when that field is
covered by another index like a unique_together. "covered" here means
that the more complex index starts like the simpler one.
http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757
We check here before removing the [unique|index]_together if we have to
recreate a FK index.
"""
first_field = model._meta.get_field(fields[0])
if first_field.get_internal_type() == 'ForeignKey':
constraint_names = self._constraint_names(model, [first_field.column], index=True)
if not constraint_names:
self.execute(self._create_index_sql(model, [first_field], suffix=""))
return super()._delete_composed_index(model, fields, *args)
def _set_field_new_type_null_status(self, field, new_type):
"""
Keep the null property of the old field. If it has changed, it will be
handled separately.
"""
if field.null:
new_type += " NULL"
else:
new_type += " NOT NULL"
return new_type
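# For instance (hypothetical values), a nullable field with new_type
# 'varchar(20)' yields 'varchar(20) NULL'; without the suffix, MySQL's
# MODIFY would redefine the column and could drop an existing NOT NULL.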
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super()._alter_column_type_sql(model, old_field, new_field, new_type)
def _rename_field_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super()._rename_field_sql(table, old_field, new_field, new_type)
|
52a30534718bea4439adc2401a76932916b8b944a518fba945802158d46eccfb | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import threading
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import connections
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils.functional import cached_property
from django.utils.safestring import SafeString
from django.utils.version import get_version_tuple
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
version = psycopg2.__version__.split(' ', 1)[0]
return get_version_tuple(version)
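# For example, a psycopg2 __version__ string of '2.7.4 (dt dec pq3 ext lo64)'
# yields the tuple (2, 7, 4).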
PSYCOPG2_VERSION = psycopg2_version()
if PSYCOPG2_VERSION < (2, 5, 4):
raise ImproperlyConfigured("psycopg2_version 2.5.4 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import utc_tzinfo_factory # NOQA isort:skip
psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
'INETARRAY',
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
display_name = 'PostgreSQL'
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BigAutoField': 'bigserial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'interval',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'uuid',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, E'\\', E'\\\\'), E'%%', E'\\%%'), E'_', E'\\_')"
pattern_ops = {
'contains': "LIKE '%%' || {} || '%%'",
'icontains': "LIKE '%%' || UPPER({}) || '%%'",
'startswith': "LIKE {} || '%%'",
'istartswith': "LIKE UPPER({}) || '%%'",
'endswith': "LIKE '%%' || {}",
'iendswith': "LIKE '%%' || UPPER({})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
# PostgreSQL backend-specific attributes.
_named_cursor_idx = 0
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
if len(settings_dict['NAME'] or '') > self.ops.max_name_length():
raise ImproperlyConfigured(
"The database name '%s' (%d characters) is longer than "
"PostgreSQL's limit of %d characters. Supply a shorter NAME "
"in settings.DATABASES." % (
settings_dict['NAME'],
len(settings_dict['NAME']),
self.ops.max_name_length(),
)
)
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
**settings_dict['OPTIONS'],
}
conn_params.pop('isolation_level', None)
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
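# A hypothetical settings dict with NAME='mydb', USER='me', HOST='localhost',
# empty PASSWORD/PORT, and no OPTIONS produces:
# {'database': 'mydb', 'user': 'me', 'host': 'localhost'}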
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS.
if self.isolation_level != connection.isolation_level:
connection.set_session(isolation_level=self.isolation_level)
return connection
def ensure_timezone(self):
if not self.is_usable():
return False
conn_timezone_name = self.connection.get_parameter_status('TimeZone')
timezone_name = self.timezone_name
if timezone_name and conn_timezone_name != timezone_name:
with self.connection.cursor() as cursor:
cursor.execute(self.ops.set_time_zone_sql(), [timezone_name])
return True
return False
def init_connection_state(self):
self.connection.set_client_encoding('UTF8')
self.ensure_connection()
timezone_changed = self.ensure_timezone()
if timezone_changed:
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self, name=None):
if name:
# In autocommit mode, the cursor will be used outside of a
# transaction, hence use a holdable cursor.
cursor = self.connection.cursor(name, scrollable=False, withhold=self.connection.autocommit)
else:
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def chunked_cursor(self):
self._named_cursor_idx += 1
return self._cursor(
name='_django_curs_%d_%d' % (
# Avoid reusing name in other threads
threading.current_thread().ident,
self._named_cursor_idx,
)
)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
if self.connection is None:
return False
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@property
def _nodb_connection(self):
nodb_connection = super()._nodb_connection
try:
nodb_connection.ensure_connection()
except (Database.DatabaseError, WrappedDatabaseError):
warnings.warn(
"Normally Django will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Django was unable to create a connection to the 'postgres' database "
"and will use the first PostgreSQL database instead.",
RuntimeWarning
)
for connection in connections.all():
if connection.vendor == 'postgresql' and connection.settings_dict['NAME'] != 'postgres':
return self.__class__(
{**self.settings_dict, 'NAME': connection.settings_dict['NAME']},
alias=self.alias,
allow_thread_sharing=False,
)
return nodb_connection
@cached_property
def pg_version(self):
with self.temporary_connection():
return self.connection.server_version
|
41dfe5facca3a7ccbfa913c8c04e6ba37b318ca3591626c67b30a7287da2bf84 | import psycopg2
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import IndexColumns
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s USING %(column)s::%(type)s"
sql_create_sequence = "CREATE SEQUENCE %(sequence)s"
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s%(using)s (%(columns)s)%(extra)s%(condition)s"
sql_delete_index = "DROP INDEX IF EXISTS %(name)s"
sql_create_column_inline_fk = 'REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s'
# Setting the constraint to IMMEDIATE runs any deferred checks to allow
# dropping it in the same transaction.
sql_delete_fk = "SET CONSTRAINTS %(name)s IMMEDIATE; ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_delete_procedure = 'DROP FUNCTION %(procedure)s(%(param_types)s)'
def quote_value(self, value):
return psycopg2.extensions.adapt(value)
def _field_indexes_sql(self, model, field):
output = super()._field_indexes_sql(model, field)
like_index_statement = self._create_like_index_sql(model, field)
if like_index_statement is not None:
output.append(like_index_statement)
return output
def _create_like_index_sql(self, model, field):
"""
Return the statement to create an index with varchar operator pattern
when the column type is 'varchar' or 'text', otherwise return None.
"""
db_type = field.db_type(connection=self.connection)
if db_type is not None and (field.db_index or field.unique):
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
#
# The same doesn't apply to array fields such as varchar[size]
# and text[size], so skip them.
if '[' in db_type:
return None
if db_type.startswith('varchar'):
return self._create_index_sql(model, [field], suffix='_like', opclasses=['varchar_pattern_ops'])
elif db_type.startswith('text'):
return self._create_index_sql(model, [field], suffix='_like', opclasses=['text_pattern_ops'])
return None
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
"""Make ALTER TYPE with SERIAL make sense."""
table = model._meta.db_table
if new_type.lower() in ("serial", "bigserial"):
column = new_field.column
sequence_name = "%s_%s_seq" % (table, column)
col_type = "integer" if new_type.lower() == "serial" else "bigint"
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(column),
"type": col_type,
},
[],
),
[
(
self.sql_delete_sequence % {
"sequence": self.quote_name(sequence_name),
},
[],
),
(
self.sql_create_sequence % {
"sequence": self.quote_name(sequence_name),
},
[],
),
(
self.sql_alter_column % {
"table": self.quote_name(table),
"changes": self.sql_alter_column_default % {
"column": self.quote_name(column),
"default": "nextval('%s')" % self.quote_name(sequence_name),
}
},
[],
),
(
self.sql_set_sequence_max % {
"table": self.quote_name(table),
"column": self.quote_name(column),
"sequence": self.quote_name(sequence_name),
},
[],
),
],
)
else:
return super()._alter_column_type_sql(model, old_field, new_field, new_type)
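# Roughly, converting a column to 'serial' emits (with hypothetical names):
# ALTER COLUMN "id" TYPE integer, then DROP/CREATE SEQUENCE "tbl_id_seq",
# an ALTER ... SET DEFAULT nextval('"tbl_id_seq"'), and finally a setval()
# call to resynchronize the sequence with MAX("id").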
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
# Drop indexes on varchar/text/citext columns that are changing to a
# different type.
if (old_field.db_index or old_field.unique) and (
(old_type.startswith('varchar') and not new_type.startswith('varchar')) or
(old_type.startswith('text') and not new_type.startswith('text')) or
(old_type.startswith('citext') and not new_type.startswith('citext'))
):
index_name = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like')
self.execute(self._delete_index_sql(model, index_name))
super()._alter_field(
model, old_field, new_field, old_type, new_type, old_db_params,
new_db_params, strict,
)
# Added an index? Create any PostgreSQL-specific indexes.
if ((not (old_field.db_index or old_field.unique) and new_field.db_index) or
(not old_field.unique and new_field.unique)):
like_index_statement = self._create_like_index_sql(model, new_field)
if like_index_statement is not None:
self.execute(like_index_statement)
# Removed an index? Drop any PostgreSQL-specific indexes.
if old_field.unique and not (new_field.db_index or new_field.unique):
index_to_remove = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like')
self.execute(self._delete_index_sql(model, index_to_remove))
def _index_columns(self, table, columns, col_suffixes, opclasses):
if opclasses:
return IndexColumns(table, columns, self.quote_name, col_suffixes=col_suffixes, opclasses=opclasses)
return super()._index_columns(table, columns, col_suffixes, opclasses)
|
44d052bc1c171a67fedbeaca6781b57b903e2983bcead7b9dab1177aff277d77 | from django.db.backends.base.features import BaseDatabaseFeatures
from .base import Database
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite can read from a cursor since SQLite 3.6.5, subject to the caveat
# that statements within a connection aren't isolated from each other. See
# https://sqlite.org/isolation.html.
can_use_chunked_reads = True
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
max_query_params = 999
supports_mixed_date_datetime_comparisons = False
can_introspect_autofield = True
can_introspect_decimal_field = False
can_introspect_duration_field = False
can_introspect_positive_integer_field = True
can_introspect_small_integer_field = True
introspected_big_auto_field_type = 'AutoField'
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
supports_atomic_references_rename = Database.sqlite_version_info >= (3, 26, 0)
can_create_inline_fk = False
supports_paramstyle_pyformat = False
supports_sequence_reset = False
can_clone_databases = True
supports_temporal_subtraction = True
ignores_table_name_case = True
supports_cast_with_precision = False
time_cast_precision = 3
can_release_savepoints = True
# Is "ALTER TABLE ... RENAME COLUMN" supported?
can_alter_table_rename_column = Database.sqlite_version_info >= (3, 25, 0)
supports_parentheses_in_compound = False
# Deferred constraint checks can be emulated on SQLite < 3.20 but not in a
# reasonably performant way.
supports_pragma_foreign_key_check = Database.sqlite_version_info >= (3, 20, 0)
can_defer_constraint_checks = supports_pragma_foreign_key_check
supports_functions_in_partial_indexes = Database.sqlite_version_info >= (3, 15, 0)
|
c70641727c9aff750a7c2a594c9af1205179e53ac1a3ac4dc89fab7ddf2c462e | import re
from collections import namedtuple
import sqlparse
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models.indexes import Index
FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('pk',))
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
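# Quick sanity checks (hypothetical values): get_field_size('varchar(11)')
# returns 11, while get_field_size('text') returns None.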
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'varchar': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower().split('(', 1)[0].strip()
return self.base_data_types_reverse[key]
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.pk and field_type in {'BigIntegerField', 'IntegerField'}:
# No support for BigAutoField as SQLite treats all integer primary
# keys as signed 64-bit integers.
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(table_name))
return [
FieldInfo(
name, data_type, None, get_field_size(data_type), None, None,
not notnull, default, pk == 1,
)
for cid, name, data_type, notnull, default, pk in cursor.fetchall()
]
def get_sequences(self, cursor, table_name, table_fields=()):
pk_col = self.get_primary_key_column(cursor, table_name)
return [{'table': table_name, 'column': pk_col}]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
create_sql, table_type = cursor.fetchone()
if table_type == 'view':
# It's a view; views have no foreign key relations to introspect.
return relations
results = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_primary_key_column(self, cursor, table_name):
"""Return the column name of the primary key for the given table."""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
create_sql, table_type = row
if table_type == 'view':
# Views don't have a primary key.
return None
fields_sql = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
for field_desc in fields_sql.split(','):
field_desc = field_desc.strip()
m = re.match(r'(?:(?:["`\[])(.*)(?:["`\]])|(\w+)).*PRIMARY KEY.*', field_desc)
if m:
return m.group(1) if m.group(1) else m.group(2)
return None
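# For illustration, a column definition like '"id" integer NOT NULL PRIMARY KEY'
# matches via the quoted branch of the regex and returns 'id'; an unquoted
# 'id integer PRIMARY KEY' matches via the \w+ branch instead.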
def _get_foreign_key_constraints(self, cursor, table_name):
constraints = {}
cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# Remaining on_update/on_delete/match values are of no interest.
id_, _, table, from_, to = row[:5]
constraints['fk_%d' % id_] = {
'columns': [from_],
'primary_key': False,
'unique': False,
'foreign_key': (table, to),
'check': False,
'index': False,
}
return constraints
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Find inline check constraints.
try:
table_schema = cursor.execute(
"SELECT sql FROM sqlite_master WHERE type='table' and name=%s" % (
self.connection.ops.quote_name(table_name),
)
).fetchone()[0]
except TypeError:
# table_name is a view.
pass
else:
# Check constraint parsing is based on the SQLite syntax diagram.
# https://www.sqlite.org/syntaxdiagrams.html#table-constraint
def next_ttype(ttype):
for token in tokens:
if token.ttype == ttype:
return token
statement = sqlparse.parse(table_schema)[0]
tokens = statement.flatten()
for token in tokens:
name = None
if token.match(sqlparse.tokens.Keyword, 'CONSTRAINT'):
# Table constraint
name_token = next_ttype(sqlparse.tokens.Literal.String.Symbol)
name = name_token.value[1:-1]
token = next_ttype(sqlparse.tokens.Keyword)
if token.match(sqlparse.tokens.Keyword, 'UNIQUE'):
constraints[name] = {
'unique': True,
'columns': [],
'primary_key': False,
'foreign_key': False,
'check': False,
'index': False,
}
if token.match(sqlparse.tokens.Keyword, 'CHECK'):
# Column check constraint
if name is None:
column_token = next_ttype(sqlparse.tokens.Literal.String.Symbol)
column = column_token.value[1:-1]
name = '__check__%s' % column
columns = [column]
else:
columns = []
constraints[name] = {
'check': True,
'columns': columns,
'primary_key': False,
'unique': False,
'foreign_key': False,
'index': False,
}
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# SQLite 3.8.9+ returns 5 columns, but older versions return only 3.
# Discard the last 2 columns if present.
number, index, unique = row[:3]
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": False,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Add type and column orders for indexes
if constraints[index]['index'] and not constraints[index]['unique']:
# SQLite doesn't support any index type other than b-tree
constraints[index]['type'] = Index.suffix
cursor.execute(
"SELECT sql FROM sqlite_master "
"WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
)
orders = []
# There is at most one row to loop over.
for sql, in cursor.fetchall():
order_info = sql.split('(')[-1].split(')')[0].split(',')
orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info]
constraints[index]['orders'] = orders
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": False,
"check": False,
"index": False,
}
constraints.update(self._get_foreign_key_constraints(cursor, table_name))
return constraints
|
c64e1dad83f35506ee3570e1fd08460b4da691677d5561357cd71fa8e08de0ee | """
SQLite backend for the sqlite3 module in the standard library.
"""
import datetime
import decimal
import functools
import math
import operator
import re
import statistics
import warnings
from itertools import chain
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import parse_datetime, parse_time
from django.utils.duration import duration_microseconds
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
"""
Convert bytestrings from Python's sqlite3 interface to a regular string.
"""
return lambda s: conv_func(s.decode())
def none_guard(func):
"""
Decorator that returns None if any of the arguments to the decorated
function are None. Many SQL functions return NULL if any of their arguments
are NULL. This decorator simplifies the implementation of this for the
custom functions registered below.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
return None if None in args else func(*args, **kwargs)
return wrapper
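# For example (hypothetical use), none_guard(math.sqrt)(None) returns None
# rather than raising a TypeError, mirroring SQL's NULL propagation.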
def list_aggregate(function):
"""
Return an aggregate class that accumulates values in a list and applies
the provided function to the data.
"""
return type('ListAggregate', (list,), {'finalize': function, 'step': list.append})
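# The resulting class plugs straight into sqlite3's create_aggregate(): the
# inherited list.append serves as step(), and the supplied function (e.g.
# statistics.pstdev below) runs once as finalize() over the collected rows.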
def check_sqlite_version():
if Database.sqlite_version_info < (3, 8, 3):
raise ImproperlyConfigured('SQLite 3.8.3 or later is required (found %s).' % Database.sqlite_version)
check_sqlite_version()
Database.register_converter("bool", b'1'.__eq__)
Database.register_converter("time", decoder(parse_time))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
Database.register_adapter(decimal.Decimal, str)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
display_name = 'SQLite'
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BigAutoField': 'integer',
'BinaryField': 'BLOB',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
data_types_suffix = {
'AutoField': 'AUTOINCREMENT',
'BigAutoField': 'AUTOINCREMENT',
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See https://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
'startswith': r"LIKE {} || '%%' ESCAPE '\'",
'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
'endswith': r"LIKE '%%' || {} ESCAPE '\'",
'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
**settings_dict['OPTIONS'],
}
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False, 'uri': True})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_datetime_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("django_time_extract", 2, _sqlite_time_extract)
conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
conn.create_function("django_time_diff", 2, _sqlite_time_diff)
conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
conn.create_function('regexp', 2, _sqlite_regexp)
conn.create_function('ACOS', 1, none_guard(math.acos))
conn.create_function('ASIN', 1, none_guard(math.asin))
conn.create_function('ATAN', 1, none_guard(math.atan))
conn.create_function('ATAN2', 2, none_guard(math.atan2))
conn.create_function('CEILING', 1, none_guard(math.ceil))
conn.create_function('COS', 1, none_guard(math.cos))
conn.create_function('COT', 1, none_guard(lambda x: 1 / math.tan(x)))
conn.create_function('DEGREES', 1, none_guard(math.degrees))
conn.create_function('EXP', 1, none_guard(math.exp))
conn.create_function('FLOOR', 1, none_guard(math.floor))
conn.create_function('LN', 1, none_guard(math.log))
conn.create_function('LOG', 2, none_guard(lambda x, y: math.log(y, x)))
conn.create_function('LPAD', 3, _sqlite_lpad)
conn.create_function('MOD', 2, none_guard(math.fmod))
conn.create_function('PI', 0, lambda: math.pi)
conn.create_function('POWER', 2, none_guard(operator.pow))
conn.create_function('RADIANS', 1, none_guard(math.radians))
conn.create_function('REPEAT', 2, none_guard(operator.mul))
conn.create_function('REVERSE', 1, none_guard(lambda x: x[::-1]))
conn.create_function('RPAD', 3, _sqlite_rpad)
conn.create_function('SIN', 1, none_guard(math.sin))
conn.create_function('SQRT', 1, none_guard(math.sqrt))
conn.create_function('TAN', 1, none_guard(math.tan))
conn.create_aggregate('STDDEV_POP', 1, list_aggregate(statistics.pstdev))
conn.create_aggregate('STDDEV_SAMP', 1, list_aggregate(statistics.stdev))
conn.create_aggregate('VAR_POP', 1, list_aggregate(statistics.pvariance))
conn.create_aggregate('VAR_SAMP', 1, list_aggregate(statistics.variance))
conn.execute('PRAGMA foreign_keys = ON')
return conn
def init_connection_state(self):
pass
def create_cursor(self, name=None):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if not self.is_in_memory_db():
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disabling autocommit.
return self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def disable_constraint_checking(self):
with self.cursor() as cursor:
cursor.execute('PRAGMA foreign_keys = OFF')
# Foreign key constraints cannot be turned off while in a multi-
# statement transaction. Fetch the current state of the pragma
# to determine if constraints are effectively disabled.
enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0]
return not bool(enabled)
def enable_constraint_checking(self):
self.cursor().execute('PRAGMA foreign_keys = ON')
def check_constraints(self, table_names=None):
"""
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
"""
if self.features.supports_pragma_foreign_key_check:
with self.cursor() as cursor:
if table_names is None:
violations = self.cursor().execute('PRAGMA foreign_key_check').fetchall()
else:
violations = chain.from_iterable(
cursor.execute('PRAGMA foreign_key_check(%s)' % table_name).fetchall()
for table_name in table_names
)
# See https://www.sqlite.org/pragma.html#pragma_foreign_key_check
for table_name, rowid, referenced_table_name, foreign_key_index in violations:
foreign_key = cursor.execute(
'PRAGMA foreign_key_list(%s)' % table_name
).fetchall()[foreign_key_index]
column_name, referenced_column_name = foreign_key[3:5]
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
primary_key_value, bad_value = cursor.execute(
'SELECT %s, %s FROM %s WHERE rowid = %%s' % (
primary_key_column_name, column_name, table_name
),
(rowid,),
).fetchone()
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, primary_key_value, table_name, column_name,
bad_value, referenced_table_name, referenced_column_name
)
)
else:
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
"""
% (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def is_in_memory_db(self):
return self.creation.is_in_memory_db(self.settings_dict['NAME'])
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
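# A hypothetical rewrite performed by convert_query():
# "SELECT * FROM t WHERE name = %s AND pct = '100%%'"
# becomes
# "SELECT * FROM t WHERE name = ? AND pct = '100%'"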
def _sqlite_datetime_parse(dt, tzname=None):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (TypeError, ValueError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
return dt
def _sqlite_date_trunc(lookup_type, dt):
dt = _sqlite_datetime_parse(dt)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'quarter':
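# Snap the month back to the first month of its quarter (1, 4, 7, or 10).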
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'week':
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_time_trunc(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'hour':
return "%02i:00:00" % dt.hour
elif lookup_type == 'minute':
return "%02i:%02i:00" % (dt.hour, dt.minute)
elif lookup_type == 'second':
return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second)
def _sqlite_datetime_cast_date(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname=None):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'week_day':
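# Map Python's isoweekday() (Mon=1..Sun=7) to Django's week_day
# convention (Sun=1..Sat=7).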
return (dt.isoweekday() % 7) + 1
elif lookup_type == 'week':
return dt.isocalendar()[1]
elif lookup_type == 'quarter':
return math.ceil(dt.month / 3)
elif lookup_type == 'iso_year':
return dt.isocalendar()[0]
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'week':
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
@none_guard
def _sqlite_format_dtdelta(conn, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a datetime
"""
try:
real_lhs = datetime.timedelta(0, 0, lhs) if isinstance(lhs, int) else backend_utils.typecast_timestamp(lhs)
real_rhs = datetime.timedelta(0, 0, rhs) if isinstance(rhs, int) else backend_utils.typecast_timestamp(rhs)
if conn.strip() == '+':
out = real_lhs + real_rhs
else:
out = real_lhs - real_rhs
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(out)
@none_guard
def _sqlite_time_diff(lhs, rhs):
left = backend_utils.typecast_time(lhs)
right = backend_utils.typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000) +
(left.minute * 60 * 1000000) +
(left.second * 1000000) +
(left.microsecond) -
(right.hour * 60 * 60 * 1000000) -
(right.minute * 60 * 1000000) -
(right.second * 1000000) -
(right.microsecond)
)
@none_guard
def _sqlite_timestamp_diff(lhs, rhs):
left = backend_utils.typecast_timestamp(lhs)
right = backend_utils.typecast_timestamp(rhs)
return duration_microseconds(left - right)
@none_guard
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, str(re_string)))
@none_guard
def _sqlite_lpad(text, length, fill_text):
if len(text) >= length:
return text[:length]
return (fill_text * length)[:length - len(text)] + text
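# Hypothetical examples: _sqlite_lpad('abc', 6, '*') returns '***abc', while
# a too-long input is truncated: _sqlite_lpad('abcdef', 4, '*') returns 'abcd'.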
@none_guard
def _sqlite_rpad(text, length, fill_text):
return (text + fill_text * length)[:length]
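# Similarly (hypothetical), _sqlite_rpad('abc', 6, '*') returns 'abc***'.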
|
7c351d759d7711fe3f74d3f2abafc3bbf5b39f597136262951d20a9b07f87171 | import inspect
import types
from collections import defaultdict
from itertools import chain
from django.apps import apps
from django.core.checks import Error, Tags, register
@register(Tags.models)
def check_all_models(app_configs=None, **kwargs):
db_table_models = defaultdict(list)
errors = []
if app_configs is None:
models = apps.get_models()
else:
models = chain.from_iterable(app_config.get_models() for app_config in app_configs)
for model in models:
if model._meta.managed and not model._meta.proxy:
db_table_models[model._meta.db_table].append(model._meta.label)
if not inspect.ismethod(model.check):
errors.append(
Error(
"The '%s.check()' class method is currently overridden by %r."
% (model.__name__, model.check),
obj=model,
id='models.E020'
)
)
else:
errors.extend(model.check(**kwargs))
for db_table, model_labels in db_table_models.items():
if len(model_labels) != 1:
errors.append(
Error(
"db_table '%s' is used by multiple models: %s."
% (db_table, ', '.join(db_table_models[db_table])),
obj=db_table,
id='models.E028',
)
)
return errors
def _check_lazy_references(apps, ignore=None):
"""
Ensure all lazy (i.e. string) model references have been resolved.
Lazy references are used in various places throughout Django, primarily in
related fields and model signals. Identify those common cases and provide
more helpful error messages for them.
The ignore parameter is used by StateApps to exclude swappable models from
this check.
"""
pending_models = set(apps._pending_operations) - (ignore or set())
# Short circuit if there aren't any errors.
if not pending_models:
return []
from django.db.models import signals
model_signals = {
signal: name for name, signal in vars(signals).items()
if isinstance(signal, signals.ModelSignal)
}
def extract_operation(obj):
"""
Take a callable found in Apps._pending_operations and identify the
original callable passed to Apps.lazy_model_operation(). If that
callable was a partial, return the inner, non-partial function and
any arguments and keyword arguments that were supplied with it.
obj is a callback defined locally in Apps.lazy_model_operation() and
annotated there with a `func` attribute so as to imitate a partial.
"""
operation, args, keywords = obj, [], {}
while hasattr(operation, 'func'):
args.extend(getattr(operation, 'args', []))
keywords.update(getattr(operation, 'keywords', {}))
operation = operation.func
return operation, args, keywords
def app_model_error(model_key):
try:
apps.get_app_config(model_key[0])
model_error = "app '%s' doesn't provide model '%s'" % model_key
except LookupError:
model_error = "app '%s' isn't installed" % model_key[0]
return model_error
# Here are several functions which return CheckMessage instances for the
# most common usages of lazy operations throughout Django. These functions
# take the model that was being waited on as an (app_label, modelname)
# pair, the original lazy function, and its positional and keyword args as
# determined by extract_operation().
def field_error(model_key, func, args, keywords):
error_msg = (
"The field %(field)s was declared with a lazy reference "
"to '%(model)s', but %(model_error)s."
)
params = {
'model': '.'.join(model_key),
'field': keywords['field'],
'model_error': app_model_error(model_key),
}
return Error(error_msg % params, obj=keywords['field'], id='fields.E307')
def signal_connect_error(model_key, func, args, keywords):
error_msg = (
"%(receiver)s was connected to the '%(signal)s' signal with a "
"lazy reference to the sender '%(model)s', but %(model_error)s."
)
receiver = args[0]
# The receiver is either a function or an instance of class
# defining a `__call__` method.
if isinstance(receiver, types.FunctionType):
description = "The function '%s'" % receiver.__name__
elif isinstance(receiver, types.MethodType):
description = "Bound method '%s.%s'" % (receiver.__self__.__class__.__name__, receiver.__name__)
else:
description = "An instance of class '%s'" % receiver.__class__.__name__
signal_name = model_signals.get(func.__self__, 'unknown')
params = {
'model': '.'.join(model_key),
'receiver': description,
'signal': signal_name,
'model_error': app_model_error(model_key),
}
return Error(error_msg % params, obj=receiver.__module__, id='signals.E001')
def default_error(model_key, func, args, keywords):
error_msg = "%(op)s contains a lazy reference to %(model)s, but %(model_error)s."
params = {
'op': func,
'model': '.'.join(model_key),
'model_error': app_model_error(model_key),
}
return Error(error_msg % params, obj=func, id='models.E022')
# Maps common uses of lazy operations to corresponding error functions
# defined above. If a key maps to None, no error will be produced.
# default_error() will be used for usages that don't appear in this dict.
known_lazy = {
('django.db.models.fields.related', 'resolve_related_class'): field_error,
('django.db.models.fields.related', 'set_managed'): None,
('django.dispatch.dispatcher', 'connect'): signal_connect_error,
}
def build_error(model_key, func, args, keywords):
key = (func.__module__, func.__name__)
error_fn = known_lazy.get(key, default_error)
return error_fn(model_key, func, args, keywords) if error_fn else None
return sorted(filter(None, (
build_error(model_key, *extract_operation(func))
for model_key in pending_models
for func in apps._pending_operations[model_key]
)), key=lambda error: error.msg)
@register(Tags.models)
def check_lazy_references(app_configs=None, **kwargs):
return _check_lazy_references(apps)
|
a6f57a923aefe49a7b84b3c796785ae4d886eec390559b736a421b36ef52e7b6 | import cgi
import mimetypes
import os
import posixpath
import shutil
import stat
import tempfile
from importlib import import_module
from os import path
from urllib.request import urlretrieve
import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import handle_extensions
from django.template import Context, Engine
from django.utils import archive
from django.utils.version import get_docs_version
class TemplateCommand(BaseCommand):
"""
Copy either a Django application layout template or a Django project
layout template into the specified directory.
:param style: A color style object (see django.core.management.color).
:param app_or_project: The string 'app' or 'project'.
:param name: The name of the application or project.
:param directory: The directory to which the template should be copied.
:param options: The additional variables passed to project or app templates
"""
requires_system_checks = False
# The supported URL schemes
url_schemes = ['http', 'https', 'ftp']
# Rewrite the following suffixes when determining the target filename.
rewrite_template_suffixes = (
# Allow shipping invalid .py files without byte-compilation.
('.py-tpl', '.py'),
)
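    # e.g. a shipped 'manage.py-tpl' is written to the target directory as
    # 'manage.py' once rendered.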
def add_arguments(self, parser):
parser.add_argument('name', help='Name of the application or project.')
parser.add_argument('directory', nargs='?', help='Optional destination directory')
parser.add_argument('--template', help='The path or URL to load the template from.')
parser.add_argument(
'--extension', '-e', dest='extensions',
action='append', default=['py'],
help='The file extension(s) to render (default: "py"). '
'Separate multiple extensions with commas, or use '
'-e multiple times.'
)
parser.add_argument(
'--name', '-n', dest='files',
action='append', default=[],
help='The file name(s) to render. Separate multiple file names '
'with commas, or use -n multiple times.'
)
def handle(self, app_or_project, name, target=None, **options):
self.app_or_project = app_or_project
self.paths_to_remove = []
self.verbosity = options['verbosity']
self.validate_name(name, app_or_project)
# if some directory is given, make sure it's nicely expanded
if target is None:
top_dir = path.join(os.getcwd(), name)
try:
os.makedirs(top_dir)
except FileExistsError:
raise CommandError("'%s' already exists" % top_dir)
except OSError as e:
raise CommandError(e)
else:
top_dir = os.path.abspath(path.expanduser(target))
if not os.path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please create it first." % top_dir)
extensions = tuple(handle_extensions(options['extensions']))
extra_files = []
for file in options['files']:
extra_files.extend(map(lambda x: x.strip(), file.split(',')))
if self.verbosity >= 2:
self.stdout.write("Rendering %s template files with "
"extensions: %s\n" %
(app_or_project, ', '.join(extensions)))
self.stdout.write("Rendering %s template files with "
"filenames: %s\n" %
(app_or_project, ', '.join(extra_files)))
base_name = '%s_name' % app_or_project
base_subdir = '%s_template' % app_or_project
base_directory = '%s_directory' % app_or_project
camel_case_name = 'camel_case_%s_name' % app_or_project
camel_case_value = ''.join(x for x in name.title() if x != '_')
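        # e.g. name 'my_app' yields 'MyApp': 'my_app'.title() gives 'My_App'
        # and the underscores are then stripped.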
context = Context({
**options,
base_name: name,
base_directory: top_dir,
camel_case_name: camel_case_value,
'docs_version': get_docs_version(),
'django_version': django.__version__,
}, autoescape=False)
        # Set up a stub settings environment for template rendering.
if not settings.configured:
settings.configure()
django.setup()
template_dir = self.handle_template(options['template'],
base_subdir)
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
path_rest = root[prefix_length:]
relative_dir = path_rest.replace(base_name, name)
if relative_dir:
target_dir = path.join(top_dir, relative_dir)
os.makedirs(target_dir, exist_ok=True)
for dirname in dirs[:]:
if dirname.startswith('.') or dirname == '__pycache__':
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir,
filename.replace(base_name, name))
for old_suffix, new_suffix in self.rewrite_template_suffixes:
if new_path.endswith(old_suffix):
new_path = new_path[:-len(old_suffix)] + new_suffix
break # Only rewrite once
if path.exists(new_path):
raise CommandError("%s already exists, overlaying a "
"project or app into an existing "
"directory won't replace conflicting "
"files" % new_path)
                # Only render the Python files, as we don't want to
                # accidentally render Django template files
if new_path.endswith(extensions) or filename in extra_files:
with open(old_path, encoding='utf-8') as template_file:
content = template_file.read()
template = Engine().from_string(content)
content = template.render(context)
with open(new_path, 'w', encoding='utf-8') as new_file:
new_file.write(content)
else:
shutil.copyfile(old_path, new_path)
if self.verbosity >= 2:
self.stdout.write("Creating %s\n" % new_path)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
if self.paths_to_remove:
if self.verbosity >= 2:
self.stdout.write("Cleaning up temporary files.\n")
for path_to_remove in self.paths_to_remove:
if path.isfile(path_to_remove):
os.remove(path_to_remove)
else:
shutil.rmtree(path_to_remove)
def handle_template(self, template, subdir):
"""
Determine where the app or project templates are.
Use django.__path__[0] as the default because the Django install
directory isn't known.
"""
if template is None:
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
                # Download the file and return the path.
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError("couldn't handle %s template %s." %
(self.app_or_project, template))
def validate_name(self, name, app_or_project):
a_or_an = 'an' if app_or_project == 'app' else 'a'
if name is None:
raise CommandError('you must provide {an} {app} name'.format(
an=a_or_an,
app=app_or_project,
))
# Check it's a valid directory name.
if not name.isidentifier():
raise CommandError(
"'{name}' is not a valid {app} name. Please make sure the "
"name is a valid identifier.".format(
name=name,
app=app_or_project,
)
)
# Check it cannot be imported.
try:
import_module(name)
except ImportError:
pass
else:
raise CommandError(
"'{name}' conflicts with the name of an existing Python "
"module and cannot be used as {an} {app} name. Please try "
"another name.".format(
name=name,
an=a_or_an,
app=app_or_project,
)
)
def download(self, url):
"""
Download the given URL and return the file name.
"""
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[-1]
if url.endswith('/'):
display_url = tmp + '/'
else:
display_url = url
return filename, display_url
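        # Illustrative example (assumed URL, not from the test suite):
        #   cleanup_url('https://example.com/template.tar.gz/')
        #   -> ('template.tar.gz', 'https://example.com/template.tar.gz/')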
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
filename, display_url = cleanup_url(url)
if self.verbosity >= 2:
self.stdout.write("Downloading %s\n" % display_url)
try:
the_path, info = urlretrieve(url, path.join(tempdir, filename))
except OSError as e:
raise CommandError("couldn't download URL %s to %s: %s" %
(url, filename, e))
used_name = the_path.split('/')[-1]
        # Try to get a better name from the response headers.
content_disposition = info.get('content-disposition')
if content_disposition:
_, params = cgi.parse_header(content_disposition)
guessed_filename = params.get('filename') or used_name
else:
guessed_filename = used_name
        # Fall back to content-type guessing.
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if not ext and content_type:
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
# Move the temporary file to a filename that has better
# chances of being recognized by the archive utils
if used_name != guessed_filename:
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
# Giving up
return the_path
def splitext(self, the_path):
"""
Like os.path.splitext, but takes off .tar, too
"""
base, ext = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
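    # Example: splitext('app-template.tar.gz') returns
    # ('app-template', '.tar.gz'), whereas os.path.splitext() alone would
    # return ('app-template.tar', '.gz').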
def extract(self, filename):
"""
        Extract the given file to a temporary directory and return
        the path of the directory with the extracted content.
"""
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if self.verbosity >= 2:
self.stdout.write("Extracting %s\n" % filename)
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, OSError) as e:
raise CommandError("couldn't extract file %s to %s: %s" %
(filename, tempdir, e))
def is_url(self, template):
"""Return True if the name looks like a URL."""
if ':' not in template:
return False
scheme = template.split(':', 1)[0].lower()
return scheme in self.url_schemes
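    # e.g. is_url('https://example.com/app.tar.gz') is True, while
    # is_url('C:/templates/app') is False because 'c' isn't a supported
    # scheme.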
def make_writeable(self, filename):
"""
Make sure that the file is writeable.
Useful if our source is read-only.
"""
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
|
9eefd4ac62ca797f1f2732c3d86a543813274774c1d66051bc7692960207887c | import mimetypes
from email import (
charset as Charset, encoders as Encoders, generator, message_from_string,
)
from email.errors import InvalidHeaderDefect, NonASCIILocalPartDefect
from email.header import Header
from email.headerregistry import Address
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, getaddresses, make_msgid, parseaddr
from io import BytesIO, StringIO
from pathlib import Path
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_str
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
utf8_charset_qp = Charset.Charset('utf-8')
utf8_charset_qp.body_encoding = Charset.QP
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998
class BadHeaderError(ValueError):
pass
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = {
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
"""Forbid multi-line headers to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = str(val) # val may be lazy
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding) for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return name, val
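# Illustrative behavior (a sketch with assumed values):
#   forbid_multi_line_headers('To', 'a@example.com\nb@example.com', 'utf-8')
#   raises BadHeaderError, while a non-ASCII
#   forbid_multi_line_headers('Subject', 'héllo', 'utf-8') returns the value
#   as an RFC 2047 encoded-word ('=?utf-8?...?=').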
def split_addr(addr, encoding):
"""
Split the address into local part and domain and encode them.
When non-ascii characters are present in the local part, it must be
MIME-word encoded. The domain name must be idna-encoded if it contains
non-ascii characters.
"""
if '@' in addr:
localpart, domain = addr.split('@', 1)
# Try to get the simplest encoding - ascii if possible so that
# [email protected] doesn't become [email protected]. This
# makes unit testing a bit easier and more readable.
try:
localpart.encode('ascii')
except UnicodeEncodeError:
localpart = Header(localpart, encoding).encode()
domain = domain.encode('idna').decode('ascii')
else:
localpart = Header(addr, encoding).encode()
domain = ''
return (localpart, domain)
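# Example (illustrative addresses): for 'tó@exämple.com' with utf-8, the
# local part becomes an RFC 2047 encoded-word and the domain is IDNA-encoded
# to an 'xn--...' form; for a plain ASCII address only the
# (localpart, domain) split happens.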
def sanitize_address(addr, encoding):
"""
Format a pair of (name, address) or an email address string.
"""
if not isinstance(addr, tuple):
addr = parseaddr(addr)
nm, addr = addr
localpart, domain = None, None
nm = Header(nm, encoding).encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN or non-ascii in the local part
localpart, domain = split_addr(addr, encoding)
# An `email.headerregistry.Address` object is used since
# email.utils.formataddr() naively encodes the name as ascii (see #25986).
if localpart and domain:
address = Address(nm, username=localpart, domain=domain)
return str(address)
try:
address = Address(nm, addr_spec=addr)
except (InvalidHeaderDefect, NonASCIILocalPartDefect):
localpart, domain = split_addr(addr, encoding)
address = Address(nm, username=localpart, domain=domain)
return str(address)
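# Usage sketch (assumed values):
#   sanitize_address(('Ana', 'ana@example.com'), 'utf-8')
#   -> 'Ana <ana@example.com>'
# A non-ASCII domain in the address string takes the split_addr() path above
# and comes back IDNA-encoded.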
class MIMEMixin:
def as_string(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as a string.
        Optional `unixfrom`, when True, means include the Unix From_ envelope
        header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = StringIO()
g = generator.Generator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
def as_bytes(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as bytes.
        Optional `unixfrom`, when True, means include the Unix From_ envelope
        header.
This overrides the default as_bytes() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, _text, _subtype='plain', _charset=None):
self.encoding = _charset
MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
def set_payload(self, payload, charset=None):
if charset == 'utf-8' and not isinstance(charset, Charset.Charset):
            has_long_lines = any(
                len(line.encode()) > RFC5322_EMAIL_LINE_LENGTH_LIMIT
                for line in payload.splitlines()
            )
# Quoted-Printable encoding has the side effect of shortening long
# lines, if any (#22561).
charset = utf8_charset_qp if has_long_lines else utf8_charset
MIMEText.set_payload(self, payload, charset=charset)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage:
"""A container for email information."""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
"""
if to:
if isinstance(to, str):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, str):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, str):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, str):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body or ''
self.attachments = []
if attachments:
for attachment in attachments:
if isinstance(attachment, MIMEBase):
self.attach(attachment)
else:
self.attach(*attachment)
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
self._set_list_header_if_not_empty(msg, 'To', self.to)
self._set_list_header_if_not_empty(msg, 'Cc', self.cc)
self._set_list_header_if_not_empty(msg, 'Reply-To', self.reply_to)
        # Email header names are case-insensitive (RFC 5322), so we have to
        # accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
# formatdate() uses stdlib methods to format the date, which use
# the stdlib/OS concept of a timezone, however, Django sets the
# TZ environment variable based on the TIME_ZONE setting which
# will get picked up by formatdate().
msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() != 'from': # From is already handled
msg[name] = value
return msg
def recipients(self):
"""
Return a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return [email for email in (self.to + self.cc + self.bcc) if email]
def send(self, fail_silently=False):
"""Send the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
        Attach a file with the given filename and content. The filename can
        be omitted, and the mimetype is guessed if not provided.
If the first parameter is a MIMEBase subclass, insert it directly
into the resulting message attachments.
For a text/* mimetype (guessed or specified), when a bytes object is
specified as content, decode it as UTF-8. If that fails, set the
mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
"""
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
mimetype = mimetype or mimetypes.guess_type(filename)[0] or DEFAULT_ATTACHMENT_MIME_TYPE
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
if isinstance(content, bytes):
try:
content = content.decode()
except UnicodeDecodeError:
                        # If the mimetype suggests the file is text but it's
                        # actually binary, decode() raises a UnicodeDecodeError.
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
self.attachments.append((filename, content, mimetype))
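    # Usage sketch (illustrative): msg.attach('report.csv', b'a,b\n1,2\n')
    # decodes the bytes as UTF-8 because the guessed mimetype is text/csv,
    # so ('report.csv', 'a,b\n1,2\n', 'text/csv') lands in self.attachments.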
def attach_file(self, path, mimetype=None):
"""
Attach a file from the filesystem.
Set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE if it isn't specified
and cannot be guessed.
For a text/* mimetype (guessed or specified), decode the file's content
as UTF-8. If that fails, set the mimetype to
DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
"""
path = Path(path)
with path.open('rb') as file:
content = file.read()
self.attach(path.name, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body or body_msg.is_multipart():
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Convert the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == 'message' and subtype == 'rfc822':
# Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
# must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(force_str(content))
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Convert the filename, content, mimetype triple into a MIME attachment
object.
"""
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment', filename=filename)
return attachment
def _set_list_header_if_not_empty(self, msg, header, values):
"""
Set msg's header, either from self.extra_headers, if present, or from
the values argument.
"""
if values:
try:
value = self.extra_headers[header]
except KeyError:
value = ', '.join(str(v) for v in values)
msg[header] = value
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None, reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
"""
super().__init__(
subject, body, from_email, to, bcc, connection, attachments,
headers, cc, reply_to,
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
|
13c8d5c58b84efe260d18066c8961e40115da8f187a11ad3290e6ed751a2f511 | import time
from importlib import import_module
from django.apps import apps
from django.core.checks import Tags, run_checks
from django.core.management.base import (
BaseCommand, CommandError, no_translations,
)
from django.core.management.sql import (
emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ModelState, ProjectState
from django.utils.module_loading import module_has_submodule
from django.utils.text import Truncator
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
def add_arguments(self, parser):
parser.add_argument(
'app_label', nargs='?',
help='App label of an application to synchronize the state.',
)
parser.add_argument(
'migration_name', nargs='?',
help='Database state will be brought to the state after that '
'migration. Use the name "zero" to unapply all migrations.',
)
parser.add_argument(
'--noinput', '--no-input', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.',
)
parser.add_argument(
'--fake', action='store_true',
help='Mark migrations as run without actually running them.',
)
parser.add_argument(
'--fake-initial', action='store_true',
help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
'that the current database schema matches your initial migration before using this '
'flag. Django will only check for an existing table name.',
)
parser.add_argument(
'--plan', action='store_true',
help='Shows a list of the migration actions that will be performed.',
)
parser.add_argument(
'--run-syncdb', action='store_true',
help='Creates tables for apps without migrations.',
)
def _run_checks(self, **kwargs):
issues = run_checks(tags=[Tags.database])
issues.extend(super()._run_checks(**kwargs))
return issues
@no_translations
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.interactive = options['interactive']
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options['database']
connection = connections[db]
# Hook for backends needing any database preparation
connection.prepare_database()
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Raise an error if any migrations are applied before their dependencies.
executor.loader.check_consistent_history(connection)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
run_syncdb = options['run_syncdb']
target_app_labels_only = True
if options['app_label']:
# Validate app_label.
app_label = options['app_label']
try:
apps.get_app_config(app_label)
except LookupError as err:
raise CommandError(str(err))
if run_syncdb:
if app_label in executor.loader.migrated_apps:
raise CommandError("Can't use run_syncdb with app '%s' as it has migrations." % app_label)
elif app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations." % app_label)
if options['app_label'] and options['migration_name']:
migration_name = options['migration_name']
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." %
(migration_name, app_label)
)
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
migration_name, app_label))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif options['app_label']:
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
plan = executor.migration_plan(targets)
if options['plan']:
self.stdout.write('Planned operations:', self.style.MIGRATE_LABEL)
if not plan:
self.stdout.write(' No planned migration operations.')
for migration, backwards in plan:
self.stdout.write(str(migration), self.style.MIGRATE_HEADING)
for operation in migration.operations:
message, is_error = self.describe_operation(operation, backwards)
style = self.style.WARNING if is_error else None
self.stdout.write(' ' + message, style)
return
# At this point, ignore run_syncdb if there aren't any apps to sync.
run_syncdb = options['run_syncdb'] and executor.loader.unmigrated_apps
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
if options['app_label']:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated app: %s" % app_label)
)
else:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") +
(", ".join(sorted(executor.loader.unmigrated_apps)))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ") +
(", ".join(sorted({a for a, n in targets})) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(
" Unapply all migrations: ") + "%s" % (targets[0][0],)
)
else:
self.stdout.write(self.style.MIGRATE_LABEL(
" Target specific migration: ") + "%s, from %s"
% (targets[0][1], targets[0][0])
)
pre_migrate_state = executor._create_project_state(with_applied_migrations=True)
pre_migrate_apps = pre_migrate_state.apps
emit_pre_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=pre_migrate_apps, plan=plan,
)
# Run the syncdb phase.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
if options['app_label']:
self.sync_apps(connection, [app_label])
else:
self.sync_apps(connection, executor.loader.unmigrated_apps)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
            # If there are changes that aren't in migrations yet, tell them how to fix it.
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(self.style.NOTICE(
" Your models have changes that are not yet reflected "
"in a migration, and so won't be applied."
))
self.stdout.write(self.style.NOTICE(
" Run 'manage.py makemigrations' to make new "
"migrations, and then re-run 'manage.py migrate' to "
"apply them."
))
fake = False
fake_initial = False
else:
fake = options['fake']
fake_initial = options['fake_initial']
post_migrate_state = executor.migrate(
targets, plan=plan, state=pre_migrate_state.clone(), fake=fake,
fake_initial=fake_initial,
)
# post_migrate signals have access to all models. Ensure that all models
# are reloaded in case any are delayed.
post_migrate_state.clear_delayed_apps_cache()
post_migrate_apps = post_migrate_state.apps
        # Re-render models of real apps to include relationships now that
        # we've got a final state. This wouldn't be necessary if real apps'
        # models were rendered with relationships in the first place.
with post_migrate_apps.bulk_update():
model_keys = []
for model_state in post_migrate_apps.real_models:
model_key = model_state.app_label, model_state.name_lower
model_keys.append(model_key)
post_migrate_apps.unregister_model(*model_key)
post_migrate_apps.render_multiple([
ModelState.from_model(apps.get_model(*model)) for model in model_keys
])
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=post_migrate_apps, plan=plan,
)
def migration_progress_callback(self, action, migration=None, fake=False):
if self.verbosity >= 1:
compute_time = self.verbosity > 1
if action == "apply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Applying %s…" % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "unapply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Unapplying %s…" % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "render_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Rendering model states…", ending="")
self.stdout.flush()
elif action == "render_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))
def sync_apps(self, connection, app_labels):
"""Run the old syncdb-style operation on a list of app_labels."""
with connection.cursor() as cursor:
tables = connection.introspection.table_names(cursor)
# Build the manifest of apps and models that are to be synchronized.
all_models = [
(
app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=False),
)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.identifier_converter
return not (
(converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
)
manifest = {
app_name: list(filter(model_installed, model_list))
for app_name, model_list in all_models
}
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables…\n")
with connection.schema_editor() as editor:
for app_name, model_list in manifest.items():
for model in model_list:
# Never install unmanaged models, etc.
if not model._meta.can_migrate(connection):
continue
if self.verbosity >= 3:
self.stdout.write(
" Processing %s.%s model\n" % (app_name, model._meta.object_name)
)
if self.verbosity >= 1:
self.stdout.write(" Creating table %s\n" % model._meta.db_table)
editor.create_model(model)
# Deferred SQL is executed when exiting the editor's context.
if self.verbosity >= 1:
self.stdout.write(" Running deferred SQL…\n")
@staticmethod
def describe_operation(operation, backwards):
"""Return a string that describes a migration operation for --plan."""
prefix = ''
if hasattr(operation, 'code'):
code = operation.reverse_code if backwards else operation.code
action = code.__doc__ if code else ''
elif hasattr(operation, 'sql'):
action = operation.reverse_sql if backwards else operation.sql
else:
action = ''
if backwards:
prefix = 'Undo '
if action is None:
action = 'IRREVERSIBLE'
is_error = True
else:
action = str(action).replace('\n', '')
is_error = False
if action:
action = ' -> ' + action
truncated = Truncator(action)
return prefix + operation.describe() + truncated.chars(40), is_error
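    # For example, a RunPython whose code has the docstring "Create default
    # sites" shows up under --plan as:
    #   Raw Python operation -> Create default sites
    # and a RunSQL without reverse_sql, when run backwards, is flagged as:
    #   Undo Raw SQL operation -> IRREVERSIBLE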
|
01f881ef2ae5efda05cba1aba00bd105ac2e300374d90336ec2b0114159d46b3 | import warnings
from django.apps import apps
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import parse_apps_and_model_labels
from django.db import DEFAULT_DB_ALIAS, router
class ProxyModelWarning(Warning):
pass
class Command(BaseCommand):
help = (
"Output the contents of the database as a fixture of the given format "
"(using each model's default manager unless --all is specified)."
)
def add_arguments(self, parser):
parser.add_argument(
'args', metavar='app_label[.ModelName]', nargs='*',
help='Restricts dumped data to the specified app_label or app_label.ModelName.',
)
parser.add_argument(
'--format', default='json',
help='Specifies the output serialization format for fixtures.',
)
parser.add_argument(
'--indent', type=int,
help='Specifies the indent level to use when pretty-printing output.',
)
parser.add_argument(
'--database',
default=DEFAULT_DB_ALIAS,
help='Nominates a specific database to dump fixtures from. '
'Defaults to the "default" database.',
)
parser.add_argument(
'-e', '--exclude', action='append', default=[],
help='An app_label or app_label.ModelName to exclude '
'(use multiple --exclude to exclude multiple apps/models).',
)
parser.add_argument(
'--natural-foreign', action='store_true', dest='use_natural_foreign_keys',
help='Use natural foreign keys if they are available.',
)
parser.add_argument(
'--natural-primary', action='store_true', dest='use_natural_primary_keys',
help='Use natural primary keys if they are available.',
)
parser.add_argument(
'-a', '--all', action='store_true', dest='use_base_manager',
help="Use Django's base manager to dump all models stored in the database, "
"including those that would otherwise be filtered or modified by a custom manager.",
)
parser.add_argument(
'--pks', dest='primary_keys',
help="Only dump objects with given primary keys. Accepts a comma-separated "
"list of keys. This option only works when you specify one model.",
)
parser.add_argument(
'-o', '--output',
help='Specifies file to which the output is written.'
)
def handle(self, *app_labels, **options):
format = options['format']
indent = options['indent']
using = options['database']
excludes = options['exclude']
output = options['output']
show_traceback = options['traceback']
use_natural_foreign_keys = options['use_natural_foreign_keys']
use_natural_primary_keys = options['use_natural_primary_keys']
use_base_manager = options['use_base_manager']
pks = options['primary_keys']
if pks:
primary_keys = [pk.strip() for pk in pks.split(',')]
else:
primary_keys = []
excluded_models, excluded_apps = parse_apps_and_model_labels(excludes)
if not app_labels:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = dict.fromkeys(
app_config for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config not in excluded_apps
)
else:
if len(app_labels) > 1 and primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = {}
for label in app_labels:
try:
app_label, model_label = label.split('.')
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
try:
model = app_config.get_model(model_label)
except LookupError:
raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
app_list_value = app_list.setdefault(app_config, [])
                    # We may have previously seen an "all-models" request for
                    # this app (no model qualifier was given). In this case
                    # there is no need to add specific models to the list.
if app_list_value is not None:
if model not in app_list_value:
app_list_value.append(model)
except ValueError:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
# This is just an app - no model qualifier
app_label = label
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
app_list[app_config] = None
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
try:
serializers.get_serializer(format)
except serializers.SerializerDoesNotExist:
pass
raise CommandError("Unknown serialization format: %s" % format)
def get_objects(count_only=False):
"""
Collate the objects to be serialized. If count_only is True, just
count the number of objects to be serialized.
"""
models = serializers.sort_dependencies(app_list.items())
for model in models:
if model in excluded_models:
continue
if model._meta.proxy and model._meta.proxy_for_model not in models:
warnings.warn(
"%s is a proxy model and won't be serialized." % model._meta.label,
category=ProxyModelWarning,
)
if not model._meta.proxy and router.allow_migrate_model(using, model):
if use_base_manager:
objects = model._base_manager
else:
objects = model._default_manager
queryset = objects.using(using).order_by(model._meta.pk.name)
if primary_keys:
queryset = queryset.filter(pk__in=primary_keys)
if count_only:
yield queryset.order_by().count()
else:
yield from queryset.iterator()
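        # Note: when progress output is enabled below, get_objects() is
        # consumed twice - once with count_only=True to total the rows for
        # the progress bar, then again to stream objects to the serializer.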
try:
self.stdout.ending = None
progress_output = None
object_count = 0
# If dumpdata is outputting to stdout, there is no way to display progress
if output and self.stdout.isatty() and options['verbosity'] > 0:
progress_output = self.stdout
object_count = sum(get_objects(count_only=True))
stream = open(output, 'w') if output else None
try:
serializers.serialize(
format, get_objects(), indent=indent,
use_natural_foreign_keys=use_natural_foreign_keys,
use_natural_primary_keys=use_natural_primary_keys,
stream=stream or self.stdout, progress_output=progress_output,
object_count=object_count,
)
finally:
if stream:
stream.close()
except Exception as e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
|
41f32e80b32547f57d03d4d0f664fa1e7326bf7f56e7e4317c4c6f4f0a66ccf3 | import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import (
WSGIServer, get_internal_wsgi_application, run,
)
from django.utils import autoreload
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
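# Illustrative matches for naiveip_re (a sketch, not an exhaustive list):
#   '8000'               -> port only
#   '127.0.0.1:8000'     -> ipv4 group
#   '[2001:db8::1]:8000' -> ipv6 group (brackets are kept in the match)
#   'localhost:8000'     -> fqdn group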
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
stealth_options = ('shutdown_message',)
default_addr = '127.0.0.1'
default_addr_ipv6 = '::1'
default_port = '8000'
protocol = 'http'
server_cls = WSGIServer
def add_arguments(self, parser):
parser.add_argument(
'addrport', nargs='?',
help='Optional port number, or ipaddr:port'
)
parser.add_argument(
'--ipv6', '-6', action='store_true', dest='use_ipv6',
help='Tells Django to use an IPv6 address.',
)
parser.add_argument(
'--nothreading', action='store_false', dest='use_threading',
help='Tells Django to NOT use threading.',
)
parser.add_argument(
'--noreload', action='store_false', dest='use_reloader',
help='Tells Django to NOT use the auto-reloader.',
)
def execute(self, *args, **options):
if options['no_color']:
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ["DJANGO_COLORS"] = "nocolor"
super().execute(*args, **options)
def get_handler(self, *args, **options):
"""Return the default WSGI handler for the runner."""
return get_internal_wsgi_application()
def handle(self, *args, **options):
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options['use_ipv6']
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options['addrport']:
self.addr = ''
self.port = self.default_port
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = self.default_addr_ipv6 if self.use_ipv6 else self.default_addr
self._raw_ipv6 = self.use_ipv6
self.run(**options)
def run(self, **options):
"""Run the server, using the autoreloader if needed."""
use_reloader = options['use_reloader']
if use_reloader:
autoreload.run_with_reloader(self.inner_run, **options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options['use_threading']
# 'shutdown_message' is a stealth option.
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks…\n\n")
self.check(display_num_errors=True)
# Need to check migrations here, so can't use the
# requires_migrations_check attribute.
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at %(protocol)s://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"protocol": self.protocol,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading, server_cls=self.server_cls)
except OSError as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = e
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
# Kept for backward compatibility
BaseRunserverCommand = Command
|
e18e6b3ad0711b3fe10dde7f276b12ecfed62728a80dcd6ff1bbcff9b5c55588 | import keyword
import re
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.models.constants import LOOKUP_SEP
class Command(BaseCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
requires_system_checks = False
stealth_options = ('table_name_filter',)
db_module = 'django.db'
def add_arguments(self, parser):
parser.add_argument(
'table', nargs='*', type=str,
help='Selects what tables or views should be introspected.',
)
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to introspect. Defaults to using the "default" database.',
)
parser.add_argument(
'--include-partitions', action='store_true', help='Also output models for partition tables.',
)
parser.add_argument(
'--include-views', action='store_true', help='Also output models for database views.',
)
def handle(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options['database']]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
def table2model(table_name):
return re.sub(r'[^a-zA-Z0-9]', '', table_name.title())
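        # e.g. table2model('auth_user') -> 'AuthUser' and
        # table2model('my-legacy.table') -> 'MyLegacyTable'.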
with connection.cursor() as cursor:
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# * Make sure each ForeignKey has `on_delete` set to the desired behavior."
yield (
"# * Remove `managed = False` lines if you wish to allow "
"Django to create, modify, and delete the table"
)
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield 'from %s import models' % self.db_module
known_models = []
table_info = connection.introspection.get_table_list(cursor)
# Determine types of tables and/or views to be introspected.
types = {'t'}
if options['include_partitions']:
types.add('p')
if options['include_views']:
types.add('v')
for table_name in (options['table'] or sorted(info.name for info in table_info if info.type in types)):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
try:
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
constraints = connection.introspection.get_constraints(cursor, table_name)
except NotImplementedError:
constraints = {}
primary_key_column = connection.introspection.get_primary_key_column(cursor, table_name)
unique_columns = [
c['columns'][0] for c in constraints.values()
if c['unique'] and len(c['columns']) == 1
]
table_description = connection.introspection.get_table_description(cursor, table_name)
except Exception as e:
yield "# Unable to inspect table '%s'" % table_name
yield "# The error was: %s" % e
continue
yield ''
yield ''
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
used_column_names = [] # Holds column names used in the table so far
column_to_field_name = {} # Maps column names to names of model fields
for row in table_description:
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
column_name = row.name
is_relation = column_name in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
column_to_field_name[column_name] = att_name
# Add primary_key and unique, if necessary.
if column_name == primary_key_column:
extra_params['primary_key'] = True
elif column_name in unique_columns:
extra_params['unique'] = True
if is_relation:
rel_to = (
"self" if relations[column_name][1] == table_name
else table2model(relations[column_name][1])
)
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
                    # Don't output 'id = models.AutoField(primary_key=True)', because
                    # that's assumed if it doesn't exist.
if att_name == 'id' and extra_params == {'primary_key': True}:
if field_type == 'AutoField(':
continue
elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
comment_notes.append('AutoField?')
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row.null_ok: # If it's NULL...
extra_params['blank'] = True
extra_params['null'] = True
field_desc = '%s = %s%s' % (
att_name,
# Custom fields will have a dotted path
'' if '.' in field_type else 'models.',
field_type,
)
if field_type.startswith('ForeignKey('):
field_desc += ', models.DO_NOTHING'
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join('%s=%r' % (k, v) for k, v in extra_params.items())
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
is_view = any(info.name == table_name and info.type == 'v' for info in table_info)
is_partition = any(info.name == table_name and info.type == 'p' for info in table_info)
for meta_line in self.get_meta(table_name, constraints, column_to_field_name, is_view, is_partition):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
        Modify the column name to make it Python-compatible as a field name.
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find(LOOKUP_SEP) >= 0:
while new_name.find(LOOKUP_SEP) >= 0:
new_name = new_name.replace(LOOKUP_SEP, '_')
if col_name.lower().find(LOOKUP_SEP) >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
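    # Illustrative examples (assumed column names):
    #   normalize_col_name('User ID', [], False)
    #   -> ('user_id', {'db_column': 'User ID'}, [two renaming notes])
    #   normalize_col_name('group_id', [], True)
    #   -> ('group', {}, [])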
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row.type_code, row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# Add max_length for all CharFields.
if field_type == 'CharField' and row.internal_size:
field_params['max_length'] = int(row.internal_size)
if field_type == 'DecimalField':
if row.precision is None or row.scale is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row.precision if row.precision is not None else 10
field_params['decimal_places'] = row.scale if row.scale is not None else 5
else:
field_params['max_digits'] = row.precision
field_params['decimal_places'] = row.scale
return field_type, field_params, field_notes
def get_meta(self, table_name, constraints, column_to_field_name, is_view, is_partition):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
unique_together = []
has_unsupported_constraint = False
for params in constraints.values():
if params['unique']:
columns = params['columns']
if None in columns:
has_unsupported_constraint = True
columns = [x for x in columns if x is not None]
if len(columns) > 1:
unique_together.append(str(tuple(column_to_field_name[c] for c in columns)))
if is_view:
managed_comment = " # Created from a view. Don't remove."
elif is_partition:
managed_comment = " # Created from a partition. Don't remove."
else:
managed_comment = ''
meta = ['']
if has_unsupported_constraint:
meta.append(' # A unique constraint could not be introspected.')
meta += [
' class Meta:',
' managed = False%s' % managed_comment,
' db_table = %r' % table_name
]
if unique_together:
tup = '(' + ', '.join(unique_together) + ',)'
meta += [" unique_together = %s" % tup]
return meta
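# Editor's sketch (illustration, not part of Django): the non-relation path
# of normalize_col_name() boils down to "lowercase, replace non-word
# characters, then guard against keywords and invalid identifiers". A
# standalone rendition of that core, using only the stdlib; the helper name
# and demo inputs are made up:
import keyword as _keyword
import re as _re

def _sketch_normalize(col_name):
    """Minimal stand-in for the non-relation path of normalize_col_name()."""
    name = _re.sub(r'\W', '_', col_name.lower())  # 'User ID' -> 'user_id'
    if _keyword.iskeyword(name):
        name += '_field'  # 'class' -> 'class_field'
    if name[:1].isdigit():
        name = 'number_%s' % name  # '2fa' -> 'number_2fa'
    return name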
|
406b09b64360ed37eb73a2855e20d32a5d9169a828ff8ade813dc27268deb8fb | "File-based cache backend"
import glob
import hashlib
import os
import pickle
import random
import tempfile
import time
import zlib
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.files import locks
from django.core.files.move import file_move_safe
class FileBasedCache(BaseCache):
cache_suffix = '.djcache'
pickle_protocol = pickle.HIGHEST_PROTOCOL
def __init__(self, dir, params):
super().__init__(params)
self._dir = os.path.abspath(dir)
self._createdir()
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
if self.has_key(key, version):
return False
self.set(key, value, timeout, version)
return True
def get(self, key, default=None, version=None):
fname = self._key_to_file(key, version)
try:
with open(fname, 'rb') as f:
if not self._is_expired(f):
return pickle.loads(zlib.decompress(f.read()))
except FileNotFoundError:
pass
return default
def _write_content(self, file, timeout, value):
expiry = self.get_backend_timeout(timeout)
file.write(pickle.dumps(expiry, self.pickle_protocol))
file.write(zlib.compress(pickle.dumps(value, self.pickle_protocol)))
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
self._createdir() # Cache dir can be deleted at any time.
fname = self._key_to_file(key, version)
self._cull() # make some room if necessary
fd, tmp_path = tempfile.mkstemp(dir=self._dir)
renamed = False
try:
with open(fd, 'wb') as f:
self._write_content(f, timeout, value)
file_move_safe(tmp_path, fname, allow_overwrite=True)
renamed = True
finally:
if not renamed:
os.remove(tmp_path)
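# Editor's note: set() writes the new content to a temporary file created
# with mkstemp() in the cache dir and only then moves it over the
# destination, so concurrent get() calls never observe a half-written
# entry; the finally clause removes the temp file whenever the move
# didn't happen.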
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
try:
with open(self._key_to_file(key, version), 'r+b') as f:
try:
locks.lock(f, locks.LOCK_EX)
if self._is_expired(f):
return False
else:
previous_value = pickle.loads(zlib.decompress(f.read()))
f.seek(0)
self._write_content(f, timeout, previous_value)
return True
finally:
locks.unlock(f)
except FileNotFoundError:
return False
def delete(self, key, version=None):
self._delete(self._key_to_file(key, version))
def _delete(self, fname):
if not fname.startswith(self._dir) or not os.path.exists(fname):
return
try:
os.remove(fname)
except FileNotFoundError:
# The file may have been removed by another process.
pass
def has_key(self, key, version=None):
fname = self._key_to_file(key, version)
if os.path.exists(fname):
with open(fname, 'rb') as f:
return not self._is_expired(f)
return False
def _cull(self):
"""
Remove random cache entries if max_entries is reached at a ratio
of num_entries / cull_frequency. A value of 0 for CULL_FREQUENCY means
that the entire cache will be purged.
"""
filelist = self._list_cache_files()
num_entries = len(filelist)
if num_entries < self._max_entries:
return # return early if no culling is required
if self._cull_frequency == 0:
return self.clear() # Clear the cache when CULL_FREQUENCY = 0
# Delete a random selection of entries
filelist = random.sample(filelist,
int(num_entries / self._cull_frequency))
for fname in filelist:
self._delete(fname)
def _createdir(self):
os.makedirs(self._dir, 0o700, exist_ok=True)
def _key_to_file(self, key, version=None):
"""
Convert a key into a cache file path: the root cache path joined
with the MD5 hash of the key plus the cache suffix.
"""
key = self.make_key(key, version=version)
self.validate_key(key)
return os.path.join(self._dir, ''.join(
[hashlib.md5(key.encode()).hexdigest(), self.cache_suffix]))
def clear(self):
"""
Remove all the cache files.
"""
if not os.path.exists(self._dir):
return
for fname in self._list_cache_files():
self._delete(fname)
def _is_expired(self, f):
"""
Take an open cache file `f` and delete it if it's expired.
"""
try:
exp = pickle.load(f)
except EOFError:
exp = 0 # An empty file is considered expired.
if exp is not None and exp < time.time():
f.close() # On Windows a file has to be closed before deleting
self._delete(f.name)
return True
return False
def _list_cache_files(self):
"""
Get a list of paths to all the cache files. These are all the files
in the root cache dir that end with the cache_suffix.
"""
if not os.path.exists(self._dir):
return []
filelist = [os.path.join(self._dir, fname) for fname
in glob.glob1(self._dir, '*%s' % self.cache_suffix)]
return filelist
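# Editor's sketch (illustration, not part of Django): the on-disk format
# written by _write_content() is a pickled expiry timestamp followed by the
# zlib-compressed pickle of the value. Decoding a raw entry by hand shows
# why _is_expired() can pickle.load() just the expiry and leave the file
# position at the start of the compressed payload; the helper name is made
# up for the demo:
def _sketch_read_cache_blob(blob):
    """Decode one cache entry from raw bytes; return (expiry, value)."""
    import io
    f = io.BytesIO(blob)
    expiry = pickle.load(f)  # first pickle: the expiry (a float or None)
    value = pickle.loads(zlib.decompress(f.read()))  # remainder: the value
    return expiry, value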
|
fe05d24893246bee50ed673edbae1c545d6bd7868cd3bda2da84afd9c688a909 | from django.contrib import auth
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from .validators import UnicodeUsernameValidator
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
class PermissionManager(models.Manager):
use_in_migrations = True
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.db_manager(self.db).get_by_natural_key(app_label, model),
)
class Permission(models.Model):
"""
The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form
and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
- The "view" permission limits the ability to view an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have a
certain status or publication date."
The permissions listed above are automatically created for each model.
"""
name = models.CharField(_('name'), max_length=255)
content_type = models.ForeignKey(
ContentType,
models.CASCADE,
verbose_name=_('content type'),
)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model',
'codename')
def __str__(self):
return '%s | %s' % (self.content_type, self.name)
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
"""
The manager for the auth's Group model.
"""
use_in_migrations = True
def get_by_natural_key(self, name):
return self.get(name=name)
class Group(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group 'Site editors' has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_('name'), max_length=150, unique=True)
permissions = models.ManyToManyField(
Permission,
verbose_name=_('permissions'),
blank=True,
)
objects = GroupManager()
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, username, email, password, **extra_fields):
"""
Create and save a user with the given username, email, and password.
"""
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
username = self.model.normalize_username(username)
user = self.model(username=username, email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, email, password, **extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, email, password, **extra_fields)
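# Editor's sketch (hypothetical values, not part of Django): UserManager is
# exposed as User.objects, so typical use looks like
#
#   User.objects.create_user('alice', 'alice@example.com', 's3cret')
#   User.objects.create_superuser('admin', 'admin@example.com', 's3cret')
#
# create_superuser() forces is_staff/is_superuser to True and raises
# ValueError if a caller explicitly passes False for either flag.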
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
permissions.update(backend.get_all_permissions(user, obj))
return permissions
def _user_has_perm(user, perm, obj):
"""
A backend can raise `PermissionDenied` to short-circuit permission checking.
"""
for backend in auth.get_backends():
if not hasattr(backend, 'has_perm'):
continue
try:
if backend.has_perm(user, perm, obj):
return True
except PermissionDenied:
return False
return False
def _user_has_module_perms(user, app_label):
"""
A backend can raise `PermissionDenied` to short-circuit permission checking.
"""
for backend in auth.get_backends():
if not hasattr(backend, 'has_module_perms'):
continue
try:
if backend.has_module_perms(user, app_label):
return True
except PermissionDenied:
return False
return False
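# Editor's illustration (hypothetical backend, not part of Django): the three
# helpers above share one pattern -- iterate over the configured backends,
# skip any that don't implement the hook, and treat PermissionDenied as a
# hard veto. A backend only needs to implement the hooks it cares about:
class _ExampleReadOnlyBackend:
    def has_perm(self, user, perm, obj=None):
        # perm is a string like '<app_label>.delete_<model>'.
        if perm.rpartition('.')[2].startswith('delete_'):
            raise PermissionDenied  # hard veto: _user_has_perm() returns False
        return False  # defer to the remaining backends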
class PermissionsMixin(models.Model):
"""
Add the fields and methods necessary to support the Group and Permission
models using the ModelBackend.
"""
is_superuser = models.BooleanField(
_('superuser status'),
default=False,
help_text=_(
'Designates that this user has all permissions without '
'explicitly assigning them.'
),
)
groups = models.ManyToManyField(
Group,
verbose_name=_('groups'),
blank=True,
help_text=_(
'The groups this user belongs to. A user will get all permissions '
'granted to each of their groups.'
),
related_name="user_set",
related_query_name="user",
)
user_permissions = models.ManyToManyField(
Permission,
verbose_name=_('user permissions'),
blank=True,
help_text=_('Specific permissions for this user.'),
related_name="user_set",
related_query_name="user",
)
class Meta:
abstract = True
def get_group_permissions(self, obj=None):
"""
Return a list of permission strings that this user has through their
groups. Query all available auth backends. If an object is passed in,
return only permissions matching this object.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Return True if the user has the specified permission. Query all
available auth backends, but return immediately if any backend returns
True. Thus, a user who has permission from a single auth backend is
assumed to have permission in general. If an object is provided, check
permissions for that object.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Return True if the user has each of the specified permissions. If
object is passed, check if the user has all required perms for it.
"""
return all(self.has_perm(perm, obj) for perm in perm_list)
def has_module_perms(self, app_label):
"""
Return True if the user has any permissions in the given app label.
Use similar logic as has_perm(), above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username and password are required. Other fields are optional.
"""
username_validator = UnicodeUsernameValidator()
username = models.CharField(
_('username'),
max_length=150,
unique=True,
help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),
validators=[username_validator],
error_messages={
'unique': _("A user with that username already exists."),
},
)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=150, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
abstract = True
def clean(self):
super().clean()
self.email = self.__class__.objects.normalize_email(self.email)
def get_full_name(self):
"""
Return the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"""Return the short name for the user."""
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""Send an email to this user."""
send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
"""
Users within the Django authentication system are represented by this
model.
Username and password are required. Other fields are optional.
"""
class Meta(AbstractUser.Meta):
swappable = 'AUTH_USER_MODEL'
class AnonymousUser:
id = None
pk = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager(Group)
_user_permissions = EmptyManager(Permission)
def __str__(self):
return 'AnonymousUser'
def __eq__(self, other):
return isinstance(other, self.__class__)
def __hash__(self):
return 1 # instances always return the same hash value
def __int__(self):
raise TypeError('Cannot cast AnonymousUser to int. Are you trying to use it in place of User?')
def save(self):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def delete(self):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def set_password(self, raw_password):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def check_password(self, raw_password):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
@property
def groups(self):
return self._groups
@property
def user_permissions(self):
return self._user_permissions
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
return all(self.has_perm(perm, obj) for perm in perm_list)
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
@property
def is_anonymous(self):
return True
@property
def is_authenticated(self):
return False
def get_username(self):
return self.username
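# Editor's note (illustration): request.user is an AnonymousUser whenever no
# user is logged in, so view code can test the properties uniformly:
#
#   if request.user.is_authenticated:   # a property, not a method
#       ...
#
# AnonymousUser has no database representation -- save(), delete() and the
# password methods above raise NotImplementedError by design.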
|
1a42776dd076921baeda70a8152683f40594db44d38f90d1dfb49859c86312a6 | import functools
import gzip
import re
from difflib import SequenceMatcher
from pathlib import Path
from django.conf import settings
from django.core.exceptions import (
FieldDoesNotExist, ImproperlyConfigured, ValidationError,
)
from django.utils.functional import lazy
from django.utils.html import format_html, format_html_join
from django.utils.module_loading import import_string
from django.utils.translation import gettext as _, ngettext
@functools.lru_cache(maxsize=None)
def get_default_password_validators():
return get_password_validators(settings.AUTH_PASSWORD_VALIDATORS)
def get_password_validators(validator_config):
validators = []
for validator in validator_config:
try:
klass = import_string(validator['NAME'])
except ImportError:
msg = "The module in NAME could not be imported: %s. Check your AUTH_PASSWORD_VALIDATORS setting."
raise ImproperlyConfigured(msg % validator['NAME'])
validators.append(klass(**validator.get('OPTIONS', {})))
return validators
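# Editor's sketch (not part of Django): validator_config uses the same shape
# as the AUTH_PASSWORD_VALIDATORS setting -- a list of dicts with a dotted
# NAME and optional OPTIONS that become constructor keyword arguments:
#
#   get_password_validators([{
#       'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
#       'OPTIONS': {'min_length': 12},
#   }])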
def validate_password(password, user=None, password_validators=None):
"""
Validate whether the password meets all validator requirements.
If the password is valid, return ``None``.
If the password is invalid, raise ValidationError with all error messages.
"""
errors = []
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
try:
validator.validate(password, user)
except ValidationError as error:
errors.append(error)
if errors:
raise ValidationError(errors)
def password_changed(password, user=None, password_validators=None):
"""
Inform all validators that have implemented a password_changed() method
that the password has been changed.
"""
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
password_changed = getattr(validator, 'password_changed', lambda *a: None)
password_changed(password, user)
def password_validators_help_texts(password_validators=None):
"""
Return a list of all help texts of all configured validators.
"""
help_texts = []
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
help_texts.append(validator.get_help_text())
return help_texts
def _password_validators_help_text_html(password_validators=None):
"""
Return an HTML string with all help texts of all configured validators
in an <ul>.
"""
help_texts = password_validators_help_texts(password_validators)
help_items = format_html_join('', '<li>{}</li>', ((help_text,) for help_text in help_texts))
return format_html('<ul>{}</ul>', help_items) if help_items else ''
password_validators_help_text_html = lazy(_password_validators_help_text_html, str)
class MinimumLengthValidator:
"""
Validate whether the password is of a minimum length.
"""
def __init__(self, min_length=8):
self.min_length = min_length
def validate(self, password, user=None):
if len(password) < self.min_length:
raise ValidationError(
ngettext(
"This password is too short. It must contain at least %(min_length)d character.",
"This password is too short. It must contain at least %(min_length)d characters.",
self.min_length
),
code='password_too_short',
params={'min_length': self.min_length},
)
def get_help_text(self):
return ngettext(
"Your password must contain at least %(min_length)d character.",
"Your password must contain at least %(min_length)d characters.",
self.min_length
) % {'min_length': self.min_length}
class UserAttributeSimilarityValidator:
"""
Validate whether the password is sufficiently different from the user's
attributes.
If no specific attributes are provided, look at a sensible list of
defaults. Attributes that don't exist are ignored. Comparison is made
not only to the full attribute value but also to its components, so
that, for example, a password is validated against each part of an
email address as well as against the full address.
"""
DEFAULT_USER_ATTRIBUTES = ('username', 'first_name', 'last_name', 'email')
def __init__(self, user_attributes=DEFAULT_USER_ATTRIBUTES, max_similarity=0.7):
self.user_attributes = user_attributes
self.max_similarity = max_similarity
def validate(self, password, user=None):
if not user:
return
for attribute_name in self.user_attributes:
value = getattr(user, attribute_name, None)
if not value or not isinstance(value, str):
continue
value_parts = re.split(r'\W+', value) + [value]
for value_part in value_parts:
if SequenceMatcher(a=password.lower(), b=value_part.lower()).quick_ratio() >= self.max_similarity:
try:
verbose_name = str(user._meta.get_field(attribute_name).verbose_name)
except FieldDoesNotExist:
verbose_name = attribute_name
raise ValidationError(
_("The password is too similar to the %(verbose_name)s."),
code='password_too_similar',
params={'verbose_name': verbose_name},
)
def get_help_text(self):
return _("Your password can't be too similar to your other personal information.")
class CommonPasswordValidator:
"""
Validate whether the password is a common password.
The password is rejected if it occurs in a provided list of passwords,
which may be gzipped. The list Django ships with contains 20000 common
passwords (lowercased and deduplicated), created by Royce Williams:
https://gist.github.com/roycewilliams/281ce539915a947a23db17137d91aeb7
The password list must be lowercased to match the comparison in validate().
"""
DEFAULT_PASSWORD_LIST_PATH = Path(__file__).resolve().parent / 'common-passwords.txt.gz'
def __init__(self, password_list_path=DEFAULT_PASSWORD_LIST_PATH):
try:
with gzip.open(str(password_list_path)) as f:
common_passwords_lines = f.read().decode().splitlines()
except OSError:
with open(str(password_list_path)) as f:
common_passwords_lines = f.readlines()
self.passwords = {p.strip() for p in common_passwords_lines}
def validate(self, password, user=None):
if password.lower().strip() in self.passwords:
raise ValidationError(
_("This password is too common."),
code='password_too_common',
)
def get_help_text(self):
return _("Your password can't be a commonly used password.")
class NumericPasswordValidator:
"""
Validate that the password is not entirely numeric.
"""
def validate(self, password, user=None):
if password.isdigit():
raise ValidationError(
_("This password is entirely numeric."),
code='password_entirely_numeric',
)
def get_help_text(self):
return _("Your password can't be entirely numeric.")
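# Editor's sketch (illustration, not part of Django): validators can also be
# used directly, bypassing the AUTH_PASSWORD_VALIDATORS setting, by passing
# them to validate_password() explicitly; the helper name is made up:
def _sketch_validate(password, user=None):
    """Check a password against two explicit validators; all failures are
    collected into a single ValidationError before it is raised."""
    validate_password(password, user, password_validators=[
        MinimumLengthValidator(min_length=8),
        NumericPasswordValidator(),
    ])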
|
53cfc9cc6258a64544139e2b844340a02c169f9414a0f03b5d4aaf8782b6ee8e | import copy
import json
import operator
import re
from functools import partial, reduce, update_wrapper
from urllib.parse import quote as urlquote
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks,
)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects, construct_change_message, flatten_fieldsets,
get_deleted_objects, lookup_needs_distinct, model_format_dict,
model_ngettext, quote, unquote,
)
from django.contrib.admin.views.autocomplete import AutocompleteJsonView
from django.contrib.admin.widgets import (
AutocompleteSelect, AutocompleteSelectMultiple,
)
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist, FieldError, PermissionDenied, ValidationError,
)
from django.core.paginator import Paginator
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet, inlineformset_factory, modelform_defines_fields,
modelform_factory, modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, format_lazy, get_text_list
from django.utils.translation import gettext as _, ngettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return 'radiolist' if radio_style == VERTICAL else 'radiolist inline'
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
models.EmailField: {'widget': widgets.AdminEmailInputWidget},
models.UUIDField: {'widget': widgets.AdminUUIDInputWidget},
}
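# Editor's sketch (hypothetical ModelAdmin, not part of Django): per-admin
# formfield_overrides are merged into these defaults by
# BaseModelAdmin.__init__() below, so overriding one field class keeps the
# default widgets for all the others:
#
#   class ArticleAdmin(admin.ModelAdmin):
#       formfield_overrides = {
#           models.TextField: {'widget': forms.Textarea(attrs={'rows': 4})},
#       }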
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(metaclass=forms.MediaDefiningClass):
"""Functionality common to both ModelAdmin and InlineAdmin."""
autocomplete_fields = ()
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
sortable_by = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
def check(self, **kwargs):
return self.checks_class().check(self, **kwargs)
def __init__(self):
# Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides
# rather than simply overwriting.
overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS)
for k, v in self.formfield_overrides.items():
overrides.setdefault(k, {}).update(v)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs}
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.remote_field.model)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(request),
can_delete_related=related_modeladmin.has_delete_permission(request),
can_view_related=related_modeladmin.has_view_permission(request),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = {**copy.deepcopy(self.formfield_overrides[klass]), **kwargs}
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset; let the field decide
(return None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.remote_field.model)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(*ordering)
return None
def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get('using')
if 'widget' not in kwargs:
if db_field.name in self.get_autocomplete_fields(request):
kwargs['widget'] = AutocompleteSelect(db_field.remote_field, self.admin_site, using=db)
elif db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field, self.admin_site, using=db)
elif db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = _('None') if db_field.blank else None
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get('using')
autocomplete_fields = self.get_autocomplete_fields(request)
if db_field.name in autocomplete_fields:
kwargs['widget'] = AutocompleteSelectMultiple(db_field.remote_field, self.admin_site, using=db)
elif db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field, self.admin_site, using=db)
elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]:
kwargs['widget'] = widgets.FilteredSelectMultiple(
db_field.verbose_name,
db_field.name in self.filter_vertical
)
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
form_field = db_field.formfield(**kwargs)
if (isinstance(form_field.widget, SelectMultiple) and
not isinstance(form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple))):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
help_text = form_field.help_text
form_field.help_text = format_lazy('{} {}', help_text, msg) if help_text else msg
return form_field
def get_autocomplete_fields(self, request):
"""
Return a list of ForeignKey and/or ManyToMany fields which should use
an autocomplete widget.
"""
return self.autocomplete_fields
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': get_content_type_for_model(obj).pk,
'object_id': obj.pk
})
def get_empty_value_display(self):
"""
Return the empty_value_display set on ModelAdmin or AdminSite.
"""
try:
return mark_safe(self.empty_value_display)
except AttributeError:
return mark_safe(self.admin_site.empty_value_display)
def get_exclude(self, request, obj=None):
"""
Hook for specifying exclude.
"""
return self.exclude
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
if self.fields:
return self.fields
# _get_form_for_get_fields() is implemented in subclasses.
form = self._get_form_for_get_fields(request, obj)
return [*form.base_fields, *self.get_readonly_fields(request, obj)]
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
if self.fieldsets:
return self.fieldsets
return [(None, {'fields': self.get_fields(request, obj)})]
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or ()  # Otherwise we might try to unpack None in order_by(*ordering).
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Return a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def get_sortable_by(self, request):
"""Hook for specifying which fields can be sorted in the changelist."""
return self.sortable_by if self.sortable_by is not None else self.get_list_display(request)
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for fk_lookup in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(fk_lookup):
fk_lookup = fk_lookup()
if (lookup, value) in widgets.url_params_from_lookup_dict(fk_lookup).items():
return True
relation_parts = []
prev_field = None
for part in lookup.split(LOOKUP_SEP):
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on nonexistent fields are ok, since they're ignored
# later.
break
# It is allowed to filter on values that would be found from local
# model anyways. For example, if you filter on employee__department__id,
# then the id value would be found already from employee__department_id.
if not prev_field or (prev_field.is_relation and
field not in prev_field.get_path_info()[-1].target_fields):
relation_parts.append(part)
if not getattr(field, 'get_path_info', None):
# This is not a relational field, so further parts
# must be transforms.
break
prev_field = field
model = field.get_path_info()[-1].to_opts.model
if len(relation_parts) <= 1:
# Either a local field filter, or no fields at all.
return True
valid_lookups = {self.date_hierarchy}
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
valid_lookups.add(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.add(filter_item[0])
else:
valid_lookups.add(filter_item)
# Is it a valid relational lookup?
return not {
LOOKUP_SEP.join(relation_parts),
LOOKUP_SEP.join(relation_parts + [part])
}.isdisjoint(valid_lookups)
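# Editor's note (made-up model names): for
# lookup_allowed('employee__department__id', value), relation_parts ends up
# as ['employee', 'department'] (the trailing 'id' is resolvable from the
# local FK column), so the lookup is allowed only if 'employee__department'
# or 'employee__department__id' appears in list_filter or matches
# date_hierarchy.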
def to_field_allowed(self, request, to_field):
"""
Return True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
opts = self.model._meta
try:
field = opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f for f in opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
remote_field = related_object.field.remote_field
if (any(issubclass(model, related_model) for model in registered_models) and
hasattr(remote_field, 'get_related_field') and
remote_field.get_related_field() == field):
return True
return False
def has_add_permission(self, request):
"""
Return True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename('add', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
Return True if the given request has permission to change the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
Return True if the given request has permission to delete the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_view_permission(self, request, obj=None):
"""
Return True if the given request has permission to view the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
If overridden by the user in subclasses, it should return True if the
given request has permission to view the `obj` model instance. If `obj`
is None, it should return True if the request has permission to view
any object of the given type.
"""
opts = self.opts
codename_view = get_permission_codename('view', opts)
codename_change = get_permission_codename('change', opts)
return (
request.user.has_perm('%s.%s' % (opts.app_label, codename_view)) or
request.user.has_perm('%s.%s' % (opts.app_label, codename_change))
)
def has_view_or_change_permission(self, request, obj=None):
return self.has_view_permission(request, obj) or self.has_change_permission(request, obj)
def has_module_permission(self, request):
"""
Return True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
class ModelAdmin(BaseModelAdmin):
"""Encapsulate all admin options and functionality for a given model."""
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
date_hierarchy = None
save_as = False
save_as_continue = True
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
popup_response_template = None
# Actions
actions = []
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super().__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_view_or_change_permission(request, obj) or
inline.has_add_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request, obj):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.urls import path
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
urlpatterns = [
path('', wrap(self.changelist_view), name='%s_%s_changelist' % info),
path('add/', wrap(self.add_view), name='%s_%s_add' % info),
path('autocomplete/', wrap(self.autocomplete_view), name='%s_%s_autocomplete' % info),
path('<path:object_id>/history/', wrap(self.history_view), name='%s_%s_history' % info),
path('<path:object_id>/delete/', wrap(self.delete_view), name='%s_%s_delete' % info),
path('<path:object_id>/change/', wrap(self.change_view), name='%s_%s_change' % info),
# For backwards compatibility (was the change url before 1.9)
path('<path:object_id>/', wrap(RedirectView.as_view(
pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info)
))),
]
return urlpatterns
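# Editor's note: the generated URL names follow the
# 'admin:<app_label>_<model_name>_<action>' convention, so e.g.
# reverse('admin:auth_user_change', args=[user.pk]) resolves the change
# view for the built-in User model.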
@property
def urls(self):
return self.get_urls()
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'core.js',
'admin/RelatedObjectLookups.js',
'actions%s.js' % extra,
'urlify.js',
'prepopulate%s.js' % extra,
'vendor/xregexp/xregexp%s.js' % extra,
]
return forms.Media(js=['admin/js/%s' % url for url in js])
def get_model_perms(self, request):
"""
Return a dict of all perms for this model. This dict has the keys
``add``, ``change``, ``delete``, and ``view`` mapping to True/False
for each of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
'view': self.has_view_permission(request),
}
def _get_form_for_get_fields(self, request, obj):
return self.get_form(request, obj, fields=None)
def get_form(self, request, obj=None, change=False, **kwargs):
"""
Return a Form class for use in the admin add and change views. This
is used by add_view and change_view.
"""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
readonly_fields = self.get_readonly_fields(request, obj)
exclude.extend(readonly_fields)
# Exclude all fields if it's a change form and the user doesn't have
# the change permission.
if change and hasattr(request, 'user') and not self.has_change_permission(request, obj):
exclude.extend(fields)
if excluded is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
# Remove declared form fields which are in readonly_fields.
new_attrs = dict.fromkeys(f for f in readonly_fields if f in self.form.declared_fields)
form = type(self.form.__name__, (self.form,), new_attrs)
defaults = {
'form': form,
'fields': fields,
'exclude': exclude,
'formfield_callback': partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError(
'%s. Check fields/fieldsets/exclude attributes of class %s.'
% (e, self.__class__.__name__)
)
def get_changelist(self, request, **kwargs):
"""
Return the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_changelist_instance(self, request):
"""
Return a `ChangeList` instance based on `request`. May raise
`IncorrectLookupParameters`.
"""
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
# Add the action checkboxes if any actions are available.
if self.get_actions(request):
list_display = ['action_checkbox', *list_display]
sortable_by = self.get_sortable_by(request)
ChangeList = self.get_changelist(request)
return ChangeList(
request,
self.model,
list_display,
list_display_links,
self.get_list_filter(request),
self.date_hierarchy,
self.get_search_fields(request),
self.get_list_select_related(request),
self.list_per_page,
self.list_max_show_all,
self.list_editable,
self,
sortable_by,
)
def get_object(self, request, object_id, from_field=None):
"""
Return an instance matching the field and value provided; the primary
key is used if no field is provided. Return ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = model._meta.pk if from_field is None else model._meta.get_field(from_field)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Return a Form class for use in the Formset on the changelist page.
"""
defaults = {
'formfield_callback': partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
if defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form')):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Return a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
'formfield_callback': partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
return modelformset_factory(
self.model, self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults
)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yield formsets and the corresponding inlines.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, object, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=str(object),
action_flag=ADDITION,
change_message=message,
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=str(object),
action_flag=CHANGE,
change_message=message,
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=object_repr,
action_flag=DELETION,
)
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, str(obj.pk))
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle">')
def _get_base_actions(self):
"""Return the list of actions, prior to any request-based filtering."""
actions = []
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
description = getattr(func, 'short_description', name.replace('_', ' '))
actions.append((func, name, description))
# Add actions from this ModelAdmin.
actions.extend(self.get_action(action) for action in self.actions or [])
# get_action might have returned None, so filter any of those out.
return filter(None, actions)
def _filter_actions_by_permissions(self, request, actions):
"""Filter out any actions that the user doesn't have access to."""
filtered_actions = []
for action in actions:
callable = action[0]
if not hasattr(callable, 'allowed_permissions'):
filtered_actions.append(action)
continue
permission_checks = (
getattr(self, 'has_%s_permission' % permission)
for permission in callable.allowed_permissions
)
if any(has_permission(request) for has_permission in permission_checks):
filtered_actions.append(action)
return filtered_actions
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is set to None that means actions are disabled on
# this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return {}
actions = self._filter_actions_by_permissions(request, self._get_base_actions())
return {name: (func, name, desc) for func, name, desc in actions}
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in self.get_actions(request).values():
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return the given action, which can be either a callable or the name of
a method on the ModelAdmin. The return value is a tuple of
(callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description
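# Editor's sketch (hypothetical action, not part of Django): an action is
# any callable taking (modeladmin, request, queryset); short_description
# and allowed_permissions are the optional attributes consulted above:
#
#   def make_published(modeladmin, request, queryset):
#       queryset.update(status='published')
#   make_published.short_description = 'Mark selected stories as published'
#   make_published.allowed_permissions = ('change',)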
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or self.list_display_links is None or not list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Return a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_list_select_related(self, request):
"""
Return a list of fields to add to the select_related() part of the
changelist items query.
"""
return self.list_select_related
def get_search_fields(self, request):
"""
Return a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Return a tuple containing a queryset to implement the search
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
# Use field_name if it includes a lookup.
opts = queryset.model._meta
lookup_fields = field_name.split(LOOKUP_SEP)
# Go through the fields, following all relations.
prev_field = None
for path_part in lookup_fields:
if path_part == 'pk':
path_part = opts.pk.name
try:
field = opts.get_field(path_part)
except FieldDoesNotExist:
# Use valid query lookups.
if prev_field and prev_field.get_lookup(path_part):
return field_name
else:
prev_field = field
if hasattr(field, 'get_path_info'):
# Update opts to follow the relation.
opts = field.get_path_info()[-1].to_opts
# Otherwise, use the field with icontains.
return "%s__icontains" % field_name
use_distinct = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
use_distinct |= any(lookup_needs_distinct(self.opts, search_spec) for search_spec in orm_lookups)
return queryset, use_distinct
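    # For illustration, construct_search() above maps search_fields entries
    # to ORM lookups roughly as follows (hypothetical field names):
    #
    #     '^name'       -> 'name__istartswith'
    #     '=email'      -> 'email__iexact'
    #     '@body'       -> 'body__search'
    #     'city'        -> 'city__icontains'
    #     'group__name' -> 'group__name__icontains' (relations are followed)
    #
    # Each whitespace-separated bit of the search term is OR-ed across all
    # lookups, and the bits themselves are AND-ed via successive filter()
    # calls.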
def get_preserved_filters(self, request):
"""
Return the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
opts = self.model._meta
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get('_changelist_filters')
if preserved_filters:
return urlencode({'_changelist_filters': preserved_filters})
return ''
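    # For example, on a changelist requested as ?q=django&o=1 this returns
    # '_changelist_filters=q%3Ddjango%26o%3D1', which related admin views
    # append to their URLs so that links back to the changelist restore the
    # filters.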
def construct_change_message(self, request, form, formsets, add=False):
"""
Construct a JSON structure describing changes from a changed object.
"""
return construct_change_message(form, formsets, add)
def message_user(self, request, message, level=messages.INFO, extra_tags='',
fail_silently=False):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ', '.join('`%s`' % l for l in levels)
raise ValueError(
'Bad message level string: `%s`. Possible values are: %s'
% (level, levels_repr)
)
messages.add_message(request, level, message, extra_tags=extra_tags, fail_silently=fail_silently)
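    # Minimal usage sketch -- the following two calls are equivalent, the
    # second resolving the string through messages.constants:
    #
    #     self.message_user(request, 'Watch out!', messages.WARNING)
    #     self.message_user(request, 'Watch out!', level='warning')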
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
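    # A common override, sketched with a hypothetical 'last_edited_by'
    # field, stamps the current user before delegating to the default save:
    #
    #     def save_model(self, request, obj, form, change):
    #         obj.last_edited_by = request.user
    #         super().save_model(request, obj, form, change)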
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def delete_queryset(self, request, queryset):
"""Given a queryset, delete it from the database."""
queryset.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
view_on_site_url = self.get_view_on_site_url(obj)
has_editable_inline_admin_formsets = False
for inline in context['inline_admin_formsets']:
if inline.has_add_permission or inline.has_change_permission or inline.has_delete_permission:
has_editable_inline_admin_formsets = True
break
context.update({
'add': add,
'change': change,
'has_view_permission': self.has_view_permission(request, obj),
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_editable_inline_admin_formsets': has_editable_inline_admin_formsets,
'has_file_field': context['adminform'].form.is_multipart() or any(
admin_formset.formset.form().is_multipart()
for admin_formset in context['inline_admin_formsets']
),
'has_absolute_url': view_on_site_url is not None,
'absolute_url': view_on_site_url,
'form_url': form_url,
'opts': opts,
'content_type_id': get_content_type_for_model(self.model).pk,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'to_field_var': TO_FIELD_VAR,
'is_popup_var': IS_POPUP_VAR,
'app_label': app_label,
})
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(request, form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determine the HttpResponse for the add_view stage.
"""
opts = obj._meta
preserved_filters = self.get_preserved_filters(request)
obj_url = reverse(
'admin:%s_%s_change' % (opts.app_label, opts.model_name),
args=(quote(obj.pk),),
current_app=self.admin_site.name,
)
# Add a link to the object's change form if the user can edit the obj.
if self.has_change_permission(request, obj):
obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
else:
obj_repr = str(obj)
msg_dict = {
'name': opts.verbose_name,
'obj': obj_repr,
}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
popup_response_data = json.dumps({
'value': str(value),
'obj': str(obj),
})
return TemplateResponse(request, self.popup_response_template or [
'admin/%s/%s/popup_response.html' % (opts.app_label, opts.model_name),
'admin/%s/popup_response.html' % opts.app_label,
'admin/popup_response.html',
], {
'popup_response_data': popup_response_data,
})
elif "_continue" in request.POST or (
# Redirecting after "Save as new".
"_saveasnew" in request.POST and self.save_as_continue and
self.has_change_permission(request, obj)
):
msg = _('The {name} "{obj}" was added successfully.')
if self.has_change_permission(request, obj):
msg += ' ' + _('You may edit it again below.')
self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS)
if post_url_continue is None:
post_url_continue = obj_url
post_url_continue = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts},
post_url_continue
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = format_html(
_('The {name} "{obj}" was added successfully. You may add another {name} below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_('The {name} "{obj}" was added successfully.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determine the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
opts = obj._meta
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else opts.pk.attname
value = request.resolver_match.kwargs['object_id']
new_value = obj.serializable_value(attr)
popup_response_data = json.dumps({
'action': 'change',
'value': str(value),
'obj': str(obj),
'new_value': str(new_value),
})
return TemplateResponse(request, self.popup_response_template or [
'admin/%s/%s/popup_response.html' % (opts.app_label, opts.model_name),
'admin/%s/popup_response.html' % opts.app_label,
'admin/popup_response.html',
], {
'popup_response_data': popup_response_data,
})
opts = self.model._meta
preserved_filters = self.get_preserved_filters(request)
msg_dict = {
'name': opts.verbose_name,
'obj': format_html('<a href="{}">{}</a>', urlquote(request.path), obj),
}
if "_continue" in request.POST:
msg = format_html(
_('The {name} "{obj}" was changed successfully. You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = format_html(
_('The {name} "{obj}" was added successfully. You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(obj.pk,),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = format_html(
_('The {name} "{obj}" was changed successfully. You may add another {name} below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_('The {name} "{obj}" was changed successfully.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def _response_post_save(self, request, obj):
opts = self.model._meta
if self.has_view_or_change_permission(request):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
return self._response_post_save(request, obj)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
return self._response_post_save(request, obj)
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
            # If we didn't get an action from the chosen form, the POST data
            # is invalid; the form validation check below will then fail, so
            # there is nothing more to do here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func = self.get_actions(request)[action][0]
            # Get the list of selected PKs. If nothing is selected, bail out,
            # unless "select across" was chosen, in which case the action is
            # performed on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
def response_delete(self, request, obj_display, obj_id):
"""
Determine the HttpResponse for the delete_view stage.
"""
opts = self.model._meta
if IS_POPUP_VAR in request.POST:
popup_response_data = json.dumps({
'action': 'delete',
'value': str(obj_id),
})
return TemplateResponse(request, self.popup_response_template or [
'admin/%s/%s/popup_response.html' % (opts.app_label, opts.model_name),
'admin/%s/popup_response.html' % opts.app_label,
'admin/popup_response.html',
], {
'popup_response_data': popup_response_data,
})
self.message_user(
request,
_('The %(name)s "%(obj)s" was deleted successfully.') % {
'name': opts.verbose_name,
'obj': obj_display,
},
messages.SUCCESS,
)
if self.has_change_permission(request, None):
post_url = reverse(
'admin:%s_%s_changelist' % (opts.app_label, opts.model_name),
current_app=self.admin_site.name,
)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts}, post_url
)
else:
post_url = reverse('admin:index', current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
media=self.media,
)
return TemplateResponse(
request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html",
],
context,
)
def get_inline_formsets(self, request, formsets, inline_instances, obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
has_add_permission = inline.has_add_permission(request, obj)
has_change_permission = inline.has_change_permission(request, obj)
has_delete_permission = inline.has_delete_permission(request, obj)
has_view_permission = inline.has_view_permission(request, obj)
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(
inline, formset, fieldsets, prepopulated, readonly, model_admin=self,
has_add_permission=has_add_permission, has_change_permission=has_change_permission,
has_delete_permission=has_delete_permission, has_view_permission=has_view_permission,
)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data from the request's GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
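    # For example, a request for .../add/?title=Dune&authors=1,3 (where
    # 'authors' is a hypothetical ManyToManyField) yields
    # {'title': 'Dune', 'authors': ['1', '3']}.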
def _get_obj_does_not_exist_redirect(self, request, opts, object_id):
"""
Create a message informing the user that the object doesn't exist
and return a redirect to the admin index page.
"""
msg = _("""%(name)s with ID "%(key)s" doesn't exist. Perhaps it was deleted?""") % {
'name': opts.verbose_name,
'key': unquote(object_id),
}
self.message_user(request, msg, messages.WARNING)
url = reverse('admin:index', current_app=self.admin_site.name)
return HttpResponseRedirect(url)
@csrf_protect_m
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._changeform_view(request, object_id, form_url, extra_context)
def _changeform_view(self, request, object_id, form_url, extra_context):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
model = self.model
opts = model._meta
if request.method == 'POST' and '_saveasnew' in request.POST:
object_id = None
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_view_or_change_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(request, opts, object_id)
ModelForm = self.get_form(request, obj, change=not add)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
form_validated = form.is_valid()
if form_validated:
new_object = self.save_form(request, form, change=not add)
else:
new_object = form.instance
formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(request, form, formsets, add)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(request, obj, change=True)
if not add and not self.has_change_permission(request, obj):
readonly_fields = flatten_fieldsets(self.get_fieldsets(request, obj))
else:
readonly_fields = self.get_readonly_fields(request, obj)
adminForm = helpers.AdminForm(
form,
list(self.get_fieldsets(request, obj)),
# Clear prepopulated fields on a view-only form to avoid a crash.
self.get_prepopulated_fields(request, obj) if add or self.has_change_permission(request, obj) else {},
readonly_fields,
model_admin=self)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
for inline_formset in inline_formsets:
media = media + inline_formset.media
if add:
title = _('Add %s')
elif self.has_change_permission(request, obj):
title = _('Change %s')
else:
title = _('View %s')
context = {
**self.admin_site.each_context(request),
'title': title % opts.verbose_name,
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET,
'to_field': to_field,
'media': media,
'inline_admin_formsets': inline_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'preserved_filters': self.get_preserved_filters(request),
}
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
context['show_save'] = False
context['show_save_and_continue'] = False
# Use the change template instead of the add template.
add = False
context.update(extra_context or {})
return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def autocomplete_view(self, request):
return AutocompleteJsonView.as_view(model_admin=self)(request)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
def _get_edited_object_pks(self, request, prefix):
"""Return POST data values of list_editable primary keys."""
pk_pattern = re.compile(r'{}-\d+-{}$'.format(prefix, self.model._meta.pk.name))
return [value for key, value in request.POST.items() if pk_pattern.match(key)]
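    # For instance, with prefix 'form' and a pk named 'id', POST keys such
    # as 'form-0-id' and 'form-12-id' match the pattern, and their values
    # (the edited objects' primary keys) are returned.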
def _get_list_editable_queryset(self, request, prefix):
"""
Based on POST data, return a queryset of the objects that were edited
via list_editable.
"""
object_pks = self._get_edited_object_pks(request, prefix)
queryset = self.get_queryset(request)
validate = queryset.model._meta.pk.to_python
try:
for pk in object_pks:
validate(pk)
except ValidationError:
# Disable the optimization if the POST data was tampered with.
return queryset
return queryset.filter(pk__in=object_pks)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_view_or_change_permission(request):
raise PermissionDenied
try:
cl = self.get_changelist_instance(request)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET:
return SimpleTemplateResponse('admin/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
actions = self.get_actions(request)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
if action_failed:
# Redirect back to the changelist page to avoid resubmitting the
# form if the user refreshes the browser or uses the "No, take
# me back" button on the action confirmation page.
return HttpResponseRedirect(request.get_full_path())
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if request.method == 'POST' and cl.list_editable and '_save' in request.POST:
if not self.has_change_permission(request):
raise PermissionDenied
FormSet = self.get_changelist_formset(request)
modified_objects = self._get_list_editable_queryset(request, FormSet.get_default_prefix())
formset = cl.formset = FormSet(request.POST, request.FILES, queryset=modified_objects)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
msg = ngettext(
"%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount
) % {
'count': changecount,
'name': model_ngettext(opts, changecount),
}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable and self.has_change_permission(request):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
media += action_form.media
else:
action_form = None
selection_note_all = ngettext(
'%(total_count)s selected',
'All %(total_count)s selected',
cl.result_count
)
context = {
**self.admin_site.each_context(request),
'module_name': str(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
'title': cl.title,
'is_popup': cl.is_popup,
'to_field': cl.to_field,
'cl': cl,
'media': media,
'has_add_permission': self.has_add_permission(request),
'opts': cl.opts,
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'preserved_filters': self.get_preserved_filters(request),
**(extra_context or {}),
}
request.current_app = self.admin_site.name
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
def get_deleted_objects(self, objs, request):
"""
Hook for customizing the delete process for the delete view and the
"delete selected" action.
"""
return get_deleted_objects(objs, request, self.admin_site)
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._delete_view(request, object_id, extra_context)
def _delete_view(self, request, object_id, extra_context):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(request, opts, object_id)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
deleted_objects, model_count, perms_needed, protected = self.get_deleted_objects([obj], request)
if request.POST and not protected: # The user has confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = str(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = str(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = {
**self.admin_site.each_context(request),
'title': title,
'object_name': object_name,
'object': obj,
'deleted_objects': deleted_objects,
'model_count': dict(model_count).items(),
'perms_lacking': perms_needed,
'protected': protected,
'opts': opts,
'app_label': app_label,
'preserved_filters': self.get_preserved_filters(request),
'is_popup': IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET,
'to_field': to_field,
**(extra_context or {}),
}
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
return self._get_obj_does_not_exist_redirect(request, model._meta, object_id)
if not self.has_view_or_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
context = {
**self.admin_site.each_context(request),
'title': _('Change history: %s') % obj,
'action_list': action_list,
'module_name': str(capfirst(opts.verbose_name_plural)),
'object': obj,
'opts': opts,
'preserved_filters': self.get_preserved_filters(request),
**(extra_context or {}),
}
request.current_app = self.admin_site.name
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = {
'instance': obj,
'prefix': prefix,
'queryset': inline.get_queryset(request),
}
if request.method == 'POST':
formset_params.update({
'data': request.POST.copy(),
'files': request.FILES,
'save_as_new': '_saveasnew' in request.POST
})
formset = FormSet(**formset_params)
def user_deleted_form(request, obj, formset, index):
"""Return whether or not the user deleted the form."""
return (
inline.has_delete_permission(request, obj) and
'{}-{}-DELETE'.format(formset.prefix, index) in request.POST
)
# Bypass validation of each view-only inline form (since the form's
# data won't be in request.POST), unless the form was deleted.
if not inline.has_change_permission(request, obj if change else None):
for index, form in enumerate(formset.initial_forms):
if user_deleted_form(request, obj, formset, index):
continue
form._errors = {}
form.cleaned_data = form.initial
formsets.append(formset)
inline_instances.append(inline)
return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
classes = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super().__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js',
'inlines%s.js' % extra]
if self.filter_vertical or self.filter_horizontal:
js.extend(['SelectBox.js', 'SelectFilter2.js'])
if self.classes and 'collapse' in self.classes:
js.append('collapse%s.js' % extra)
return forms.Media(js=['admin/js/%s' % url for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
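    # A typical override of these hooks, using the (request, obj) signature:
    # show no extra blank forms when editing an existing parent object.
    #
    #     def get_extra(self, request, obj=None, **kwargs):
    #         return 0 if obj else self.extra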
def get_formset(self, request, obj=None, **kwargs):
"""Return a BaseInlineFormSet class for use in admin add/change views."""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
exclude.extend(self.get_readonly_fields(request, obj))
if excluded is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
'form': self.form,
'formset': self.formset,
'fk_name': self.fk_name,
'fields': fields,
'exclude': exclude,
'formfield_callback': partial(self.formfield_for_dbfield, request=request),
'extra': self.get_extra(request, obj, **kwargs),
'min_num': self.get_min_num(request, obj, **kwargs),
'max_num': self.get_max_num(request, obj, **kwargs),
'can_delete': can_delete,
**kwargs,
}
base_model_form = defaults['form']
can_change = self.has_change_permission(request, obj) if request else True
can_add = self.has_add_permission(request, obj) if request else True
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because on
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance._state.adding:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance representation,
# suitable to be an item in a list.
_('%(class_name)s %(instance)s') % {
'class_name': p._meta.verbose_name,
'instance': p}
)
params = {
'class_name': self._meta.model._meta.verbose_name,
'instance': self.instance,
'related_objects': get_text_list(objs, _('and')),
}
msg = _("Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s")
raise ValidationError(msg, code='deleting_protected', params=params)
def is_valid(self):
result = super().is_valid()
self.hand_clean_DELETE()
return result
def has_changed(self):
# Protect against unauthorized edits.
if not can_change and not self.instance._state.adding:
return False
if not can_add and self.instance._state.adding:
return False
return super().has_changed()
defaults['form'] = DeleteProtectedModelForm
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def _get_form_for_get_fields(self, request, obj=None):
return self.get_formset(request, obj, fields=None).form
def get_queryset(self, request):
queryset = super().get_queryset(request)
if not self.has_view_or_change_permission(request):
queryset = queryset.none()
return queryset
def has_add_permission(self, request, obj):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the view permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_view_permission(request, obj)
return super().has_add_permission(request)
def has_change_permission(self, request, obj=None):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the view permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_view_permission(request, obj)
return super().has_change_permission(request)
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the view permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_view_permission(request, obj)
return super().has_delete_permission(request, obj)
def has_view_permission(self, request, obj=None):
if self.opts.auto_created:
opts = self.opts
            # The model was auto-created as intermediary for a many-to-many
            # relationship; find the target model.
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
return (
request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename('view', opts))) or
request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename('change', opts)))
)
return super().has_view_permission(request)
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
import hashlib
import json
import os
import posixpath
import re
import warnings
from urllib.parse import unquote, urldefrag, urlsplit, urlunsplit
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.cache import (
InvalidCacheBackendError, cache as default_cache, caches,
)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.deprecation import RemovedInDjango31Warning
from django.utils.functional import LazyObject
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super().__init__(location, base_url, *args, **kwargs)
        # FileSystemStorage falls back to MEDIA_ROOT when location
        # is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super().path(name)
class HashedFilesMixin:
default_template = """url("%s")"""
max_post_process_passes = 5
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._patterns = {}
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Return a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None, filename=None):
        # `filename` is the name of the file to hash if `content` isn't given.
# `name` is the base name to construct the new hashed filename from.
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
filename = (filename and urlsplit(unquote(filename)).path.strip()) or clean_name
opened = content is None
if opened:
if not self.exists(filename):
raise ValueError("The file '%s' could not be found with %r." % (filename, self))
try:
content = self.open(filename)
except OSError:
# Handle directory paths and fragments
return name
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix)
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
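    # For illustration: if the content of 'css/base.css' hashes to
    # '363573baa43c' (a made-up digest), hashed_name('css/base.css') returns
    # 'css/base.363573baa43c.css'.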
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
"""
        Return the URL for ``name``, with the name hashed by
        ``hashed_name_func`` unless DEBUG is enabled and ``force`` is False,
        in which case the non-hashed URL is returned.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
args = (clean_name,)
if hashed_files is not None:
args += (hashed_files,)
hashed_name = hashed_name_func(*args)
final_url = super().url(hashed_name)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix)
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url(self, name, force=False):
"""
        Return the URL for ``name``: the non-hashed URL in DEBUG mode,
        the hashed URL otherwise.
"""
return self._url(self.stored_name, name, force)
def url_converter(self, name, hashed_files, template=None):
"""
Return the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Convert the matched URL to a normalized and hashed URL.
This requires figuring out which files the matched URL resolves
to and calling the url() method of the storage.
"""
matched, url = matchobj.groups()
# Ignore absolute/protocol-relative and data-uri URLs.
if re.match(r'^[a-z]+:', url):
return matched
# Ignore absolute URLs that don't point to a static file (dynamic
# CSS / JS?). Note that STATIC_URL cannot be empty.
if url.startswith('/') and not url.startswith(settings.STATIC_URL):
return matched
# Strip off the fragment so a path-like fragment won't interfere.
url_path, fragment = urldefrag(url)
if url_path.startswith('/'):
# Otherwise the condition above would have returned prematurely.
assert url_path.startswith(settings.STATIC_URL)
target_name = url_path[len(settings.STATIC_URL):]
else:
# We're using the posixpath module to mix paths and URLs conveniently.
source_name = name if os.sep == '/' else name.replace(os.sep, '/')
target_name = posixpath.join(posixpath.dirname(source_name), url_path)
# Determine the hashed name of the target file with the storage backend.
hashed_url = self._url(
self._stored_name, unquote(target_name),
force=True, hashed_files=hashed_files,
)
transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
# Restore the fragment that was stripped off earlier.
if fragment:
transformed_url += ('?#' if '?#' in url else '#') + fragment
# Return the hashed version to the file
return template % unquote(transformed_url)
return converter
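    # Sketch of the converter at work inside 'css/base.css' (made-up hash):
    #
    #     url("../img/logo.png") -> url("../img/logo.5e0040571e1a.png")
    #
    # The original relative path is preserved; only the final path segment
    # is swapped for its hashed counterpart.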
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given dictionary of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = {}
# build a list of adjustable files
adjustable_paths = [
path for path in paths
if matches_patterns(path, self._patterns)
]
# Do a single pass first. Post-process all files once, then repeat for
# adjustable files.
for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
paths = {path: paths[path] for path in adjustable_paths}
for i in range(self.max_post_process_passes):
substitutions = False
for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
substitutions = substitutions or subst
if not substitutions:
break
if substitutions:
yield 'All', None, RuntimeError('Max post-process passes exceeded.')
# Store the processed paths
self.hashed_files.update(hashed_files)
def _post_process(self, paths, adjustable_paths, hashed_files):
# Sort the files by directory level
def path_level(name):
return len(name.split(os.sep))
for name in sorted(paths, key=path_level, reverse=True):
substitutions = True
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
# generate the hash with the original content, even for
# adjustable files.
if hash_key not in hashed_files:
hashed_name = self.hashed_name(name, original_file)
else:
hashed_name = hashed_files[hash_key]
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
old_hashed_name = hashed_name
content = original_file.read().decode(settings.FILE_CHARSET)
for extension, patterns in self._patterns.items():
if matches_patterns(path, (extension,)):
for pattern, template in patterns:
converter = self.url_converter(name, hashed_files, template)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc, False
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(content.encode())
# Save intermediate file for reference
saved_name = self._save(hashed_name, content_file)
hashed_name = self.hashed_name(name, content_file)
if self.exists(hashed_name):
self.delete(hashed_name)
saved_name = self._save(hashed_name, content_file)
hashed_name = self.clean_name(saved_name)
# If the file hash stayed the same, this file didn't change
if old_hashed_name == hashed_name:
substitutions = False
processed = True
if not processed:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = self.clean_name(saved_name)
# and then set the cache accordingly
hashed_files[hash_key] = hashed_name
yield name, hashed_name, processed, substitutions
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def _stored_name(self, name, hashed_files):
# Normalize the path to avoid multiple names for the same file like
# ../foo/bar.css and ../foo/../foo/bar.css which normalize to the same
# path.
name = posixpath.normpath(name)
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
return cache_name
def stored_name(self, name):
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name:
return cache_name
# No cached name found, recalculate it from the files.
intermediate_name = name
for i in range(self.max_post_process_passes + 1):
cache_name = self.clean_name(
self.hashed_name(name, content=None, filename=intermediate_name)
)
if intermediate_name == cache_name:
# Store the hashed name if there was a miss.
self.hashed_files[hash_key] = cache_name
return cache_name
else:
# Move on to the next intermediate file.
intermediate_name = cache_name
# If the cache name can't be determined after the max number of passes,
# the intermediate files on disk may be corrupt; avoid an infinite loop.
raise ValueError("The name '%s' could not be hashed with %r." % (name, self))
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = '1.0' # the manifest format standard
manifest_name = 'staticfiles.json'
manifest_strict = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.open(self.manifest_name) as manifest:
return manifest.read().decode()
except OSError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return {}
try:
stored = json.loads(content)
except json.JSONDecodeError:
pass
else:
version = stored.get('version')
if version == '1.0':
return stored.get('paths', {})
raise ValueError("Couldn't load manifest '%s' (version %s)" %
(self.manifest_name, self.manifest_version))
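    # The manifest read above is a JSON document of this shape (the hash is
    # made up):
    #
    #     {"version": "1.0",
    #      "paths": {"css/base.css": "css/base.31652d31b392.css"}}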
def post_process(self, *args, **kwargs):
self.hashed_files = {}
yield from super().post_process(*args, **kwargs)
self.save_manifest()
def save_manifest(self):
payload = {'paths': self.hashed_files, 'version': self.manifest_version}
if self.exists(self.manifest_name):
self.delete(self.manifest_name)
contents = json.dumps(payload).encode()
self._save(self.manifest_name, ContentFile(contents))
def stored_name(self, name):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
hash_key = self.hash_key(clean_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
if self.manifest_strict:
raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name)
cache_name = self.clean_name(self.hashed_name(name))
unparsed_name = list(parsed_name)
unparsed_name[2] = cache_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix)
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
class _MappingCache:
"""
A small dict-like wrapper for a given cache backend instance.
"""
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.hashed_files = _MappingCache(caches['staticfiles'])
except InvalidCacheBackendError:
# Use the default backend
self.hashed_files = _MappingCache(default_cache)
def hash_key(self, name):
key = hashlib.md5(self.clean_name(name).encode()).hexdigest()
return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
'CachedStaticFilesStorage is deprecated in favor of '
'ManifestStaticFilesStorage.',
RemovedInDjango31Warning, stacklevel=2,
)
super().__init__(*args, **kwargs)
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import (
DisallowedModelAdminLookup, DisallowedModelAdminToField,
)
from django.contrib.admin.options import (
IS_POPUP_VAR, TO_FIELD_VAR, IncorrectLookupParameters,
)
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote,
)
from django.core.exceptions import (
FieldDoesNotExist, ImproperlyConfigured, SuspiciousOperation,
)
from django.core.paginator import InvalidPage
from django.db import models
from django.db.models.expressions import Combinable, F, OrderBy
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.timezone import make_aware
from django.utils.translation import gettext
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
class ChangeList:
def __init__(self, request, model, list_display, list_display_links,
list_filter, date_hierarchy, search_fields, list_select_related,
list_per_page, list_max_show_all, list_editable, model_admin, sortable_by):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_queryset = model_admin.get_queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.has_filters = None
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_max_show_all = list_max_show_all
self.model_admin = model_admin
self.preserved_filters = model_admin.get_preserved_filters(request)
self.sortable_by = sortable_by
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
to_field = request.GET.get(TO_FIELD_VAR)
if to_field and not model_admin.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
self.to_field = to_field
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
if self.is_popup:
self.list_editable = ()
else:
self.list_editable = list_editable
self.query = request.GET.get(SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
self.get_results(request)
if self.is_popup:
title = gettext('Select %s')
elif self.model_admin.has_change_permission(request):
title = gettext('Select %s to change')
else:
title = gettext('Select %s to view')
self.title = title % self.opts.verbose_name
self.pk_attname = self.lookup_opts.pk.attname
def get_filters_params(self, params=None):
"""
Return all params except IGNORED_PARAMS.
"""
params = params or self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
for key, value in lookup_params.items():
if not self.model_admin.lookup_allowed(key, value):
raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)
filter_specs = []
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(request, lookup_params, self.model, self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for the
# type of the given field.
field, field_list_filter_class = list_filter, FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model, field_path)[-1]
lookup_params_count = len(lookup_params)
spec = field_list_filter_class(
field, request, lookup_params,
self.model, self.model_admin, field_path=field_path,
)
# field_list_filter_class removes any lookup_params it
# processes. If that happened, check if distinct() is needed to
# remove duplicate results.
if lookup_params_count > len(lookup_params):
use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, field_path)
if spec and spec.has_output():
filter_specs.append(spec)
if self.date_hierarchy:
# Create bounded lookup parameters so that the query is more
# efficient.
year = lookup_params.pop('%s__year' % self.date_hierarchy, None)
if year is not None:
month = lookup_params.pop('%s__month' % self.date_hierarchy, None)
day = lookup_params.pop('%s__day' % self.date_hierarchy, None)
try:
from_date = datetime(
int(year),
int(month if month is not None else 1),
int(day if day is not None else 1),
)
except ValueError as e:
raise IncorrectLookupParameters(e) from e
if settings.USE_TZ:
from_date = make_aware(from_date)
if day:
to_date = from_date + timedelta(days=1)
elif month:
# In this branch, from_date will always be the first of a
# month, so advancing 32 days gives the next month.
to_date = (from_date + timedelta(days=32)).replace(day=1)
else:
to_date = from_date.replace(year=from_date.year + 1)
lookup_params.update({
'%s__gte' % self.date_hierarchy: from_date,
'%s__lt' % self.date_hierarchy: to_date,
})
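                # A minimal sketch of the bounds built above (hypothetical
                # values, not part of the original code):
                #   date_hierarchy='pub_date', year=2019, month=1, day=None
                #     from_date = datetime(2019, 1, 1)
                #     to_date = datetime(2019, 2, 1)  # (from_date + 32 days).replace(day=1)
                #   yielding pub_date__gte=from_date and pub_date__lt=to_date.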
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, key)
return filter_specs, bool(filter_specs), lookup_params, use_distinct
except FieldDoesNotExist as e:
raise IncorrectLookupParameters(e) from e
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
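    # Illustrative sketch of get_query_string() (hypothetical params, not
    # part of the original code): with self.params == {'o': '1', 'q': 'foo'},
    #   self.get_query_string({'p': 2}, remove=['q'])  ->  '?o=1&p=2'
    # New params override or extend the preserved ones; `remove` drops every
    # key that starts with one of the given prefixes.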
def get_results(self, request):
paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
if self.model_admin.show_full_result_count:
full_result_count = self.root_queryset.count()
else:
full_result_count = None
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.queryset._clone()
else:
try:
result_list = paginator.page(self.page_num + 1).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.show_full_result_count = self.model_admin.show_full_result_count
# Admin actions are shown if there is at least one entry
# or if entries are not counted because show_full_result_count is disabled
self.show_admin_actions = not self.show_full_result_count or bool(full_result_count)
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def get_ordering_field(self, field_name):
"""
Return the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Return None if no
proper model field name can be matched.
"""
try:
field = self.lookup_opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
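    # A hedged example of the non-field branch above (hypothetical admin
    # method, not part of the original code):
    #   def colored_name(self, obj): ...
    #   colored_name.admin_order_field = 'name'
    # get_ordering_field('colored_name') would then return 'name'.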
def get_ordering(self, request, queryset):
"""
Return the list of ordering fields for the change list.
First check the get_ordering() method in model admin, then check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by calling _get_deterministic_ordering() with the
constructed ordering.
"""
params = self.params
ordering = list(self.model_admin.get_ordering(request) or self._get_default_ordering())
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
                    _, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
if hasattr(order_field, 'as_sql'):
# order_field is an expression.
ordering.append(order_field.desc() if pfx == '-' else order_field.asc())
# reverse order if order_field has already "-" as prefix
elif order_field.startswith('-') and pfx == '-':
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
return self._get_deterministic_ordering(ordering)
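    # Sketch of the ORDER_VAR format consumed above (hypothetical request,
    # not part of the original code): '?o=1.-2' means "order by
    # list_display[1] ascending, then by list_display[2] descending"; each
    # dotted segment is split with rpartition('-') into an optional '-'
    # prefix and an index.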
def _get_deterministic_ordering(self, ordering):
"""
Ensure a deterministic order across all database backends. Search for a
single field or unique together set of fields providing a total
        ordering. If these are missing, augment the ordering with a descending
        primary key.
"""
ordering = list(ordering)
ordering_fields = set()
total_ordering_fields = {'pk'} | {
field.attname for field in self.lookup_opts.fields
if field.unique and not field.null
}
for part in ordering:
# Search for single field providing a total ordering.
field_name = None
if isinstance(part, str):
field_name = part.lstrip('-')
elif isinstance(part, F):
field_name = part.name
elif isinstance(part, OrderBy) and isinstance(part.expression, F):
field_name = part.expression.name
if field_name:
# Normalize attname references by using get_field().
try:
field = self.lookup_opts.get_field(field_name)
except FieldDoesNotExist:
# Could be "?" for random ordering or a related field
# lookup. Skip this part of introspection for now.
continue
# Ordering by a related field name orders by the referenced
# model's ordering. Skip this part of introspection for now.
if field.remote_field and field_name == field.name:
continue
if field.attname in total_ordering_fields:
break
ordering_fields.add(field.attname)
else:
# No single total ordering field, try unique_together.
for field_names in self.lookup_opts.unique_together:
# Normalize attname references by using get_field().
fields = [self.lookup_opts.get_field(field_name) for field_name in field_names]
# Composite unique constraints containing a nullable column
# cannot ensure total ordering.
if any(field.null for field in fields):
continue
if ordering_fields.issuperset(field.attname for field in fields):
break
else:
# If no set of unique fields is present in the ordering, rely
# on the primary key to provide total ordering.
ordering.append('-pk')
return ordering
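    # Worked example of the fallback above (hypothetical model, not part of
    # the original code): for ordering ['name'] where `name` is neither
    # unique nor covered by a non-nullable unique_together, neither loop
    # breaks, so '-pk' is appended and ['name', '-pk'] provides a total
    # ordering.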
def get_ordering_field_columns(self):
"""
Return a dictionary of ordering field column numbers and asc/desc.
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = {}
if ORDER_VAR not in self.params:
            # For ordering specified on the ModelAdmin or model Meta, we
            # don't know the right column numbers absolutely because more
            # than one column may be associated with that ordering, so we
            # guess.
for field in ordering:
if isinstance(field, (Combinable, OrderBy)):
if not isinstance(field, OrderBy):
field = field.asc()
if isinstance(field.expression, F):
order_type = 'desc' if field.descending else 'asc'
field = field.expression.name
else:
continue
elif field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
                _, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request):
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.root_queryset
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
            # are of the wrong type, so we might get FieldError,
            # ValueError, ValidationError, or some other exception.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.model_admin.get_search_results(request, qs, self.query)
# Remove duplicates from results, if necessary
        if filters_use_distinct or search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.list_select_related is True:
return qs.select_related()
if self.list_select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.list_select_related:
return qs.select_related(*self.list_select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.lookup_opts.get_field(field_name)
except FieldDoesNotExist:
pass
else:
if isinstance(field.remote_field, models.ManyToOneRel):
# <FK>_id field names don't require a join.
if field_name != field.get_attname():
return True
return False
def url_for_result(self, result):
pk = getattr(result, self.pk_attname)
return reverse('admin:%s_%s_change' % (self.opts.app_label,
self.opts.model_name),
args=(quote(pk),),
current_app=self.model_admin.admin_site.name)
|
d2c6295e4aeb6521a111498b1f3bd9c0f49e71bd192b8c3b2d41f3f1d2b7eba6 | import datetime
import logging
import os
import shutil
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.base import (
VALID_KEY_CHARS, CreateError, SessionBase, UpdateError,
)
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils import timezone
class SessionStore(SessionBase):
"""
Implement a file based session store.
"""
def __init__(self, session_key=None):
self.storage_path = self._get_storage_path()
self.file_prefix = settings.SESSION_COOKIE_NAME
super().__init__(session_key)
@classmethod
def _get_storage_path(cls):
try:
return cls._storage_path
except AttributeError:
storage_path = getattr(settings, 'SESSION_FILE_PATH', None) or tempfile.gettempdir()
# Make sure the storage path is valid.
if not os.path.isdir(storage_path):
raise ImproperlyConfigured(
"The session storage path %r doesn't exist. Please set your"
" SESSION_FILE_PATH setting to an existing directory in which"
" Django can store session data." % storage_path)
cls._storage_path = storage_path
return storage_path
def _key_to_file(self, session_key=None):
"""
Get the file associated with this session key.
"""
if session_key is None:
session_key = self._get_or_create_session_key()
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if not set(session_key).issubset(VALID_KEY_CHARS):
raise InvalidSessionKey(
"Invalid characters in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
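    # Illustrative sketch (hypothetical values, not part of the original
    # code): with storage_path='/tmp' and SESSION_COOKIE_NAME='sessionid',
    #   self._key_to_file('abc123')  ->  '/tmp/sessionidabc123'
    # The VALID_KEY_CHARS check above is what stops keys like '../etc' from
    # escaping the storage directory.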
def _last_modification(self):
"""
Return the modification time of the file storing the session's content.
"""
modification = os.stat(self._key_to_file()).st_mtime
if settings.USE_TZ:
modification = datetime.datetime.utcfromtimestamp(modification)
return modification.replace(tzinfo=timezone.utc)
return datetime.datetime.fromtimestamp(modification)
def _expiry_date(self, session_data):
"""
Return the expiry time of the file storing the session's content.
"""
return session_data.get('_session_expiry') or (
self._last_modification() + datetime.timedelta(seconds=settings.SESSION_COOKIE_AGE)
)
def load(self):
session_data = {}
try:
with open(self._key_to_file(), encoding='ascii') as session_file:
file_data = session_file.read()
# Don't fail if there is no data in the session file.
# We may have opened the empty placeholder file.
if file_data:
try:
session_data = self.decode(file_data)
except (EOFError, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(str(e))
self.create()
# Remove expired sessions.
expiry_age = self.get_expiry_age(expiry=self._expiry_date(session_data))
if expiry_age <= 0:
session_data = {}
self.delete()
self.create()
except (OSError, SuspiciousOperation):
self._session_key = None
return session_data
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
def save(self, must_create=False):
if self.session_key is None:
return self.create()
# Get the session data now, before we start messing
# with the file it is stored within.
session_data = self._get_session(no_load=must_create)
session_file_name = self._key_to_file()
try:
# Make sure the file exists. If it does not already exist, an
# empty placeholder file is created.
flags = os.O_WRONLY | getattr(os, 'O_BINARY', 0)
if must_create:
flags |= os.O_EXCL | os.O_CREAT
fd = os.open(session_file_name, flags)
os.close(fd)
except FileNotFoundError:
if not must_create:
raise UpdateError
except FileExistsError:
if must_create:
raise CreateError
# Write the session file without interfering with other threads
# or processes. By writing to an atomically generated temporary
# file and then using the atomic os.rename() to make the complete
# file visible, we avoid having to lock the session file, while
# still maintaining its integrity.
#
# Note: Locking the session file was explored, but rejected in part
# because in order to be atomic and cross-platform, it required a
# long-lived lock file for each session, doubling the number of
# files in the session storage directory at any given time. This
# rename solution is cleaner and avoids any additional overhead
# when reading the session data, which is the more common case
# unless SESSION_SAVE_EVERY_REQUEST = True.
#
# See ticket #8616.
dir, prefix = os.path.split(session_file_name)
try:
output_file_fd, output_file_name = tempfile.mkstemp(dir=dir, prefix=prefix + '_out_')
renamed = False
try:
try:
os.write(output_file_fd, self.encode(session_data).encode())
finally:
os.close(output_file_fd)
# This will atomically rename the file (os.rename) if the OS
# supports it. Otherwise this will result in a shutil.copy2
# and os.unlink (for example on Windows). See #9084.
shutil.move(output_file_name, session_file_name)
renamed = True
finally:
if not renamed:
os.unlink(output_file_name)
except (EOFError, OSError):
pass
def exists(self, session_key):
return os.path.exists(self._key_to_file(session_key))
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
os.unlink(self._key_to_file(session_key))
except OSError:
pass
def clean(self):
pass
@classmethod
def clear_expired(cls):
storage_path = cls._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
for session_file in os.listdir(storage_path):
if not session_file.startswith(file_prefix):
continue
session_key = session_file[len(file_prefix):]
session = cls(session_key)
# When an expired session is loaded, its file is removed, and a
# new file is immediately created. Prevent this by disabling
# the create() method.
session.create = lambda: None
session.load()
|
889703bd540361d65c203dccecdc0aa91a2afa4ca92c7d219a137b38c707f9ae | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils.encoding import force_str
# LayerMapping exceptions.
class LayerMapError(Exception):
pass
class InvalidString(LayerMapError):
pass
class InvalidDecimal(LayerMapError):
pass
class InvalidInteger(LayerMapError):
pass
class MissingForeignKey(LayerMapError):
pass
class LayerMapping:
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {
1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.BigAutoField: OFTInteger64,
models.BooleanField: (OFTInteger, OFTReal, OFTString),
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.UUIDField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding='utf-8',
transaction_mode='commit_on_success',
transform=True, unique=None, using=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, str):
self.ds = DataSource(data, encoding=encoding)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using if using is not None else router.db_for_write(model)
self.spatial_backend = connections[self.using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
        # things don't check out beforehand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if connections[self.using].features.supports_transform:
self.geo_field = self.geometry_field()
else:
transform = False
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
            # Making sure the encoding exists; if not, a LookupError
            # exception will be raised.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
self.transaction_mode = transaction_mode
if transaction_mode == 'autocommit':
self.transaction_decorator = None
elif transaction_mode == 'commit_on_success':
self.transaction_decorator = transaction.atomic
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"Check the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
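    # Illustrative sketch (hypothetical values, not part of the original
    # code): check_fid_range((10, 20)) -> slice(10, 20), which save() later
    # applies as self.layer[slice(10, 20)] to map only those feature IDs.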
def check_layer(self):
"""
Check the Layer metadata and ensure that it's compatible with the
        mapping information and model. Unlike previous revisions, there is no
        need to iterate through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
        # No need to iterate through each feature in the Layer; simply check
        # the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except GDALException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.remote_field.model
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_model._meta.get_field(rel_name)
except FieldDoesNotExist:
                            raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                                (rel_name, rel_model.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if model_field.__class__ not in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Check the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, str)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Check the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, str):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, return a dictionary of keyword arguments for
constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except GDALException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
                # The related _model_ (not a field) was passed in, indicating
                # another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`), construct
and return the uniqueness keyword arguments -- a subset of the feature
kwargs.
"""
if isinstance(self.unique, str):
return {self.unique: kwargs[self.unique]}
else:
return {fld: kwargs[fld] for fld in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verify if the OGR Field contents are acceptable to the model field. If
they are, return the verified value, otherwise raise an exception.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding and ogr_field.value is not None:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_str(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and val is not None and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal(
'A DecimalField with max_digits %d, decimal_places %d must '
'round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec)
)
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
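    # Worked example of the precision check above (hypothetical field, not
    # part of the original code): for DecimalField(max_digits=5,
    # decimal_places=2), max_prec = 5 - 2 = 3. Decimal('123.45').as_tuple()
    # gives digits=(1, 2, 3, 4, 5) and exponent=-2, so n_prec =
    # len(digits[:-2]) = 3, which passes; Decimal('1234.5') gives n_prec = 4
    # and raises InvalidDecimal.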
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
retrieve the related model for the ForeignKey mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey(
'No ForeignKey %s model found with keyword arguments: %s' %
(rel_model.__name__, fk_kwargs)
)
def verify_geom(self, geom, model_field):
"""
Verify the geometry -- construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
# #### Other model methods ####
def coord_transform(self):
"Return the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as exc:
raise LayerMapError(
'Could not translate between the data source and model geometry.'
) from exc
def geometry_field(self):
"Return the GeometryField instance associated with the geographic column."
# Use `get_field()` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
return opts.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
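    # Illustrative sketch (hypothetical fields, not part of the original
    # code): a layer of Polygons mapped onto a MultiPolygonField satisfies
    # both conditions above, so verify_geom() wraps each Polygon in a
    # MultiPolygon (via MULTI_TYPES) before saving.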
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Save the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed,
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError as msg:
# Something borked the validation
if strict:
raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
                            # geometries, and updating the attribute with the
                            # combined geometry's WKT.
geom_value = getattr(m, self.geom_field)
if geom_value is None:
geom = OGRGeometry(kwargs[self.geom_field])
else:
geom = geom_value.ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new:
geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose:
stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
except Exception as msg:
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write(
'Failed to save the feature (id: %s) into the '
'model with the keyword arguments:\n' % feat.fid
)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
if self.transaction_decorator is not None:
_save = self.transaction_decorator(_save)
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
            # special (e.g., [100:] instead of [90:100]).
if i + 1 == n_i:
step_slice = slice(beg, None)
else:
step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except Exception: # Deliberately catch everything
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
|
aeb8e51c05ab48c325ef6b7809b99d52b0176e4fcab6ac0c54c9e7760c81de9e | import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, c_char_p, c_int
from ctypes.util import find_library
from django.contrib.gis.gdal.error import GDALException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GDAL_LIBRARY_PATH
except (AttributeError, ImportError, ImproperlyConfigured, OSError):
lib_path = None
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT shared libraries
lib_names = ['gdal203', 'gdal202', 'gdal201', 'gdal20']
elif os.name == 'posix':
# *NIX library names.
lib_names = ['gdal', 'GDAL', 'gdal2.3.0', 'gdal2.2.0', 'gdal2.1.0', 'gdal2.0.0']
else:
raise ImproperlyConfigured('GDAL is unsupported on OS "%s".' % os.name)
# Using the ctypes `find_library` utility to find the
# path to the GDAL library from the list of library names.
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
if lib_path is None:
raise ImproperlyConfigured(
'Could not find the GDAL library (tried "%s"). Is GDAL installed? '
'If it is, try setting GDAL_LIBRARY_PATH in your settings.'
% '", "'.join(lib_names)
)
# This loads the GDAL/OGR C library
lgdal = CDLL(lib_path)
# On Windows, the GDAL binaries have some OSR routines exported with
# STDCALL, while others are not. Thus, the library will also need to
# be loaded up as WinDLL for said OSR functions that require the
# different calling convention.
if os.name == 'nt':
from ctypes import WinDLL
lwingdal = WinDLL(lib_path)
def std_call(func):
"""
Return the correct STDCALL function for certain OSR routines on Win32
platforms.
"""
if os.name == 'nt':
return lwingdal[func]
else:
return lgdal[func]
# #### Version-information functions. ####
# Return GDAL library version information with the given key.
_version_info = std_call('GDALVersionInfo')
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
def gdal_version():
"Return only the GDAL version number information."
return _version_info(b'RELEASE_NAME')
def gdal_full_version():
"Return the full GDAL version information."
    return _version_info(b'')
version_regex = re.compile(r'^(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<subminor>\d+))?')
def gdal_version_info():
ver = gdal_version().decode()
m = version_regex.match(ver)
if not m:
raise GDALException('Could not parse GDAL version string "%s"' % ver)
return {key: m.group(key) for key in ('major', 'minor', 'subminor')}
_verinfo = gdal_version_info()
GDAL_MAJOR_VERSION = int(_verinfo['major'])
GDAL_MINOR_VERSION = int(_verinfo['minor'])
GDAL_SUBMINOR_VERSION = _verinfo['subminor'] and int(_verinfo['subminor'])
GDAL_VERSION = (GDAL_MAJOR_VERSION, GDAL_MINOR_VERSION, GDAL_SUBMINOR_VERSION)
del _verinfo
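# Illustrative sketch of the parsing above (hypothetical version string, not
# part of the original code): if gdal_version() returns b'2.3.1', then
# gdal_version_info() == {'major': '2', 'minor': '3', 'subminor': '1'} and
# GDAL_VERSION == (2, 3, 1). A release without a subminor, e.g. '2.3', leaves
# GDAL_SUBMINOR_VERSION as None.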
# Set library error handling so that errors are logged
CPLErrorHandler = CFUNCTYPE(None, c_int, c_int, c_char_p)
def err_handler(error_class, error_number, message):
logger.error('GDAL_ERROR %d: %s', error_number, message)
err_handler = CPLErrorHandler(err_handler)
def function(name, args, restype):
func = std_call(name)
func.argtypes = args
func.restype = restype
return func
set_error_handler = function('CPLSetErrorHandler', [CPLErrorHandler], CPLErrorHandler)
set_error_handler(err_handler)
|
4bf9bd165b33edfae0b6f56c7c1cf64de3b508dccc36bbb2de9580ac3c5df317 | """
SQL functions reference lists:
https://www.gaia-gis.it/gaia-sins/spatialite-sql-4.3.0.html
"""
from django.contrib.gis.db.backends.base.operations import (
BaseSpatialOperations,
)
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geos.geometry import GEOSGeometry, GEOSGeometryBase
from django.contrib.gis.geos.prototypes.io import wkb_r
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.operations import DatabaseOperations
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
class SpatialiteNullCheckOperator(SpatialOperator):
def as_sql(self, connection, lookup, template_params, sql_params):
sql, params = super().as_sql(connection, lookup, template_params, sql_params)
return '%s > 0' % sql, params
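    # A minimal sketch of the wrapping above (hypothetical lookup, not part
    # of the original code): SpatiaLite's binary predicates return integer
    # results rather than booleans, so a lookup that would render as
    #   Equals(geom_a, geom_b)
    # is emitted here as
    #   Equals(geom_a, geom_b) > 0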
class SpatiaLiteOperations(BaseSpatialOperations, DatabaseOperations):
name = 'spatialite'
spatialite = True
Adapter = SpatiaLiteAdapter
collect = 'Collect'
extent = 'Extent'
makeline = 'MakeLine'
unionagg = 'GUnion'
from_text = 'GeomFromText'
gis_operators = {
# Binary predicates
'equals': SpatialiteNullCheckOperator(func='Equals'),
'disjoint': SpatialiteNullCheckOperator(func='Disjoint'),
'touches': SpatialiteNullCheckOperator(func='Touches'),
'crosses': SpatialiteNullCheckOperator(func='Crosses'),
'within': SpatialiteNullCheckOperator(func='Within'),
'overlaps': SpatialiteNullCheckOperator(func='Overlaps'),
'contains': SpatialiteNullCheckOperator(func='Contains'),
'intersects': SpatialiteNullCheckOperator(func='Intersects'),
'relate': SpatialiteNullCheckOperator(func='Relate'),
'coveredby': SpatialiteNullCheckOperator(func='CoveredBy'),
'covers': SpatialiteNullCheckOperator(func='Covers'),
# Returns true if B's bounding box completely contains A's bounding box.
'contained': SpatialOperator(func='MbrWithin'),
# Returns true if A's bounding box completely contains B's bounding box.
'bbcontains': SpatialOperator(func='MbrContains'),
# Returns true if A's bounding box overlaps B's bounding box.
'bboverlaps': SpatialOperator(func='MbrOverlaps'),
# These are implemented here as synonyms for Equals
'same_as': SpatialiteNullCheckOperator(func='Equals'),
'exact': SpatialiteNullCheckOperator(func='Equals'),
# Distance predicates
'dwithin': SpatialOperator(func='PtDistWithin'),
}
disallowed_aggregates = (aggregates.Extent3D,)
select = 'CAST (AsEWKB(%s) AS BLOB)'
function_names = {
'ForcePolygonCW': 'ST_ForceLHR',
'Length': 'ST_Length',
'LineLocatePoint': 'ST_Line_Locate_Point',
'NumPoints': 'ST_NPoints',
'Reverse': 'ST_Reverse',
'Scale': 'ScaleCoords',
'Translate': 'ST_Translate',
'Union': 'ST_Union',
}
@cached_property
def unsupported_functions(self):
unsupported = {'BoundingCircle', 'MemSize'}
if not self.lwgeom_version():
unsupported |= {'Azimuth', 'GeoHash', 'IsValid', 'MakeValid'}
return unsupported
@cached_property
def spatial_version(self):
"""Determine the version of the SpatiaLite library."""
try:
version = self.spatialite_version_tuple()[1:]
except Exception as exc:
raise ImproperlyConfigured(
'Cannot determine the SpatiaLite version for the "%s" database. '
'Was the SpatiaLite initialization SQL loaded on this database?' % (
self.connection.settings_dict['NAME'],
)
) from exc
if version < (4, 3, 0):
raise ImproperlyConfigured('GeoDjango supports SpatiaLite 4.3.0 and above.')
return version
def convert_extent(self, box):
"""
Convert the polygon data received from SpatiaLite to min/max values.
"""
if box is None:
return None
shell = GEOSGeometry(box).shell
xmin, ymin = shell[0][:2]
xmax, ymax = shell[2][:2]
return (xmin, ymin, xmax, ymax)
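    # Illustrative sketch (hypothetical box, not part of the original code):
    # Extent() returns a bounding-box polygon whose shell typically runs
    # (xmin ymin, xmax ymin, xmax ymax, xmin ymax, xmin ymin), so shell[0]
    # and shell[2] are the opposite corners read above.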
def geo_db_type(self, f):
"""
Return None because geometry columns are added via the
`AddGeometryColumn` stored procedure on SpatiaLite.
"""
return None
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
if lookup_type == 'dwithin':
raise ValueError(
'Only numeric values of degree units are allowed on '
'geographic DWithin queries.'
)
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def _get_spatialite_func(self, func):
"""
Helper routine for calling SpatiaLite functions and returning
their result.
Any error occurring in this method should be handled by the caller.
"""
cursor = self.connection._cursor()
try:
cursor.execute('SELECT %s' % func)
row = cursor.fetchone()
finally:
cursor.close()
return row[0]
def geos_version(self):
"Return the version of GEOS used by SpatiaLite as a string."
return self._get_spatialite_func('geos_version()')
def proj4_version(self):
"Return the version of the PROJ.4 library used by SpatiaLite."
return self._get_spatialite_func('proj4_version()')
def lwgeom_version(self):
"""Return the version of LWGEOM library used by SpatiaLite."""
return self._get_spatialite_func('lwgeom_version()')
def spatialite_version(self):
"Return the SpatiaLite library version as a string."
return self._get_spatialite_func('spatialite_version()')
def spatialite_version_tuple(self):
"""
Return the SpatiaLite version as a tuple (version string, major,
minor, subminor).
"""
version = self.spatialite_version()
return (version,) + get_version_tuple(version)
def spatial_aggregate_name(self, agg_name):
"""
Return the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteGeometryColumns
return SpatialiteGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys
return SpatialiteSpatialRefSys
def get_geometry_converter(self, expression):
geom_class = expression.output_field.geom_class
read = wkb_r().read
def converter(value, expression, connection):
return None if value is None else GEOSGeometryBase(read(value), geom_class)
return converter
|
fafca08719592787ed76361d00edf71698ddd20b1ae8fcc6caef3bcf361a5f67 | import datetime
import importlib
import io
import os
import sys
from unittest import mock
from django.apps import apps
from django.core.management import CommandError, call_command
from django.db import (
ConnectionHandler, DatabaseError, connection, connections, models,
)
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.utils import truncate_name
from django.db.migrations.exceptions import InconsistentMigrationHistory
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, override_settings
from .models import UnicodeModel, UnserializableModel
from .routers import TestRouter
from .test_base import MigrationTestBase
class MigrateTests(MigrationTestBase):
"""
Tests running the migrate command.
"""
databases = {'default', 'other'}
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate(self):
"""
Tests basic usage of the migrate command.
"""
# No tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run the migrations to 0001 only
stdout = io.StringIO()
call_command('migrate', 'migrations', '0001', verbosity=1, stdout=stdout, no_color=True)
stdout = stdout.getvalue()
self.assertIn('Target specific migration: 0001_initial, from migrations', stdout)
        self.assertIn('Applying migrations.0001_initial... OK', stdout)
# The correct tables exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run migrations all the way
call_command("migrate", verbosity=0)
# The correct tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Unmigrate everything
stdout = io.StringIO()
call_command('migrate', 'migrations', 'zero', verbosity=1, stdout=stdout, no_color=True)
stdout = stdout.getvalue()
self.assertIn('Unapply all migrations: migrations', stdout)
        self.assertIn('Unapplying migrations.0002_second... OK', stdout)
# Tables are gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'migrations.migrations_test_apps.migrated_app',
])
def test_migrate_with_system_checks(self):
out = io.StringIO()
call_command('migrate', skip_checks=False, no_color=True, stdout=out)
self.assertIn('Apply all migrations: migrated_app', out.getvalue())
@override_settings(INSTALLED_APPS=['migrations', 'migrations.migrations_test_apps.unmigrated_app_syncdb'])
def test_app_without_migrations(self):
msg = "App 'unmigrated_app_syncdb' does not have migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command('migrate', app_label='unmigrated_app_syncdb')
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_clashing_prefix'})
    def test_ambiguous_prefix(self):
msg = (
"More than one migration matches 'a' in app 'migrations'. Please "
"be more specific."
)
with self.assertRaisesMessage(CommandError, msg):
call_command('migrate', app_label='migrations', migration_name='a')
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'})
def test_unknown_prefix(self):
msg = "Cannot find a migration matching 'nonexistent' from app 'migrations'."
with self.assertRaisesMessage(CommandError, msg):
call_command('migrate', app_label='migrations', migration_name='nonexistent')
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_initial_false"})
def test_migrate_initial_false(self):
"""
`Migration.initial = False` skips fake-initial detection.
"""
# Make sure no tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
# Fake rollback
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
# Make sure fake-initial detection does not run
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0)
call_command("migrate", "migrations", "0001", fake=True, verbosity=0)
# Real rollback
call_command("migrate", "migrations", "zero", verbosity=0)
# Make sure it's all gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations"},
DATABASE_ROUTERS=['migrations.routers.TestRouter'],
)
def test_migrate_fake_initial(self):
"""
--fake-initial only works if all tables created in the initial
        migration of an app exist. Database routers must be obeyed when doing
that check.
"""
# Make sure no tables are created
for db in connections:
self.assertTableNotExists("migrations_author", using=db)
self.assertTableNotExists("migrations_tribble", using=db)
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
call_command("migrate", "migrations", "0001", verbosity=0, database="other")
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Also check the "other" database
self.assertTableNotExists("migrations_author", using="other")
self.assertTableExists("migrations_tribble", using="other")
# Fake a roll-back
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0, database="other")
# Make sure the tables still exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble", using="other")
# Try to run initial migration
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", "0001", verbosity=0)
# Run initial migration with an explicit --fake-initial
out = io.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: False):
call_command("migrate", "migrations", "0001", fake_initial=True, stdout=out, verbosity=1)
call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0, database="other")
self.assertIn(
"migrations.0001_initial… faked",
out.getvalue().lower()
)
# Run migrations all the way
call_command("migrate", verbosity=0)
call_command("migrate", verbosity=0, database="other")
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
self.assertTableNotExists("migrations_author", using="other")
self.assertTableNotExists("migrations_tribble", using="other")
self.assertTableNotExists("migrations_book", using="other")
# Fake a roll-back
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0, database="other")
# Make sure the tables still exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Try to run initial migration
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", verbosity=0)
# Run initial migration with an explicit --fake-initial
with self.assertRaises(DatabaseError):
# Fails because "migrations_tribble" does not exist but needs to in
# order to make --fake-initial work.
call_command("migrate", "migrations", fake_initial=True, verbosity=0)
# Fake an apply
call_command("migrate", "migrations", fake=True, verbosity=0)
call_command("migrate", "migrations", fake=True, verbosity=0, database="other")
# Unmigrate everything
call_command("migrate", "migrations", "zero", verbosity=0)
call_command("migrate", "migrations", "zero", verbosity=0, database="other")
# Make sure it's all gone
for db in connections:
self.assertTableNotExists("migrations_author", using=db)
self.assertTableNotExists("migrations_tribble", using=db)
self.assertTableNotExists("migrations_book", using=db)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_fake_split_initial"})
def test_migrate_fake_split_initial(self):
"""
Split initial migrations can be faked with --fake-initial.
"""
call_command("migrate", "migrations", "0002", verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
out = io.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: False):
call_command("migrate", "migrations", "0002", fake_initial=True, stdout=out, verbosity=1)
value = out.getvalue().lower()
self.assertIn("migrations.0001_initial… faked", value)
self.assertIn("migrations.0002_second… faked", value)
# Fake an apply
call_command("migrate", "migrations", fake=True, verbosity=0)
# Unmigrate everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_migrate_conflict_exit(self):
"""
migrate exits if it detects a conflict.
"""
with self.assertRaisesMessage(CommandError, "Conflicting migrations detected"):
call_command("migrate", "migrations")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_showmigrations_list(self):
"""
showmigrations --list displays migrations and whether or not they're
applied.
"""
out = io.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: True):
call_command("showmigrations", format='list', stdout=out, verbosity=0, no_color=False)
self.assertEqual(
'\x1b[1mmigrations\n\x1b[0m'
' [ ] 0001_initial\n'
' [ ] 0002_second\n',
out.getvalue().lower()
)
call_command("migrate", "migrations", "0001", verbosity=0)
out = io.StringIO()
        # Giving an explicit app_label exercises the selective `show_list`
        # path in the command.
call_command("showmigrations", "migrations", format='list', stdout=out, verbosity=0, no_color=True)
self.assertEqual(
'migrations\n'
' [x] 0001_initial\n'
' [ ] 0002_second\n',
out.getvalue().lower()
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_showmigrations_plan(self):
"""
        Tests the --plan output of the showmigrations command.
"""
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third\n"
"[ ] migrations.0002_second\n",
out.getvalue().lower()
)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third … (migrations.0001_initial)\n"
"[ ] migrations.0002_second … (migrations.0001_initial, migrations.0003_third)\n",
out.getvalue().lower()
)
call_command("migrate", "migrations", "0003", verbosity=0)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third\n"
"[ ] migrations.0002_second\n",
out.getvalue().lower()
)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third … (migrations.0001_initial)\n"
"[ ] migrations.0002_second … (migrations.0001_initial, migrations.0003_third)\n",
out.getvalue().lower()
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_plan'})
def test_migrate_plan(self):
"""Tests migrate --plan output."""
out = io.StringIO()
# Show the plan up to the third migration.
call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
'migrations.0001_initial\n'
' Create model Salamander\n'
' Raw Python operation -> Grow salamander tail.\n'
'migrations.0002_second\n'
' Create model Book\n'
" Raw SQL operation -> ['SELECT * FROM migrations_book']\n"
'migrations.0003_third\n'
' Create model Author\n'
" Raw SQL operation -> ['SELECT * FROM migrations_author']\n",
out.getvalue()
)
# Migrate to the third migration.
call_command('migrate', 'migrations', '0003', verbosity=0)
out = io.StringIO()
# Show the plan for when there is nothing to apply.
call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
' No planned migration operations.\n',
out.getvalue()
)
out = io.StringIO()
# Show the plan for reverse migration back to 0001.
call_command('migrate', 'migrations', '0001', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
'migrations.0003_third\n'
' Undo Create model Author\n'
" Raw SQL operation -> ['SELECT * FROM migrations_book']\n"
'migrations.0002_second\n'
' Undo Create model Book\n'
" Raw SQL operation -> ['SELECT * FROM migrations_salamand…\n",
out.getvalue()
)
out = io.StringIO()
# Show the migration plan to fourth, with truncated details.
call_command('migrate', 'migrations', '0004', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
'migrations.0004_fourth\n'
' Raw SQL operation -> SELECT * FROM migrations_author WHE…\n',
out.getvalue()
)
# Show the plan when an operation is irreversible.
# Migrate to the fourth migration.
call_command('migrate', 'migrations', '0004', verbosity=0)
out = io.StringIO()
call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
'migrations.0004_fourth\n'
' Raw SQL operation -> IRREVERSIBLE\n',
out.getvalue()
)
# Cleanup by unmigrating everything: fake the irreversible, then
# migrate all to zero.
call_command('migrate', 'migrations', '0003', fake=True, verbosity=0)
call_command('migrate', 'migrations', 'zero', verbosity=0)
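    # An operation is printed as IRREVERSIBLE above when it carries no
    # reverse recipe. For RunSQL that means reverse_sql was omitted; a
    # minimal sketch (illustrative, not the suite's 0004_fourth):
    #
    #     migrations.RunSQL('SELECT 1')                          # irreversible
    #     migrations.RunSQL('SELECT 1', migrations.RunSQL.noop)  # reversible no-op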
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_empty'})
def test_showmigrations_no_migrations(self):
out = io.StringIO()
call_command('showmigrations', stdout=out, no_color=True)
self.assertEqual('migrations\n (no migrations)\n', out.getvalue().lower())
@override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app'])
def test_showmigrations_unmigrated_app(self):
out = io.StringIO()
call_command('showmigrations', 'unmigrated_app', stdout=out, no_color=True)
self.assertEqual('unmigrated_app\n (no migrations)\n', out.getvalue().lower())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"})
def test_showmigrations_plan_no_migrations(self):
"""
        Tests the --plan output of the showmigrations command when there are
        no migrations.
"""
out = io.StringIO()
call_command('showmigrations', format='plan', stdout=out, no_color=True)
self.assertEqual('(no migrations)\n', out.getvalue().lower())
out = io.StringIO()
call_command('showmigrations', format='plan', stdout=out, verbosity=2, no_color=True)
self.assertEqual('(no migrations)\n', out.getvalue().lower())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_showmigrations_plan_squashed(self):
"""
        Tests the --plan output of the showmigrations command with squashed
        migrations.
"""
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto\n"
"[ ] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower()
)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto … (migrations.1_auto)\n"
"[ ] migrations.3_squashed_5 … (migrations.2_auto)\n"
"[ ] migrations.6_auto … (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto … (migrations.6_auto)\n",
out.getvalue().lower()
)
call_command("migrate", "migrations", "3_squashed_5", verbosity=0)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto\n"
"[x] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower()
)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto … (migrations.1_auto)\n"
"[x] migrations.3_squashed_5 … (migrations.2_auto)\n"
"[ ] migrations.6_auto … (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto … (migrations.6_auto)\n",
out.getvalue().lower()
)
@override_settings(INSTALLED_APPS=[
'migrations.migrations_test_apps.mutate_state_b',
'migrations.migrations_test_apps.alter_fk.author_app',
'migrations.migrations_test_apps.alter_fk.book_app',
])
def test_showmigrations_plan_single_app_label(self):
"""
`showmigrations --plan app_label` output with a single app_label.
"""
# Single app with no dependencies on other apps.
out = io.StringIO()
call_command('showmigrations', 'mutate_state_b', format='plan', stdout=out)
self.assertEqual(
'[ ] mutate_state_b.0001_initial\n'
'[ ] mutate_state_b.0002_add_field\n',
out.getvalue()
)
# Single app with dependencies.
out = io.StringIO()
call_command('showmigrations', 'author_app', format='plan', stdout=out)
self.assertEqual(
'[ ] author_app.0001_initial\n'
'[ ] book_app.0001_initial\n'
'[ ] author_app.0002_alter_id\n',
out.getvalue()
)
# Some migrations already applied.
call_command('migrate', 'author_app', '0001', verbosity=0)
out = io.StringIO()
call_command('showmigrations', 'author_app', format='plan', stdout=out)
self.assertEqual(
'[X] author_app.0001_initial\n'
'[ ] book_app.0001_initial\n'
'[ ] author_app.0002_alter_id\n',
out.getvalue()
)
# Cleanup by unmigrating author_app.
call_command('migrate', 'author_app', 'zero', verbosity=0)
@override_settings(INSTALLED_APPS=[
'migrations.migrations_test_apps.mutate_state_b',
'migrations.migrations_test_apps.alter_fk.author_app',
'migrations.migrations_test_apps.alter_fk.book_app',
])
def test_showmigrations_plan_multiple_app_labels(self):
"""
`showmigrations --plan app_label` output with multiple app_labels.
"""
# Multiple apps: author_app depends on book_app; mutate_state_b doesn't
# depend on other apps.
out = io.StringIO()
call_command('showmigrations', 'mutate_state_b', 'author_app', format='plan', stdout=out)
self.assertEqual(
'[ ] author_app.0001_initial\n'
'[ ] book_app.0001_initial\n'
'[ ] author_app.0002_alter_id\n'
'[ ] mutate_state_b.0001_initial\n'
'[ ] mutate_state_b.0002_add_field\n',
out.getvalue()
)
# Multiple apps: args order shouldn't matter (the same result is
# expected as above).
out = io.StringIO()
call_command('showmigrations', 'author_app', 'mutate_state_b', format='plan', stdout=out)
self.assertEqual(
'[ ] author_app.0001_initial\n'
'[ ] book_app.0001_initial\n'
'[ ] author_app.0002_alter_id\n'
'[ ] mutate_state_b.0001_initial\n'
'[ ] mutate_state_b.0002_add_field\n',
out.getvalue()
)
@override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app'])
def test_showmigrations_plan_app_label_no_migrations(self):
out = io.StringIO()
call_command('showmigrations', 'unmigrated_app', format='plan', stdout=out, no_color=True)
self.assertEqual('(no migrations)\n', out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_forwards(self):
"""
        sqlmigrate outputs forward-looking SQL.
"""
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
output = out.getvalue().lower()
index_tx_start = output.find(connection.ops.start_transaction_sql().lower())
index_op_desc_author = output.find('-- create model author')
index_create_table = output.find('create table')
index_op_desc_tribble = output.find('-- create model tribble')
index_op_desc_unique_together = output.find('-- alter unique_together')
index_tx_end = output.find(connection.ops.end_transaction_sql().lower())
self.assertGreater(index_tx_start, -1, "Transaction start not found")
self.assertGreater(
index_op_desc_author, index_tx_start,
"Operation description (author) not found or found before transaction start"
)
self.assertGreater(
index_create_table, index_op_desc_author,
"CREATE TABLE not found or found before operation description (author)"
)
self.assertGreater(
index_op_desc_tribble, index_create_table,
"Operation description (tribble) not found or found before CREATE TABLE (author)"
)
self.assertGreater(
index_op_desc_unique_together, index_op_desc_tribble,
"Operation description (unique_together) not found or found before operation description (tribble)"
)
self.assertGreater(
index_tx_end, index_op_desc_unique_together,
"Transaction end not found or found before operation description (unique_together)"
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_backwards(self):
"""
        sqlmigrate outputs reverse-looking SQL.
"""
# Cannot generate the reverse SQL unless we've applied the migration.
call_command("migrate", "migrations", verbosity=0)
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True)
output = out.getvalue().lower()
index_tx_start = output.find(connection.ops.start_transaction_sql().lower())
index_op_desc_unique_together = output.find('-- alter unique_together')
index_op_desc_tribble = output.find('-- create model tribble')
index_op_desc_author = output.find('-- create model author')
index_drop_table = output.rfind('drop table')
index_tx_end = output.find(connection.ops.end_transaction_sql().lower())
self.assertGreater(index_tx_start, -1, "Transaction start not found")
self.assertGreater(
index_op_desc_unique_together, index_tx_start,
"Operation description (unique_together) not found or found before transaction start"
)
self.assertGreater(
index_op_desc_tribble, index_op_desc_unique_together,
"Operation description (tribble) not found or found before operation description (unique_together)"
)
self.assertGreater(
index_op_desc_author, index_op_desc_tribble,
"Operation description (author) not found or found before operation description (tribble)"
)
self.assertGreater(
index_drop_table, index_op_desc_author,
"DROP TABLE not found or found before operation description (author)"
)
        self.assertGreater(
            index_tx_end, index_drop_table,
            "Transaction end not found or found before DROP TABLE"
        )
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"})
def test_sqlmigrate_for_non_atomic_migration(self):
"""
Transaction wrappers aren't shown for non-atomic migrations.
"""
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
output = out.getvalue().lower()
queries = [q.strip() for q in output.splitlines()]
if connection.ops.start_transaction_sql():
self.assertNotIn(connection.ops.start_transaction_sql().lower(), queries)
self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.migrated_unapplied_app",
"migrations.migrations_test_apps.unmigrated_app",
],
)
def test_regression_22823_unmigrated_fk_to_migrated_model(self):
"""
Assuming you have 3 apps, `A`, `B`, and `C`, such that:
* `A` has migrations
* `B` has a migration we want to apply
* `C` has no migrations, but has an FK to `A`
        When we try to migrate "B", an exception occurs because "B" was not
        included in the ProjectState that is used to detect soft-applied
        migrations (#22823).
"""
call_command("migrate", "migrated_unapplied_app", stdout=io.StringIO())
# unmigrated_app.SillyModel has a foreign key to 'migrations.Tribble',
# but that model is only defined in a migration, so the global app
# registry never sees it and the reference is left dangling. Remove it
# to avoid problems in subsequent tests.
del apps._pending_operations[('migrations', 'tribble')]
@override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app_syncdb'])
def test_migrate_syncdb_deferred_sql_executed_with_schemaeditor(self):
"""
For an app without migrations, editor.execute() is used for executing
the syncdb deferred SQL.
"""
stdout = io.StringIO()
with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute:
call_command('migrate', run_syncdb=True, verbosity=1, stdout=stdout, no_color=True)
create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)])
self.assertEqual(create_table_count, 2)
# There's at least one deferred SQL for creating the foreign key
# index.
self.assertGreater(len(execute.mock_calls), 2)
stdout = stdout.getvalue()
self.assertIn('Synchronize unmigrated apps: unmigrated_app_syncdb', stdout)
self.assertIn('Creating tables…', stdout)
table_name = truncate_name('unmigrated_app_syncdb_classroom', connection.ops.max_name_length())
self.assertIn('Creating table %s' % table_name, stdout)
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'})
def test_migrate_syncdb_app_with_migrations(self):
msg = "Can't use run_syncdb with app 'migrations' as it has migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command('migrate', 'migrations', run_syncdb=True, verbosity=0)
@override_settings(INSTALLED_APPS=[
'migrations.migrations_test_apps.unmigrated_app_syncdb',
'migrations.migrations_test_apps.unmigrated_app_simple',
])
def test_migrate_syncdb_app_label(self):
"""
Running migrate --run-syncdb with an app_label only creates tables for
the specified app.
"""
stdout = io.StringIO()
with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute:
call_command('migrate', 'unmigrated_app_syncdb', run_syncdb=True, stdout=stdout)
create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)])
self.assertEqual(create_table_count, 2)
self.assertGreater(len(execute.mock_calls), 2)
self.assertIn('Synchronize unmigrated app: unmigrated_app_syncdb', stdout.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_migrate_record_replaced(self):
"""
Running a single squashed migration should record all of the original
replaced migrations as run.
"""
recorder = MigrationRecorder(connection)
out = io.StringIO()
call_command("migrate", "migrations", verbosity=0)
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
'migrations\n'
' [x] 0001_squashed_0002 (2 squashed migrations)\n',
out.getvalue().lower()
)
applied_migrations = recorder.applied_migrations()
self.assertIn(("migrations", "0001_initial"), applied_migrations)
self.assertIn(("migrations", "0002_second"), applied_migrations)
self.assertIn(("migrations", "0001_squashed_0002"), applied_migrations)
# Rollback changes
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_migrate_record_squashed(self):
"""
        Running migrate for a squashed migration should record it as run if
        all of the replaced migrations have been run (#25231).
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0001_initial")
recorder.record_applied("migrations", "0002_second")
out = io.StringIO()
call_command("migrate", "migrations", verbosity=0)
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
'migrations\n'
' [x] 0001_squashed_0002 (2 squashed migrations)\n',
out.getvalue().lower()
)
self.assertIn(
("migrations", "0001_squashed_0002"),
recorder.applied_migrations()
)
# No changes were actually applied so there is nothing to rollback
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'})
def test_migrate_inconsistent_history(self):
"""
Running migrate with some migrations applied before their dependencies
should not be allowed.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0002_second")
msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial"
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
call_command("migrate")
applied_migrations = recorder.applied_migrations()
self.assertNotIn(("migrations", "0001_initial"), applied_migrations)
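# The DATABASE_ROUTERS tests above split writes between 'default' and
# 'other'. A minimal sketch of such a router, assuming the same two
# databases (hypothetical; the suite's real router lives in
# migrations/routers.py):
class _TwoDatabaseRouterSketch:
    def allow_migrate(self, db, app_label, model_name=None, **hints):
        # Send one model to 'other', keep the rest on 'default'; returning
        # None defers the decision to any remaining routers.
        if model_name == 'tribble':
            return db == 'other'
        if db != 'default':
            return False
        return None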
class MakeMigrationsTests(MigrationTestBase):
"""
Tests running the makemigrations command.
"""
def setUp(self):
super().setUp()
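        # Snapshot the app registry's models; tests below register ad-hoc
        # models, and tearDown() restores this copy so later tests see the
        # original registry.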
self._old_models = apps.app_configs['migrations'].models.copy()
def tearDown(self):
apps.app_configs['migrations'].models = self._old_models
apps.all_models['migrations'] = self._old_models
apps.clear_cache()
super().tearDown()
def test_files_content(self):
self.assertTableNotExists("migrations_unicodemodel")
apps.register_model('migrations', UnicodeModel)
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
# Check for empty __init__.py file in migrations folder
init_file = os.path.join(migration_dir, "__init__.py")
self.assertTrue(os.path.exists(init_file))
with open(init_file) as fp:
content = fp.read()
self.assertEqual(content, '')
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with open(initial_file, encoding='utf-8') as fp:
content = fp.read()
self.assertIn('migrations.CreateModel', content)
self.assertIn('initial = True', content)
self.assertIn('úñí©óðé µóðéø', content) # Meta.verbose_name
self.assertIn('úñí©óðé µóðéøß', content) # Meta.verbose_name_plural
self.assertIn('ÚÑÍ¢ÓÐÉ', content) # title.verbose_name
self.assertIn('“Ðjáñgó”', content) # title.default
def test_makemigrations_order(self):
"""
makemigrations should recognize number-only migrations (0001.py).
"""
module = 'migrations.test_migrations_order'
with self.temporary_migration_module(module=module) as migration_dir:
if hasattr(importlib, 'invalidate_caches'):
# importlib caches os.listdir() on some platforms like macOS
# (#23850).
importlib.invalidate_caches()
call_command('makemigrations', 'migrations', '--empty', '-n', 'a', '-v', '0')
self.assertTrue(os.path.exists(os.path.join(migration_dir, '0002_a.py')))
def test_makemigrations_empty_connections(self):
empty_connections = ConnectionHandler({'default': {}})
with mock.patch('django.core.management.commands.makemigrations.connections', new=empty_connections):
# with no apps
out = io.StringIO()
call_command('makemigrations', stdout=out)
self.assertIn('No changes detected', out.getvalue())
# with an app
with self.temporary_migration_module() as migration_dir:
call_command('makemigrations', 'migrations', verbosity=0)
init_file = os.path.join(migration_dir, '__init__.py')
self.assertTrue(os.path.exists(init_file))
@override_settings(INSTALLED_APPS=['migrations', 'migrations2'])
def test_makemigrations_consistency_checks_respect_routers(self):
"""
The history consistency checks in makemigrations respect
settings.DATABASE_ROUTERS.
"""
def patched_has_table(migration_recorder):
if migration_recorder.connection is connections['other']:
raise Exception('Other connection')
else:
return mock.DEFAULT
self.assertTableNotExists('migrations_unicodemodel')
apps.register_model('migrations', UnicodeModel)
with mock.patch.object(
MigrationRecorder, 'has_table',
autospec=True, side_effect=patched_has_table) as has_table:
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
self.assertEqual(has_table.call_count, 1) # 'default' is checked
# Router says not to migrate 'other' so consistency shouldn't
# be checked.
with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']):
call_command('makemigrations', 'migrations', verbosity=0)
self.assertEqual(has_table.call_count, 2) # 'default' again
# With a router that doesn't prohibit migrating 'other',
# consistency is checked.
with self.settings(DATABASE_ROUTERS=['migrations.routers.EmptyRouter']):
with self.assertRaisesMessage(Exception, 'Other connection'):
call_command('makemigrations', 'migrations', verbosity=0)
self.assertEqual(has_table.call_count, 4) # 'default' and 'other'
# With a router that doesn't allow migrating on any database,
# no consistency checks are made.
with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']):
with mock.patch.object(TestRouter, 'allow_migrate', return_value=False) as allow_migrate:
call_command('makemigrations', 'migrations', verbosity=0)
allow_migrate.assert_any_call('other', 'migrations', model_name='UnicodeModel')
# allow_migrate() is called with the correct arguments.
self.assertGreater(len(allow_migrate.mock_calls), 0)
for mock_call in allow_migrate.mock_calls:
_, call_args, call_kwargs = mock_call
connection_alias, app_name = call_args
self.assertIn(connection_alias, ['default', 'other'])
# Raises an error if invalid app_name/model_name occurs.
apps.get_app_config(app_name).get_model(call_kwargs['model_name'])
self.assertEqual(has_table.call_count, 4)
def test_failing_migration(self):
        # If a migration fails to serialize, it shouldn't generate an empty
        # file (#21280).
apps.register_model('migrations', UnserializableModel)
with self.temporary_migration_module() as migration_dir:
with self.assertRaisesMessage(ValueError, 'Cannot serialize'):
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertFalse(os.path.exists(initial_file))
def test_makemigrations_conflict_exit(self):
"""
makemigrations exits if it detects a conflict.
"""
with self.temporary_migration_module(module="migrations.test_migrations_conflict"):
with self.assertRaises(CommandError) as context:
call_command("makemigrations")
exception_message = str(context.exception)
self.assertIn(
'Conflicting migrations detected; multiple leaf nodes '
'in the migration graph:',
exception_message
)
self.assertIn('0002_second', exception_message)
self.assertIn('0002_conflicting_second', exception_message)
self.assertIn('in migrations', exception_message)
self.assertIn("To fix them run 'python manage.py makemigrations --merge'", exception_message)
def test_makemigrations_merge_no_conflict(self):
"""
makemigrations exits if in merge mode with no conflicts.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("makemigrations", merge=True, stdout=out)
self.assertIn("No conflicts detected to merge.", out.getvalue())
def test_makemigrations_empty_no_app_specified(self):
"""
makemigrations exits if no app is specified with 'empty' mode.
"""
msg = 'You must supply at least one app label when using --empty.'
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", empty=True)
def test_makemigrations_empty_migration(self):
"""
makemigrations properly constructs an empty migration.
"""
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", empty=True, verbosity=0)
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with open(initial_file, encoding='utf-8') as fp:
content = fp.read()
# Remove all whitespace to check for empty dependencies and operations
content = content.replace(' ', '')
self.assertIn('dependencies=[\n]', content)
self.assertIn('operations=[\n]', content)
@override_settings(MIGRATION_MODULES={"migrations": None})
def test_makemigrations_disabled_migrations_for_app(self):
"""
makemigrations raises a nice error when migrations are disabled for an
app.
"""
msg = (
"Django can't create migrations for app 'migrations' because migrations "
"have been disabled via the MIGRATION_MODULES setting."
)
with self.assertRaisesMessage(ValueError, msg):
call_command("makemigrations", "migrations", empty=True, verbosity=0)
def test_makemigrations_no_changes_no_apps(self):
"""
makemigrations exits when there are no changes and no apps are specified.
"""
out = io.StringIO()
call_command("makemigrations", stdout=out)
self.assertIn("No changes detected", out.getvalue())
def test_makemigrations_no_changes(self):
"""
makemigrations exits when there are no changes to an app.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("No changes detected in app 'migrations'", out.getvalue())
def test_makemigrations_no_apps_initial(self):
"""
        makemigrations should detect that an initial migration is needed for
        apps with empty migration modules when no app is specified.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_empty"):
call_command("makemigrations", stdout=out)
self.assertIn("0001_initial.py", out.getvalue())
def test_makemigrations_no_init(self):
"""Migration directories without an __init__.py file are allowed."""
out = io.StringIO()
with self.temporary_migration_module(module='migrations.test_migrations_no_init'):
call_command('makemigrations', stdout=out)
self.assertIn('0001_initial.py', out.getvalue())
def test_makemigrations_migrations_announce(self):
"""
makemigrations announces the migration at the default verbosity level.
"""
out = io.StringIO()
with self.temporary_migration_module():
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("Migrations for 'migrations'", out.getvalue())
def test_makemigrations_no_common_ancestor(self):
"""
makemigrations fails to merge migrations with no common ancestor.
"""
with self.assertRaises(ValueError) as context:
with self.temporary_migration_module(module="migrations.test_migrations_no_ancestor"):
call_command("makemigrations", "migrations", merge=True)
exception_message = str(context.exception)
self.assertIn("Could not find common ancestor of", exception_message)
self.assertIn("0002_second", exception_message)
self.assertIn("0002_conflicting_second", exception_message)
def test_makemigrations_interactive_reject(self):
"""
makemigrations enters and exits interactive mode properly.
"""
# Monkeypatch interactive questioner to auto reject
with mock.patch('builtins.input', mock.Mock(return_value='N')):
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, verbosity=0)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
def test_makemigrations_interactive_accept(self):
"""
makemigrations enters interactive mode and merges properly.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch('builtins.input', mock.Mock(return_value='y')):
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
self.assertIn("Created new merge migration", out.getvalue())
@mock.patch('django.db.migrations.utils.datetime')
def test_makemigrations_default_merge_name(self, mock_datetime):
mock_datetime.datetime.now.return_value = datetime.datetime(2016, 1, 2, 3, 4)
with mock.patch('builtins.input', mock.Mock(return_value='y')):
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge_20160102_0304.py')
self.assertTrue(os.path.exists(merge_file))
self.assertIn("Created new merge migration", out.getvalue())
def test_makemigrations_non_interactive_not_null_addition(self):
"""
Non-interactive makemigrations fails when a default is missing on a
new not-null field.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_int = models.IntegerField()
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.assertRaises(SystemExit):
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
def test_makemigrations_non_interactive_not_null_alteration(self):
"""
Non-interactive makemigrations fails when a default is missing on a
field changed to not-null.
"""
class Author(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField()
age = models.IntegerField(default=0)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Alter field slug on author", out.getvalue())
def test_makemigrations_non_interactive_no_model_rename(self):
"""
        In non-interactive mode, makemigrations falls back to a delete and a
        create instead of prompting about a possible model rename.
"""
class RenamedModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Delete model SillyModel", out.getvalue())
self.assertIn("Create model RenamedModel", out.getvalue())
def test_makemigrations_non_interactive_no_field_rename(self):
"""
        In non-interactive mode, makemigrations falls back to a remove and an
        add instead of prompting about a possible field rename.
"""
class SillyModel(models.Model):
silly_rename = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Remove field silly_field from sillymodel", out.getvalue())
self.assertIn("Add field silly_rename to sillymodel", out.getvalue())
def test_makemigrations_handle_merge(self):
"""
makemigrations properly merges the conflicting migrations with --noinput.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", name="merge", merge=True, interactive=False, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertIn("Created new merge migration", output)
def test_makemigration_merge_dry_run(self):
"""
        makemigrations respects the --dry-run option when fixing migration
        conflicts (#24427).
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command(
"makemigrations", "migrations", name="merge", dry_run=True,
merge=True, interactive=False, stdout=out,
)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
def test_makemigration_merge_dry_run_verbosity_3(self):
"""
`makemigrations --merge --dry-run` writes the merge migration file to
stdout with `verbosity == 3` (#24427).
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command(
"makemigrations", "migrations", name="merge", dry_run=True,
merge=True, interactive=False, stdout=out, verbosity=3,
)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
# Additional output caused by verbosity 3
# The complete merge migration file that would be written
self.assertIn("class Migration(migrations.Migration):", output)
self.assertIn("dependencies = [", output)
self.assertIn("('migrations', '0002_second')", output)
self.assertIn("('migrations', '0002_conflicting_second')", output)
self.assertIn("operations = [", output)
self.assertIn("]", output)
def test_makemigrations_dry_run(self):
"""
`makemigrations --dry-run` should not ask for defaults.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_date = models.DateField() # Added field without a default
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", dry_run=True, stdout=out)
# Output the expected changes directly, without asking for defaults
self.assertIn("Add field silly_date to sillymodel", out.getvalue())
def test_makemigrations_dry_run_verbosity_3(self):
"""
Allow `makemigrations --dry-run` to output the migrations file to
stdout (with verbosity == 3).
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_char = models.CharField(default="")
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3)
# Normal --dry-run output
self.assertIn("- Add field silly_char to sillymodel", out.getvalue())
# Additional output caused by verbosity 3
# The complete migrations file that would be written
self.assertIn("class Migration(migrations.Migration):", out.getvalue())
self.assertIn("dependencies = [", out.getvalue())
self.assertIn("('migrations', '0001_initial'),", out.getvalue())
self.assertIn("migrations.AddField(", out.getvalue())
self.assertIn("model_name='sillymodel',", out.getvalue())
self.assertIn("name='silly_char',", out.getvalue())
def test_makemigrations_migrations_modules_path_not_exist(self):
"""
        makemigrations creates migrations when a custom location for
        migration files is specified via MIGRATION_MODULES, even if the
        custom path doesn't exist yet.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
migration_module = "migrations.test_migrations_path_doesnt_exist.foo.bar"
with self.temporary_migration_module(module=migration_module) as migration_dir:
call_command("makemigrations", "migrations", stdout=out)
# Migrations file is actually created in the expected path.
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
# Command output indicates the migration is created.
self.assertIn(" - Create model SillyModel", out.getvalue())
@override_settings(MIGRATION_MODULES={'migrations': 'some.nonexistent.path'})
def test_makemigrations_migrations_modules_nonexistent_toplevel_package(self):
msg = (
'Could not locate an appropriate location to create migrations '
'package some.nonexistent.path. Make sure the toplevel package '
'exists and can be imported.'
)
with self.assertRaisesMessage(ValueError, msg):
call_command('makemigrations', 'migrations', empty=True, verbosity=0)
def test_makemigrations_interactive_by_default(self):
"""
        The user is prompted to merge by default if there are conflicts and
        merge is True. Answering 'no' differentiates this from the behavior
        when --noinput is specified.
"""
# Monkeypatch interactive questioner to auto reject
out = io.StringIO()
with mock.patch('builtins.input', mock.Mock(return_value='N')):
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", name="merge", merge=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
# This will fail if interactive is False by default
self.assertFalse(os.path.exists(merge_file))
self.assertNotIn("Created new merge migration", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_no_merge(self):
"""
makemigrations does not raise a CommandError when an unspecified app
has conflicting migrations.
"""
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
call_command("makemigrations", "migrations", merge=False, verbosity=0)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_merge(self):
"""
makemigrations does not create a merge for an unspecified app even if
it has conflicting migrations.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch('builtins.input', mock.Mock(return_value='y')):
out = io.StringIO()
with self.temporary_migration_module(app_label="migrated_app") as migration_dir:
call_command("makemigrations", "migrated_app", name="merge", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
self.assertIn("No conflicts detected to merge.", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.conflicting_app_with_dependencies"])
def test_makemigrations_merge_dont_output_dependency_operations(self):
"""
        makemigrations --merge does not output any operations from apps other
        than the one being merged.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch('builtins.input', mock.Mock(return_value='N')):
out = io.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: False):
call_command(
"makemigrations", "conflicting_app_with_dependencies",
merge=True, interactive=True, stdout=out
)
val = out.getvalue().lower()
self.assertIn('merging conflicting_app_with_dependencies\n', val)
self.assertIn(
' branch 0002_conflicting_second\n'
' - create model something\n',
val
)
self.assertIn(
' branch 0002_second\n'
' - delete model tribble\n'
' - remove field silly_field from author\n'
' - add field rating to author\n'
' - create model book\n',
val
)
def test_makemigrations_with_custom_name(self):
"""
        makemigrations --name generates a custom migration name.
"""
with self.temporary_migration_module() as migration_dir:
def cmd(migration_count, migration_name, *args):
call_command("makemigrations", "migrations", "--verbosity", "0", "--name", migration_name, *args)
migration_file = os.path.join(migration_dir, "%s_%s.py" % (migration_count, migration_name))
# Check for existing migration file in migration folder
self.assertTrue(os.path.exists(migration_file))
with open(migration_file, encoding='utf-8') as fp:
content = fp.read()
content = content.replace(" ", "")
return content
# generate an initial migration
migration_name_0001 = "my_initial_migration"
content = cmd("0001", migration_name_0001)
self.assertIn("dependencies=[\n]", content)
# importlib caches os.listdir() on some platforms like macOS
# (#23850).
if hasattr(importlib, 'invalidate_caches'):
importlib.invalidate_caches()
# generate an empty migration
migration_name_0002 = "my_custom_migration"
content = cmd("0002", migration_name_0002, "--empty")
self.assertIn("dependencies=[\n('migrations','0001_%s'),\n]" % migration_name_0001, content)
self.assertIn("operations=[\n]", content)
def test_makemigrations_with_invalid_custom_name(self):
msg = 'The migration name must be a valid Python identifier.'
with self.assertRaisesMessage(CommandError, msg):
call_command('makemigrations', 'migrations', '--name', 'invalid name', '--empty')
def test_makemigrations_check(self):
"""
makemigrations --check should exit with a non-zero status when
there are changes to an app requiring migrations.
"""
with self.temporary_migration_module():
with self.assertRaises(SystemExit):
call_command("makemigrations", "--check", "migrations", verbosity=0)
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
call_command("makemigrations", "--check", "migrations", verbosity=0)
def test_makemigrations_migration_path_output(self):
"""
makemigrations should print the relative paths to the migrations unless
they are outside of the current tree, in which case the absolute path
should be shown.
"""
out = io.StringIO()
apps.register_model('migrations', UnicodeModel)
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", stdout=out)
self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue())
def test_makemigrations_migration_path_output_valueerror(self):
"""
makemigrations prints the absolute path if os.path.relpath() raises a
ValueError when it's impossible to obtain a relative path, e.g. on
Windows if Django is installed on a different drive than where the
migration files are created.
"""
out = io.StringIO()
with self.temporary_migration_module() as migration_dir:
with mock.patch('os.path.relpath', side_effect=ValueError):
call_command('makemigrations', 'migrations', stdout=out)
self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue())
def test_makemigrations_inconsistent_history(self):
"""
makemigrations should raise InconsistentMigrationHistory exception if
there are some migrations applied before their dependencies.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0002_second')
msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial"
with self.temporary_migration_module(module="migrations.test_migrations"):
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
call_command("makemigrations")
@mock.patch('builtins.input', return_value='1')
@mock.patch('django.db.migrations.questioner.sys.stdin', mock.MagicMock(encoding=sys.getdefaultencoding()))
def test_makemigrations_auto_now_add_interactive(self, *args):
"""
makemigrations prompts the user when adding auto_now_add to an existing
model.
"""
class Entry(models.Model):
title = models.CharField(max_length=255)
creation_date = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = 'migrations'
# Monkeypatch interactive questioner to auto accept
with mock.patch('django.db.migrations.questioner.sys.stdout', new_callable=io.StringIO) as prompt_stdout:
out = io.StringIO()
with self.temporary_migration_module(module='migrations.test_auto_now_add'):
call_command('makemigrations', 'migrations', interactive=True, stdout=out)
output = out.getvalue()
prompt_output = prompt_stdout.getvalue()
self.assertIn("You can accept the default 'timezone.now' by pressing 'Enter'", prompt_output)
self.assertIn("Add field creation_date to entry", output)
class SquashMigrationsTests(MigrationTestBase):
"""
Tests running the squashmigrations command.
"""
def test_squashmigrations_squashes(self):
"""
squashmigrations squashes migrations.
"""
with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir:
call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0)
squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py")
self.assertTrue(os.path.exists(squashed_migration_file))
def test_squashmigrations_initial_attribute(self):
with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir:
call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0)
squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py")
with open(squashed_migration_file, encoding='utf-8') as fp:
content = fp.read()
self.assertIn("initial = True", content)
def test_squashmigrations_optimizes(self):
"""
squashmigrations optimizes operations.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, stdout=out)
self.assertIn("Optimized from 8 operations to 2 operations.", out.getvalue())
def test_ticket_23799_squashmigrations_no_optimize(self):
"""
squashmigrations --no-optimize doesn't optimize operations.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("squashmigrations", "migrations", "0002",
interactive=False, verbosity=1, no_optimize=True, stdout=out)
self.assertIn("Skipping optimization", out.getvalue())
def test_squashmigrations_valid_start(self):
"""
squashmigrations accepts a starting migration.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_changes") as migration_dir:
call_command("squashmigrations", "migrations", "0002", "0003",
interactive=False, verbosity=1, stdout=out)
squashed_migration_file = os.path.join(migration_dir, "0002_second_squashed_0003_third.py")
with open(squashed_migration_file, encoding='utf-8') as fp:
content = fp.read()
self.assertIn(" ('migrations', '0001_initial')", content)
self.assertNotIn("initial = True", content)
out = out.getvalue()
self.assertNotIn(" - 0001_initial", out)
self.assertIn(" - 0002_second", out)
self.assertIn(" - 0003_third", out)
def test_squashmigrations_invalid_start(self):
"""
squashmigrations doesn't accept a starting migration after the ending migration.
"""
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
msg = (
"The migration 'migrations.0003_third' cannot be found. Maybe "
"it comes after the migration 'migrations.0002_second'"
)
with self.assertRaisesMessage(CommandError, msg):
call_command("squashmigrations", "migrations", "0003", "0002", interactive=False, verbosity=0)
def test_squashed_name_with_start_migration_name(self):
"""--squashed-name specifies the new migration's name."""
squashed_name = 'squashed_name'
with self.temporary_migration_module(module='migrations.test_migrations') as migration_dir:
call_command(
'squashmigrations', 'migrations', '0001', '0002',
squashed_name=squashed_name, interactive=False, verbosity=0,
)
squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name)
self.assertTrue(os.path.exists(squashed_migration_file))
def test_squashed_name_without_start_migration_name(self):
"""--squashed-name also works if a start migration is omitted."""
squashed_name = 'squashed_name'
with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir:
call_command(
'squashmigrations', 'migrations', '0001',
squashed_name=squashed_name, interactive=False, verbosity=0,
)
squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name)
self.assertTrue(os.path.exists(squashed_migration_file))
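# For reference, the squashed files these tests assert on carry a `replaces`
# list in their Migration class; a sketch of the generated header
# (illustrative, not the actual file contents):
#
#     class Migration(migrations.Migration):
#         initial = True
#         replaces = [
#             ('migrations', '0001_initial'),
#             ('migrations', '0002_second'),
#         ]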
class AppLabelErrorTests(TestCase):
"""
    This class inherits TestCase because MigrationTestBase uses
    `available_apps = ['migrations']`, which means that it's the only
    installed app. 'django.contrib.auth' must be in INSTALLED_APPS for some
    of these tests.
"""
nonexistent_app_error = "No installed app with label 'nonexistent_app'."
did_you_mean_auth_error = (
"No installed app with label 'django.contrib.auth'. Did you mean "
"'auth'?"
)
def test_makemigrations_nonexistent_app_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command('makemigrations', 'nonexistent_app', stderr=err)
self.assertIn(self.nonexistent_app_error, err.getvalue())
def test_makemigrations_app_name_specified_as_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command('makemigrations', 'django.contrib.auth', stderr=err)
self.assertIn(self.did_you_mean_auth_error, err.getvalue())
def test_migrate_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command('migrate', 'nonexistent_app')
def test_migrate_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command('migrate', 'django.contrib.auth')
def test_showmigrations_nonexistent_app_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command('showmigrations', 'nonexistent_app', stderr=err)
self.assertIn(self.nonexistent_app_error, err.getvalue())
def test_showmigrations_app_name_specified_as_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command('showmigrations', 'django.contrib.auth', stderr=err)
self.assertIn(self.did_you_mean_auth_error, err.getvalue())
def test_sqlmigrate_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command('sqlmigrate', 'nonexistent_app', '0002')
def test_sqlmigrate_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command('sqlmigrate', 'django.contrib.auth', '0002')
def test_squashmigrations_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command('squashmigrations', 'nonexistent_app', '0002')
def test_squashmigrations_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command('squashmigrations', 'django.contrib.auth', '0002')
import datetime
import decimal
import enum
import functools
import math
import os
import re
import uuid
from unittest import mock
import custom_migration_operations.more_operations
import custom_migration_operations.operations
from django import get_version
from django.conf import SettingsReference, settings
from django.core.validators import EmailValidator, RegexValidator
from django.db import migrations, models
from django.db.migrations.serializer import BaseSerializer
from django.db.migrations.writer import MigrationWriter, OperationWriter
from django.test import SimpleTestCase
from django.utils.deconstruct import deconstructible
from django.utils.functional import SimpleLazyObject
from django.utils.timezone import get_default_timezone, get_fixed_timezone, utc
from django.utils.translation import gettext_lazy as _
from .models import FoodManager, FoodQuerySet
class Money(decimal.Decimal):
def deconstruct(self):
return (
'%s.%s' % (self.__class__.__module__, self.__class__.__name__),
[str(self)],
{}
)
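# Money hand-writes deconstruct(); the @deconstructible decorator imported
# above synthesizes the same (path, args, kwargs) contract. A hypothetical
# example, not used by the tests below:
@deconstructible
class PrefixedPath:
    def __init__(self, prefix):
        self.prefix = prefix
    def __call__(self, instance, filename):
        # Build an upload path from the stored prefix and the file name.
        return '%s/%s' % (self.prefix, filename)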
class TestModel1:
def upload_to(self):
return '/somewhere/dynamic/'
thing = models.FileField(upload_to=upload_to)
class OperationWriterTests(SimpleTestCase):
def test_empty_signature(self):
operation = custom_migration_operations.operations.TestOperation()
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.TestOperation(\n'
'),'
)
def test_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(1, 2)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
'),'
)
def test_kwargs_signature(self):
operation = custom_migration_operations.operations.KwargsOperation(kwarg1=1)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
'),'
)
def test_args_kwargs_signature(self):
operation = custom_migration_operations.operations.ArgsKwargsOperation(1, 2, kwarg2=4)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsKwargsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' kwarg2=4,\n'
'),'
)
def test_nested_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(
custom_migration_operations.operations.ArgsOperation(1, 2),
custom_migration_operations.operations.KwargsOperation(kwarg1=3, kwarg2=4)
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' ),\n'
' arg2=custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=3,\n'
' kwarg2=4,\n'
' ),\n'
'),'
)
def test_multiline_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation("test\n arg1", "test\narg2")
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1='test\\n arg1',\n"
" arg2='test\\narg2',\n"
"),"
)
def test_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation([1, 2])
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' 1,\n'
' 2,\n'
' ],\n'
'),'
)
def test_nested_operation_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation(
arg=[
custom_migration_operations.operations.KwargsOperation(
kwarg1=1,
kwarg2=2,
),
]
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
' kwarg2=2,\n'
' ),\n'
' ],\n'
'),'
)
class WriterTests(SimpleTestCase):
"""
Tests the migration writer (makes migration files from Migration instances)
"""
def safe_exec(self, string, value=None):
d = {}
try:
exec(string, globals(), d)
except Exception as e:
            if value is not None:
self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
return d
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec("%s\ntest_value_result = %s" % ("\n".join(imports), string), value)['test_value_result']
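    # A rough sketch of the round trip for a simple value:
    #
    #   string, imports = MigrationWriter.serialize(decimal.Decimal('1.3'))
    #   # string == "Decimal('1.3')", imports == {'from decimal import Decimal'}
    #
    # safe_exec() then runs the imports plus "test_value_result = <string>"
    # and hands back the rebuilt value for comparison with the original.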
def assertSerializedEqual(self, value):
self.assertEqual(self.serialize_round_trip(value), value)
def assertSerializedResultEqual(self, value, target):
self.assertEqual(MigrationWriter.serialize(value), target)
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
def test_serialize_numbers(self):
self.assertSerializedEqual(1)
self.assertSerializedEqual(1.2)
self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
self.assertSerializedEqual(decimal.Decimal('1.3'))
self.assertSerializedResultEqual(
decimal.Decimal('1.3'),
("Decimal('1.3')", {'from decimal import Decimal'})
)
self.assertSerializedEqual(Money('1.3'))
self.assertSerializedResultEqual(
Money('1.3'),
("migrations.test_writer.Money('1.3')", {'import migrations.test_writer'})
)
def test_serialize_constants(self):
self.assertSerializedEqual(None)
self.assertSerializedEqual(True)
self.assertSerializedEqual(False)
def test_serialize_strings(self):
self.assertSerializedEqual(b"foobar")
string, imports = MigrationWriter.serialize(b"foobar")
self.assertEqual(string, "b'foobar'")
self.assertSerializedEqual("föobár")
string, imports = MigrationWriter.serialize("foobar")
self.assertEqual(string, "'foobar'")
def test_serialize_multiline_strings(self):
self.assertSerializedEqual(b"foo\nbar")
string, imports = MigrationWriter.serialize(b"foo\nbar")
self.assertEqual(string, "b'foo\\nbar'")
self.assertSerializedEqual("föo\nbár")
string, imports = MigrationWriter.serialize("foo\nbar")
self.assertEqual(string, "'foo\\nbar'")
def test_serialize_collections(self):
self.assertSerializedEqual({1: 2})
self.assertSerializedEqual(["a", 2, True, None])
self.assertSerializedEqual({2, 3, "eighty"})
self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
self.assertSerializedEqual(_('Hello'))
def test_serialize_builtin_types(self):
self.assertSerializedEqual([list, tuple, dict, set, frozenset])
self.assertSerializedResultEqual(
[list, tuple, dict, set, frozenset],
("[list, tuple, dict, set, frozenset]", set())
)
def test_serialize_lazy_objects(self):
pattern = re.compile(r'^foo$')
lazy_pattern = SimpleLazyObject(lambda: pattern)
self.assertEqual(self.serialize_round_trip(lazy_pattern), pattern)
def test_serialize_enums(self):
class TextEnum(enum.Enum):
A = 'a-value'
B = 'value-b'
class BinaryEnum(enum.Enum):
A = b'a-value'
B = b'value-b'
class IntEnum(enum.IntEnum):
A = 1
B = 2
self.assertSerializedResultEqual(
TextEnum.A,
("migrations.test_writer.TextEnum('a-value')", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
BinaryEnum.A,
("migrations.test_writer.BinaryEnum(b'a-value')", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
IntEnum.B,
("migrations.test_writer.IntEnum(2)", {'import migrations.test_writer'})
)
field = models.CharField(default=TextEnum.B, choices=[(m.value, m) for m in TextEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"('a-value', migrations.test_writer.TextEnum('a-value')), "
"('value-b', migrations.test_writer.TextEnum('value-b'))], "
"default=migrations.test_writer.TextEnum('value-b'))"
)
field = models.CharField(default=BinaryEnum.B, choices=[(m.value, m) for m in BinaryEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"(b'a-value', migrations.test_writer.BinaryEnum(b'a-value')), "
"(b'value-b', migrations.test_writer.BinaryEnum(b'value-b'))], "
"default=migrations.test_writer.BinaryEnum(b'value-b'))"
)
field = models.IntegerField(default=IntEnum.A, choices=[(m.value, m) for m in IntEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.IntegerField(choices=["
"(1, migrations.test_writer.IntEnum(1)), "
"(2, migrations.test_writer.IntEnum(2))], "
"default=migrations.test_writer.IntEnum(1))"
)
def test_serialize_uuid(self):
self.assertSerializedEqual(uuid.uuid1())
self.assertSerializedEqual(uuid.uuid4())
uuid_a = uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')
uuid_b = uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')
self.assertSerializedResultEqual(
uuid_a,
("uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')", {'import uuid'})
)
self.assertSerializedResultEqual(
uuid_b,
("uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')", {'import uuid'})
)
field = models.UUIDField(choices=((uuid_a, 'UUID A'), (uuid_b, 'UUID B')), default=uuid_a)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.UUIDField(choices=["
"(uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'), 'UUID A'), "
"(uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2'), 'UUID B')], "
"default=uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'))"
)
def test_serialize_functions(self):
with self.assertRaisesMessage(ValueError, 'Cannot serialize function: lambda'):
self.assertSerializedEqual(lambda x: 42)
self.assertSerializedEqual(models.SET_NULL)
string, imports = MigrationWriter.serialize(models.SET(42))
self.assertEqual(string, 'models.SET(42)')
self.serialize_round_trip(models.SET(42))
def test_serialize_datetime(self):
self.assertSerializedEqual(datetime.datetime.utcnow())
self.assertSerializedEqual(datetime.datetime.utcnow)
self.assertSerializedEqual(datetime.datetime.today())
self.assertSerializedEqual(datetime.datetime.today)
self.assertSerializedEqual(datetime.date.today())
self.assertSerializedEqual(datetime.date.today)
self.assertSerializedEqual(datetime.datetime.now().time())
self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone()))
self.assertSerializedEqual(datetime.datetime(2013, 12, 31, 22, 1, tzinfo=get_fixed_timezone(180)))
self.assertSerializedResultEqual(
datetime.datetime(2014, 1, 1, 1, 1),
("datetime.datetime(2014, 1, 1, 1, 1)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
(
"datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)",
{'import datetime', 'from django.utils.timezone import utc'},
)
)
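    # Note the import sets above: a naive datetime only needs
    # 'import datetime', while an aware one also pulls in
    # 'from django.utils.timezone import utc'.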
def test_serialize_fields(self):
self.assertSerializedFieldEqual(models.CharField(max_length=255))
self.assertSerializedResultEqual(
models.CharField(max_length=255),
("models.CharField(max_length=255)", {"from django.db import models"})
)
self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
self.assertSerializedResultEqual(
models.TextField(null=True, blank=True),
("models.TextField(blank=True, null=True)", {'from django.db import models'})
)
def test_serialize_settings(self):
self.assertSerializedEqual(SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL"))
self.assertSerializedResultEqual(
SettingsReference("someapp.model", "AUTH_USER_MODEL"),
("settings.AUTH_USER_MODEL", {"from django.conf import settings"})
)
def test_serialize_iterators(self):
self.assertSerializedResultEqual(
((x, x * x) for x in range(3)),
("((0, 0), (1, 1), (2, 4))", set())
)
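    # Generators are consumed eagerly: the serializer unrolls the iterator
    # into a tuple literal, so (roughly speaking) the values must be finite
    # and each element must itself be serializable.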
def test_serialize_compiled_regex(self):
"""
Make sure compiled regex can be serialized.
"""
regex = re.compile(r'^\w+$')
self.assertSerializedEqual(regex)
def test_serialize_class_based_validators(self):
"""
Ticket #22943: Test serialization of class-based validators, including
compiled regexes.
"""
validator = RegexValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(message='hello')")
self.serialize_round_trip(validator)
# Test with a compiled regex.
validator = RegexValidator(regex=re.compile(r'^\w+$'))
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$'))")
self.serialize_round_trip(validator)
# Test a string regex with flag
validator = RegexValidator(r'^[0-9]+$', flags=re.S)
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=re.RegexFlag(16))")
self.serialize_round_trip(validator)
# Test message and code
validator = RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')")
self.serialize_round_trip(validator)
# Test with a subclass.
validator = EmailValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.EmailValidator(message='hello')")
self.serialize_round_trip(validator)
validator = deconstructible(path="migrations.test_writer.EmailValidator")(EmailValidator)(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "migrations.test_writer.EmailValidator(message='hello')")
validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ImportError, "No module named 'custom'"):
MigrationWriter.serialize(validator)
validator = deconstructible(path="django.core.validators.EmailValidator2")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ValueError, "Could not find object EmailValidator2 in django.core.validators."):
MigrationWriter.serialize(validator)
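    # The @deconstructible(path=...) decorator used above overrides the
    # dotted path the writer emits for a class; as the two failing cases
    # show, serialization then raises ImportError if the module half of that
    # path can't be imported and ValueError if the attribute half can't be
    # found.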
def test_serialize_empty_nonempty_tuple(self):
"""
        Ticket #22679: makemigrations used to generate invalid code for an
        empty tuple default, e.g. default_permissions = ().
"""
empty_tuple = ()
one_item_tuple = ('a',)
many_items_tuple = ('a', 'b', 'c')
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
def test_serialize_builtins(self):
string, imports = MigrationWriter.serialize(range)
self.assertEqual(string, 'range')
self.assertEqual(imports, set())
def test_serialize_unbound_method_reference(self):
"""An unbound method used within a class body can be serialized."""
self.serialize_round_trip(TestModel1.thing)
def test_serialize_local_function_reference(self):
"""A reference in a local scope can't be serialized."""
class TestModel2:
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with self.assertRaisesMessage(ValueError, 'Could not find function upload_to in migrations.test_writer'):
self.serialize_round_trip(TestModel2.thing)
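    # Roughly speaking, the writer resolves plain functions via their
    # __module__ and __qualname__; upload_to defined inside a test method has
    # '<locals>' in its qualified name, so it can't be found at module scope,
    # hence the ValueError above.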
def test_serialize_managers(self):
self.assertSerializedEqual(models.Manager())
self.assertSerializedResultEqual(
FoodQuerySet.as_manager(),
('migrations.models.FoodQuerySet.as_manager()', {'import migrations.models'})
)
self.assertSerializedEqual(FoodManager('a', 'b'))
self.assertSerializedEqual(FoodManager('x', 'y', c=3, d=4))
def test_serialize_frozensets(self):
self.assertSerializedEqual(frozenset())
self.assertSerializedEqual(frozenset("let it go"))
def test_serialize_set(self):
self.assertSerializedEqual(set())
self.assertSerializedResultEqual(set(), ('set()', set()))
self.assertSerializedEqual({'a'})
self.assertSerializedResultEqual({'a'}, ("{'a'}", set()))
def test_serialize_timedelta(self):
self.assertSerializedEqual(datetime.timedelta())
self.assertSerializedEqual(datetime.timedelta(minutes=42))
def test_serialize_functools_partial(self):
value = functools.partial(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_functools_partialmethod(self):
value = functools.partialmethod(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertIsInstance(result, functools.partialmethod)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_type_none(self):
self.assertSerializedEqual(type(None))
def test_simple_migration(self):
"""
Tests serializing a simple migration.
"""
fields = {
'charfield': models.DateTimeField(default=datetime.datetime.utcnow),
'datetimefield': models.DateTimeField(default=datetime.datetime.utcnow),
}
options = {
'verbose_name': 'My model',
'verbose_name_plural': 'My models',
}
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.CreateModel("MyModel", tuple(fields.items()), options, (models.Model,)),
migrations.CreateModel("MyModel2", tuple(fields.items()), bases=(models.Model,)),
migrations.CreateModel(
name="MyModel3", fields=tuple(fields.items()), options=options, bases=(models.Model,)
),
migrations.DeleteModel("MyModel"),
migrations.AddField("OtherModel", "datetimefield", fields["datetimefield"]),
],
"dependencies": [("testapp", "some_other_one")],
})
writer = MigrationWriter(migration)
output = writer.as_string()
# We don't test the output formatting - that's too fragile.
# Just make sure it runs for now, and that things look alright.
result = self.safe_exec(output)
self.assertIn("Migration", result)
def test_migration_path(self):
test_apps = [
'migrations.migrations_test_apps.normal',
'migrations.migrations_test_apps.with_package_model',
'migrations.migrations_test_apps.without_init_file',
]
base_dir = os.path.dirname(os.path.dirname(__file__))
for app in test_apps:
with self.modify_settings(INSTALLED_APPS={'append': app}):
migration = migrations.Migration('0001_initial', app.split('.')[-1])
expected_path = os.path.join(base_dir, *(app.split('.') + ['migrations', '0001_initial.py']))
writer = MigrationWriter(migration)
self.assertEqual(writer.path, expected_path)
def test_custom_operation(self):
migration = type("Migration", (migrations.Migration,), {
"operations": [
custom_migration_operations.operations.TestOperation(),
custom_migration_operations.operations.CreateModel(),
migrations.CreateModel("MyModel", (), {}, (models.Model,)),
custom_migration_operations.more_operations.TestOperation()
],
"dependencies": []
})
writer = MigrationWriter(migration)
output = writer.as_string()
result = self.safe_exec(output)
self.assertIn("custom_migration_operations", result)
self.assertNotEqual(
result['custom_migration_operations'].operations.TestOperation,
result['custom_migration_operations'].more_operations.TestOperation
)
def test_sorted_imports(self):
"""
#24155 - Tests ordering of imports.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.AddField("mymodel", "myfield", models.DateTimeField(
default=datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
)),
]
})
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn(
"import datetime\n"
"from django.db import migrations, models\n"
"from django.utils.timezone import utc\n",
output
)
def test_migration_file_header_comments(self):
"""
Test comments at top of file.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": []
})
dt = datetime.datetime(2015, 7, 31, 4, 40, 0, 0, tzinfo=utc)
with mock.patch('django.db.migrations.writer.now', lambda: dt):
for include_header in (True, False):
with self.subTest(include_header=include_header):
writer = MigrationWriter(migration, include_header)
output = writer.as_string()
self.assertEqual(
include_header,
output.startswith(
"# Generated by Django %s on 2015-07-31 04:40\n\n" % get_version()
)
)
if not include_header:
# Make sure the output starts with something that's not
# a comment or indentation or blank line
self.assertRegex(output.splitlines(keepends=True)[0], r"^[^#\s]+")
def test_models_import_omitted(self):
"""
django.db.models shouldn't be imported if unused.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.AlterModelOptions(
name='model',
options={'verbose_name': 'model', 'verbose_name_plural': 'models'},
),
]
})
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn("from django.db import migrations\n", output)
def test_deconstruct_class_arguments(self):
# Yes, it doesn't make sense to use a class as a default for a
# CharField. It does make sense for custom fields though, for example
# an enumfield that takes the enum class as an argument.
class DeconstructibleInstances:
def deconstruct(self):
return ('DeconstructibleInstances', [], {})
string = MigrationWriter.serialize(models.CharField(default=DeconstructibleInstances))[0]
self.assertEqual(string, "models.CharField(default=migrations.test_writer.DeconstructibleInstances)")
def test_register_serializer(self):
class ComplexSerializer(BaseSerializer):
def serialize(self):
return 'complex(%r)' % self.value, {}
MigrationWriter.register_serializer(complex, ComplexSerializer)
self.assertSerializedEqual(complex(1, 2))
MigrationWriter.unregister_serializer(complex)
with self.assertRaisesMessage(ValueError, 'Cannot serialize: (1+2j)'):
self.assertSerializedEqual(complex(1, 2))
def test_register_non_serializer(self):
with self.assertRaisesMessage(ValueError, "'TestModel1' must inherit from 'BaseSerializer'."):
MigrationWriter.register_serializer(complex, TestModel1)
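    # A minimal sketch of the registration hook exercised above, assuming a
    # hypothetical serializer for fractions.Fraction (illustrative only):
    #
    #   from fractions import Fraction
    #
    #   class FractionSerializer(BaseSerializer):
    #       def serialize(self):
    #           return 'Fraction(%r)' % str(self.value), {'from fractions import Fraction'}
    #
    #   MigrationWriter.register_serializer(Fraction, FractionSerializer)
    #   # ...and MigrationWriter.unregister_serializer(Fraction) to undo it.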
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, migrations, models, transaction
from django.db.migrations.migration import Migration
from django.db.migrations.operations import CreateModel
from django.db.migrations.operations.fields import FieldOperation
from django.db.migrations.state import ModelState, ProjectState
from django.db.models.fields import NOT_PROVIDED
from django.db.transaction import atomic
from django.db.utils import IntegrityError
from django.test import SimpleTestCase, override_settings, skipUnlessDBFeature
from .models import FoodManager, FoodQuerySet, UnicodeModel
from .test_base import MigrationTestBase
class Mixin:
pass
class OperationTestBase(MigrationTestBase):
"""
Common functions to help test operations.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._initial_table_names = frozenset(connection.introspection.table_names())
def tearDown(self):
self.cleanup_test_tables()
super().tearDown()
def cleanup_test_tables(self):
table_names = frozenset(connection.introspection.table_names()) - self._initial_table_names
with connection.schema_editor() as editor:
with connection.constraint_checks_disabled():
for table_name in table_names:
editor.execute(editor.sql_delete_table % {
'table': editor.quote_name(table_name),
})
def apply_operations(self, app_label, project_state, operations, atomic=True):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor(atomic=atomic) as editor:
return migration.apply(project_state, editor)
def unapply_operations(self, app_label, project_state, operations, atomic=True):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor(atomic=atomic) as editor:
return migration.unapply(project_state, editor)
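    # apply_operations()/unapply_operations() push a list of operations
    # through migration.apply()/unapply() against the real database; the
    # atomic flag exists because some backends (see
    # connection.features.supports_atomic_references_rename in the tests
    # below) can't rename referenced tables inside a transaction.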
def make_test_state(self, app_label, operation, **kwargs):
"""
Makes a test state using set_up_test_model and returns the
original state and the state after the migration is applied.
"""
project_state = self.set_up_test_model(app_label, **kwargs)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
return project_state, new_state
def set_up_test_model(
self, app_label, second_model=False, third_model=False, index=False, multicol_index=False,
related_model=False, mti_model=False, proxy_model=False, manager_model=False,
unique_together=False, options=False, db_table=None, index_together=False, constraints=None):
"""
Creates a test model state and database table.
"""
# Make the "current" state
model_options = {
"swappable": "TEST_SWAP_MODEL",
"index_together": [["weight", "pink"]] if index_together else [],
"unique_together": [["pink", "weight"]] if unique_together else [],
}
if options:
model_options["permissions"] = [("can_groom", "Can groom")]
if db_table:
model_options["db_table"] = db_table
operations = [migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
("weight", models.FloatField()),
],
options=model_options,
)]
if index:
operations.append(migrations.AddIndex(
"Pony",
models.Index(fields=["pink"], name="pony_pink_idx")
))
if multicol_index:
operations.append(migrations.AddIndex(
"Pony",
models.Index(fields=["pink", "weight"], name="pony_test_idx")
))
if constraints:
for constraint in constraints:
operations.append(migrations.AddConstraint(
"Pony",
constraint,
))
if second_model:
operations.append(migrations.CreateModel(
"Stable",
[
("id", models.AutoField(primary_key=True)),
]
))
if third_model:
operations.append(migrations.CreateModel(
"Van",
[
("id", models.AutoField(primary_key=True)),
]
))
if related_model:
operations.append(migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
("friend", models.ForeignKey("self", models.CASCADE))
],
))
if mti_model:
operations.append(migrations.CreateModel(
"ShetlandPony",
fields=[
('pony_ptr', models.OneToOneField(
'Pony',
models.CASCADE,
auto_created=True,
parent_link=True,
primary_key=True,
to_field='id',
serialize=False,
)),
("cuteness", models.IntegerField(default=1)),
],
bases=['%s.Pony' % app_label],
))
if proxy_model:
operations.append(migrations.CreateModel(
"ProxyPony",
fields=[],
options={"proxy": True},
bases=['%s.Pony' % app_label],
))
if manager_model:
operations.append(migrations.CreateModel(
"Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
]
))
return self.apply_operations(app_label, ProjectState(), operations)
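    # For example, set_up_test_model('test_crmo') applies a CreateModel for
    # Pony and returns the resulting ProjectState; the backing table is named
    # after the app label ('test_crmo_pony'), which is what the
    # assertTableExists()/assertTableNotExists() checks below look for.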
class OperationTests(OperationTestBase):
"""
Tests running the operations and making sure they do what they say they do.
    Each test looks at its state changing, and then its database operation,
    both forwards and backwards.
"""
def test_create_model(self):
"""
Tests the CreateModel operation.
Most other tests use this operation as part of setup, so check failures here first.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
self.assertEqual(operation.describe(), "Create model Pony")
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["fields", "name"])
        # And the default manager is not included in the deconstructed kwargs
operation = migrations.CreateModel("Foo", fields=[], managers=[("objects", models.Manager())])
definition = operation.deconstruct()
self.assertNotIn('managers', definition[2])
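    # As in the writer tests, deconstruct() yields a (name, args, kwargs)
    # triple: definition[0] is the operation class name, definition[1] the
    # positional args, and definition[2] the keyword args.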
def test_create_model_with_duplicate_field_name(self):
with self.assertRaisesMessage(ValueError, 'Found duplicate value pink in CreateModel fields argument.'):
migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.TextField()),
("pink", models.IntegerField(default=1)),
],
)
def test_create_model_with_duplicate_base(self):
message = 'Found duplicate value test_crmo.pony in CreateModel bases argument.'
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=("test_crmo.Pony", "test_crmo.Pony",),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=("test_crmo.Pony", "test_crmo.pony",),
)
message = 'Found duplicate value migrations.unicodemodel in CreateModel bases argument.'
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(UnicodeModel, UnicodeModel,),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(UnicodeModel, 'migrations.unicodemodel',),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(UnicodeModel, 'migrations.UnicodeModel',),
)
message = "Found duplicate value <class 'django.db.models.base.Model'> in CreateModel bases argument."
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(models.Model, models.Model,),
)
message = "Found duplicate value <class 'migrations.test_operations.Mixin'> in CreateModel bases argument."
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(Mixin, Mixin,),
)
def test_create_model_with_duplicate_manager_name(self):
with self.assertRaisesMessage(ValueError, 'Found duplicate value objects in CreateModel managers argument.'):
migrations.CreateModel(
"Pony",
fields=[],
managers=[
("objects", models.Manager()),
("objects", models.Manager()),
],
)
def test_create_model_with_unique_after(self):
"""
Tests the CreateModel operation directly followed by an
AlterUniqueTogether (bug #22844 - sqlite remake issues)
"""
operation1 = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
operation2 = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("number", models.IntegerField(default=1)),
("pony", models.ForeignKey("test_crmoua.Pony", models.CASCADE)),
],
)
operation3 = migrations.AlterUniqueTogether(
"Rider",
[
("number", "pony"),
],
)
# Test the database alteration
project_state = ProjectState()
self.assertTableNotExists("test_crmoua_pony")
self.assertTableNotExists("test_crmoua_rider")
with connection.schema_editor() as editor:
new_state = project_state.clone()
operation1.state_forwards("test_crmoua", new_state)
operation1.database_forwards("test_crmoua", editor, project_state, new_state)
project_state, new_state = new_state, new_state.clone()
operation2.state_forwards("test_crmoua", new_state)
operation2.database_forwards("test_crmoua", editor, project_state, new_state)
project_state, new_state = new_state, new_state.clone()
operation3.state_forwards("test_crmoua", new_state)
operation3.database_forwards("test_crmoua", editor, project_state, new_state)
self.assertTableExists("test_crmoua_pony")
self.assertTableExists("test_crmoua_rider")
def test_create_model_m2m(self):
"""
Test the creation of a model with a ManyToMany field and the
auto-created "through" model.
"""
project_state = self.set_up_test_model("test_crmomm")
operation = migrations.CreateModel(
"Stable",
[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("Pony", related_name="stables"))
]
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_crmomm", new_state)
# Test the database alteration
self.assertTableNotExists("test_crmomm_stable_ponies")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmomm", editor, project_state, new_state)
self.assertTableExists("test_crmomm_stable")
self.assertTableExists("test_crmomm_stable_ponies")
self.assertColumnNotExists("test_crmomm_stable", "ponies")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_crmomm", "Pony")
Stable = new_state.apps.get_model("test_crmomm", "Stable")
stable = Stable.objects.create()
p1 = Pony.objects.create(pink=False, weight=4.55)
p2 = Pony.objects.create(pink=True, weight=5.43)
stable.ponies.add(p1, p2)
self.assertEqual(stable.ponies.count(), 2)
stable.ponies.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmomm", editor, new_state, project_state)
self.assertTableNotExists("test_crmomm_stable")
self.assertTableNotExists("test_crmomm_stable_ponies")
def test_create_model_inheritance(self):
"""
Tests the CreateModel operation on a multi-table inheritance setup.
"""
project_state = self.set_up_test_model("test_crmoih")
# Test the state alteration
operation = migrations.CreateModel(
"ShetlandPony",
[
('pony_ptr', models.OneToOneField(
'test_crmoih.Pony',
models.CASCADE,
auto_created=True,
primary_key=True,
to_field='id',
serialize=False,
)),
("cuteness", models.IntegerField(default=1)),
],
)
new_state = project_state.clone()
operation.state_forwards("test_crmoih", new_state)
self.assertIn(("test_crmoih", "shetlandpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crmoih_shetlandpony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmoih", editor, project_state, new_state)
self.assertTableExists("test_crmoih_shetlandpony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmoih", editor, new_state, project_state)
self.assertTableNotExists("test_crmoih_shetlandpony")
def test_create_proxy_model(self):
"""
CreateModel ignores proxy models.
"""
project_state = self.set_up_test_model("test_crprmo")
# Test the state alteration
operation = migrations.CreateModel(
"ProxyPony",
[],
options={"proxy": True},
bases=("test_crprmo.Pony",),
)
self.assertEqual(operation.describe(), "Create proxy model ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_crprmo", new_state)
self.assertIn(("test_crprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crprmo", editor, project_state, new_state)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crprmo", editor, new_state, project_state)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["bases", "fields", "name", "options"])
def test_create_unmanaged_model(self):
"""
CreateModel ignores unmanaged models.
"""
project_state = self.set_up_test_model("test_crummo")
# Test the state alteration
operation = migrations.CreateModel(
"UnmanagedPony",
[],
options={"proxy": True},
bases=("test_crummo.Pony",),
)
self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony")
new_state = project_state.clone()
operation.state_forwards("test_crummo", new_state)
self.assertIn(("test_crummo", "unmanagedpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crummo", editor, project_state, new_state)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crummo", editor, new_state, project_state)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
@skipUnlessDBFeature('supports_table_check_constraints')
def test_create_model_with_constraint(self):
where = models.Q(pink__gt=2)
check_constraint = models.CheckConstraint(check=where, name='test_constraint_pony_pink_gt_2')
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
],
options={'constraints': [check_constraint]},
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(len(new_state.models['test_crmo', 'pony'].options['constraints']), 1)
# Test database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
with connection.cursor() as cursor:
with self.assertRaises(IntegrityError):
cursor.execute("INSERT INTO test_crmo_pony (id, pink) VALUES (1, 1)")
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2]['options']['constraints'], [check_constraint])
def test_create_model_with_partial_unique_constraint(self):
partial_unique_constraint = models.UniqueConstraint(
fields=['pink'],
condition=models.Q(weight__gt=5),
name='test_constraint_pony_pink_for_weight_gt_5_uniq',
)
operation = migrations.CreateModel(
'Pony',
[
('id', models.AutoField(primary_key=True)),
('pink', models.IntegerField(default=3)),
('weight', models.FloatField()),
],
options={'constraints': [partial_unique_constraint]},
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards('test_crmo', new_state)
self.assertEqual(len(new_state.models['test_crmo', 'pony'].options['constraints']), 1)
# Test database alteration
self.assertTableNotExists('test_crmo_pony')
with connection.schema_editor() as editor:
operation.database_forwards('test_crmo', editor, project_state, new_state)
self.assertTableExists('test_crmo_pony')
# Test constraint works
Pony = new_state.apps.get_model('test_crmo', 'Pony')
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards('test_crmo', editor, new_state, project_state)
self.assertTableNotExists('test_crmo_pony')
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], 'CreateModel')
self.assertEqual(definition[1], [])
self.assertEqual(definition[2]['options']['constraints'], [partial_unique_constraint])
def test_create_model_managers(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_cmoma")
# Test the state alteration
operation = migrations.CreateModel(
"Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
]
)
self.assertEqual(operation.describe(), "Create model Food")
new_state = project_state.clone()
operation.state_forwards("test_cmoma", new_state)
self.assertIn(("test_cmoma", "food"), new_state.models)
managers = new_state.models["test_cmoma", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
def test_delete_model(self):
"""
Tests the DeleteModel operation.
"""
project_state = self.set_up_test_model("test_dlmo")
# Test the state alteration
operation = migrations.DeleteModel("Pony")
self.assertEqual(operation.describe(), "Delete model Pony")
new_state = project_state.clone()
operation.state_forwards("test_dlmo", new_state)
self.assertNotIn(("test_dlmo", "pony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlmo", editor, project_state, new_state)
self.assertTableNotExists("test_dlmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dlmo", editor, new_state, project_state)
self.assertTableExists("test_dlmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "DeleteModel")
self.assertEqual(definition[1], [])
self.assertEqual(list(definition[2]), ["name"])
def test_delete_proxy_model(self):
"""
        Tests that the DeleteModel operation ignores proxy models.
"""
project_state = self.set_up_test_model("test_dlprmo", proxy_model=True)
# Test the state alteration
operation = migrations.DeleteModel("ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_dlprmo", new_state)
self.assertIn(("test_dlprmo", "proxypony"), project_state.models)
self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlprmo", editor, project_state, new_state)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dlprmo", editor, new_state, project_state)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
def test_delete_mti_model(self):
project_state = self.set_up_test_model('test_dlmtimo', mti_model=True)
# Test the state alteration
operation = migrations.DeleteModel('ShetlandPony')
new_state = project_state.clone()
operation.state_forwards('test_dlmtimo', new_state)
self.assertIn(('test_dlmtimo', 'shetlandpony'), project_state.models)
self.assertNotIn(('test_dlmtimo', 'shetlandpony'), new_state.models)
# Test the database alteration
self.assertTableExists('test_dlmtimo_pony')
self.assertTableExists('test_dlmtimo_shetlandpony')
self.assertColumnExists('test_dlmtimo_shetlandpony', 'pony_ptr_id')
with connection.schema_editor() as editor:
operation.database_forwards('test_dlmtimo', editor, project_state, new_state)
self.assertTableExists('test_dlmtimo_pony')
self.assertTableNotExists('test_dlmtimo_shetlandpony')
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards('test_dlmtimo', editor, new_state, project_state)
self.assertTableExists('test_dlmtimo_pony')
self.assertTableExists('test_dlmtimo_shetlandpony')
self.assertColumnExists('test_dlmtimo_shetlandpony', 'pony_ptr_id')
def test_rename_model(self):
"""
Tests the RenameModel operation.
"""
project_state = self.set_up_test_model("test_rnmo", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Pony", "Horse")
self.assertEqual(operation.describe(), "Rename model Pony to Horse")
# Test initial state and database
self.assertIn(("test_rnmo", "pony"), project_state.models)
self.assertNotIn(("test_rnmo", "horse"), project_state.models)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id"))
self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id"))
# Migrate forwards
new_state = project_state.clone()
atomic_rename = connection.features.supports_atomic_references_rename
new_state = self.apply_operations("test_rnmo", new_state, [operation], atomic=atomic_rename)
# Test new state and database
self.assertNotIn(("test_rnmo", "pony"), new_state.models)
self.assertIn(("test_rnmo", "horse"), new_state.models)
# RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual("test_rnmo.Horse", new_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model)
self.assertTableNotExists("test_rnmo_pony")
self.assertTableExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id"))
self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id"))
# Migrate backwards
original_state = self.unapply_operations("test_rnmo", project_state, [operation], atomic=atomic_rename)
# Test original state and database
self.assertIn(("test_rnmo", "pony"), original_state.models)
self.assertNotIn(("test_rnmo", "horse"), original_state.models)
self.assertEqual("Pony", original_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id"))
self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id"))
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'old_name': "Pony", 'new_name': "Horse"})
def test_rename_model_state_forwards(self):
"""
        RenameModel operations shouldn't trigger the caching of rendered apps
        on a state that doesn't already have them.
"""
state = ProjectState()
state.add_model(ModelState('migrations', 'Foo', []))
operation = migrations.RenameModel('Foo', 'Bar')
operation.state_forwards('migrations', state)
self.assertNotIn('apps', state.__dict__)
self.assertNotIn(('migrations', 'foo'), state.models)
self.assertIn(('migrations', 'bar'), state.models)
# Now with apps cached.
apps = state.apps
operation = migrations.RenameModel('Bar', 'Foo')
operation.state_forwards('migrations', state)
self.assertIs(state.apps, apps)
self.assertNotIn(('migrations', 'bar'), state.models)
self.assertIn(('migrations', 'foo'), state.models)
def test_rename_model_with_self_referential_fk(self):
"""
        Tests the RenameModel operation on a model with a self-referential FK.
"""
project_state = self.set_up_test_model("test_rmwsrf", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Rider", "HorseRider")
self.assertEqual(operation.describe(), "Rename model Rider to HorseRider")
new_state = project_state.clone()
operation.state_forwards("test_rmwsrf", new_state)
self.assertNotIn(("test_rmwsrf", "rider"), new_state.models)
self.assertIn(("test_rmwsrf", "horserider"), new_state.models)
# Remember, RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual(
'self',
new_state.models["test_rmwsrf", "horserider"].fields[2][1].remote_field.model
)
HorseRider = new_state.apps.get_model('test_rmwsrf', 'horserider')
self.assertIs(HorseRider._meta.get_field('horserider').remote_field.model, HorseRider)
# Test the database alteration
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id"))
self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id"))
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_forwards("test_rmwsrf", editor, project_state, new_state)
self.assertTableNotExists("test_rmwsrf_rider")
self.assertTableExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKNotExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id"))
self.assertFKExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_horserider", "id"))
# And test reversal
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_backwards("test_rmwsrf", editor, new_state, project_state)
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id"))
self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id"))
def test_rename_model_with_superclass_fk(self):
"""
Tests the RenameModel operation on a model which has a superclass that
has a foreign key.
"""
project_state = self.set_up_test_model("test_rmwsc", related_model=True, mti_model=True)
# Test the state alteration
operation = migrations.RenameModel("ShetlandPony", "LittleHorse")
self.assertEqual(operation.describe(), "Rename model ShetlandPony to LittleHorse")
new_state = project_state.clone()
operation.state_forwards("test_rmwsc", new_state)
self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models)
self.assertIn(("test_rmwsc", "littlehorse"), new_state.models)
# RenameModel shouldn't repoint the superclass's relations, only local ones
self.assertEqual(
project_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model,
new_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model
)
# Before running the migration we have a table for Shetland Pony, not Little Horse
self.assertTableExists("test_rmwsc_shetlandpony")
self.assertTableNotExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# and the foreign key on rider points to pony, not shetland pony
self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id"))
self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id"))
with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor:
operation.database_forwards("test_rmwsc", editor, project_state, new_state)
# Now we have a little horse table, not shetland pony
self.assertTableNotExists("test_rmwsc_shetlandpony")
self.assertTableExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
            # but the foreign keys still point at pony, not little horse
self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id"))
self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id"))
def test_rename_model_with_self_referential_m2m(self):
app_label = "test_rename_model_with_self_referential_m2m"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("ReflexivePony", fields=[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("self")),
]),
])
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("ReflexivePony", "ReflexivePony2"),
], atomic=connection.features.supports_atomic_references_rename)
Pony = project_state.apps.get_model(app_label, "ReflexivePony2")
pony = Pony.objects.create()
pony.ponies.add(pony)
def test_rename_model_with_m2m(self):
app_label = "test_rename_model_with_m2m"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("Rider", fields=[
("id", models.AutoField(primary_key=True)),
]),
migrations.CreateModel("Pony", fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
]),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("Pony", "Pony2"),
], atomic=connection.features.supports_atomic_references_rename)
Pony = project_state.apps.get_model(app_label, "Pony2")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2)
def test_rename_m2m_target_model(self):
app_label = "test_rename_m2m_target_model"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("Rider", fields=[
("id", models.AutoField(primary_key=True)),
]),
migrations.CreateModel("Pony", fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
]),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("Rider", "Rider2"),
], atomic=connection.features.supports_atomic_references_rename)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider2")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2)
def test_rename_m2m_through_model(self):
app_label = "test_rename_through"
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel("Rider", fields=[
("id", models.AutoField(primary_key=True)),
]),
migrations.CreateModel("Pony", fields=[
("id", models.AutoField(primary_key=True)),
]),
migrations.CreateModel("PonyRider", fields=[
("id", models.AutoField(primary_key=True)),
("rider", models.ForeignKey("test_rename_through.Rider", models.CASCADE)),
("pony", models.ForeignKey("test_rename_through.Pony", models.CASCADE)),
]),
migrations.AddField(
"Pony",
"riders",
models.ManyToManyField("test_rename_through.Rider", through="test_rename_through.PonyRider"),
),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
PonyRider = project_state.apps.get_model(app_label, "PonyRider")
pony = Pony.objects.create()
rider = Rider.objects.create()
PonyRider.objects.create(pony=pony, rider=rider)
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameModel("PonyRider", "PonyRider2"),
])
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
PonyRider = project_state.apps.get_model(app_label, "PonyRider2")
pony = Pony.objects.first()
rider = Rider.objects.create()
PonyRider.objects.create(pony=pony, rider=rider)
self.assertEqual(Pony.objects.count(), 1)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(PonyRider.objects.count(), 2)
self.assertEqual(pony.riders.count(), 2)
def test_rename_m2m_model_after_rename_field(self):
"""RenameModel renames a many-to-many column after a RenameField."""
app_label = 'test_rename_multiple'
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel('Pony', fields=[
('id', models.AutoField(primary_key=True)),
('name', models.CharField(max_length=20)),
]),
migrations.CreateModel('Rider', fields=[
('id', models.AutoField(primary_key=True)),
('pony', models.ForeignKey('test_rename_multiple.Pony', models.CASCADE)),
]),
migrations.CreateModel('PonyRider', fields=[
('id', models.AutoField(primary_key=True)),
('riders', models.ManyToManyField('Rider')),
]),
migrations.RenameField(model_name='pony', old_name='name', new_name='fancy_name'),
migrations.RenameModel(old_name='Rider', new_name='Jockey'),
], atomic=connection.features.supports_atomic_references_rename)
Pony = project_state.apps.get_model(app_label, 'Pony')
Jockey = project_state.apps.get_model(app_label, 'Jockey')
PonyRider = project_state.apps.get_model(app_label, 'PonyRider')
# No "no such column" error means the column was renamed correctly.
pony = Pony.objects.create(fancy_name='a good name')
jockey = Jockey.objects.create(pony=pony)
ponyrider = PonyRider.objects.create()
ponyrider.riders.add(jockey)
def test_add_field(self):
"""
Tests the AddField operation.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
self.assertEqual(operation.describe(), "Add field height to Pony")
project_state, new_state = self.make_test_state("test_adfl", operation)
self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 4)
field = [
f for n, f in new_state.models["test_adfl", "pony"].fields
if n == "height"
][0]
self.assertEqual(field.default, 5)
# Test the database alteration
self.assertColumnNotExists("test_adfl_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adfl", editor, project_state, new_state)
self.assertColumnExists("test_adfl_pony", "height")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adfl", editor, new_state, project_state)
self.assertColumnNotExists("test_adfl_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_add_charfield(self):
"""
        Tests the AddField operation on CharField.
"""
project_state = self.set_up_test_model("test_adchfl")
Pony = project_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations("test_adchfl", project_state, [
migrations.AddField(
"Pony",
"text",
models.CharField(max_length=10, default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.CharField(max_length=10, default=""),
),
            # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.CharField(max_length=10, default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.CharField(max_length=10, default='"\'"'),
),
])
Pony = new_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_textfield(self):
"""
Tests the AddField operation on TextField.
"""
project_state = self.set_up_test_model("test_adtxtfl")
Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations("test_adtxtfl", project_state, [
migrations.AddField(
"Pony",
"text",
models.TextField(default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.TextField(default=""),
),
            # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.TextField(default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.TextField(default='"\'"'),
),
])
Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')
def test_add_binaryfield(self):
"""
        Tests the AddField operation on BinaryField.
"""
project_state = self.set_up_test_model("test_adbinfl")
Pony = project_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations("test_adbinfl", project_state, [
migrations.AddField(
"Pony",
"blob",
models.BinaryField(default=b"some text"),
),
migrations.AddField(
"Pony",
"empty",
models.BinaryField(default=b""),
),
            # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.BinaryField(default=b"42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.BinaryField(default=b'"\'"'),
),
])
Pony = new_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
        # SQLite may return a buffer/memoryview; cast to bytes before comparing.
self.assertEqual(bytes(pony.blob), b"some text")
self.assertEqual(bytes(pony.empty), b"")
self.assertEqual(bytes(pony.digits), b"42")
self.assertEqual(bytes(pony.quotes), b'"\'"')
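    # The digit and quote defaults in the three tests above guard the schema
    # editor's literal quoting: on backends that inline the default into the
    # DDL, a naive implementation would store '42' as the integer 42 or break
    # on embedded quotes, e.g. (illustrative, backend-dependent):
    #
    #     ALTER TABLE test_adchfl_pony ADD COLUMN digits varchar(10) DEFAULT '42'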
def test_column_name_quoting(self):
"""
Column names that are SQL keywords shouldn't cause problems when used
in migrations (#22168).
"""
project_state = self.set_up_test_model("test_regr22168")
operation = migrations.AddField(
"Pony",
"order",
models.IntegerField(default=0),
)
new_state = project_state.clone()
operation.state_forwards("test_regr22168", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_regr22168", editor, project_state, new_state)
self.assertColumnExists("test_regr22168_pony", "order")
def test_add_field_preserve_default(self):
"""
Tests the AddField operation's state alteration
when preserve_default = False.
"""
project_state = self.set_up_test_model("test_adflpd")
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=4),
preserve_default=False,
)
new_state = project_state.clone()
operation.state_forwards("test_adflpd", new_state)
self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 4)
field = [
f for n, f in new_state.models["test_adflpd", "pony"].fields
if n == "height"
][0]
self.assertEqual(field.default, NOT_PROVIDED)
# Test the database alteration
project_state.apps.get_model("test_adflpd", "pony").objects.create(
weight=4,
)
self.assertColumnNotExists("test_adflpd_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflpd", editor, project_state, new_state)
self.assertColumnExists("test_adflpd_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name", "preserve_default"])
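    # With preserve_default=False the default is used only while adding the
    # column (to populate existing rows) and is then discarded, so the state
    # field carries NOT_PROVIDED; this mirrors what makemigrations writes for
    # a one-off default:
    #
    #     migrations.AddField(
    #         model_name='pony',
    #         name='height',
    #         field=models.FloatField(null=True, default=4),
    #         preserve_default=False,
    #     ),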
def test_add_field_m2m(self):
"""
Tests the AddField operation with a ManyToManyField.
"""
project_state = self.set_up_test_model("test_adflmm", second_model=True)
# Test the state alteration
operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies"))
new_state = project_state.clone()
operation.state_forwards("test_adflmm", new_state)
self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 4)
# Test the database alteration
self.assertTableNotExists("test_adflmm_pony_stables")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflmm", editor, project_state, new_state)
self.assertTableExists("test_adflmm_pony_stables")
self.assertColumnNotExists("test_adflmm_pony", "stables")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_adflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.stables.create()
self.assertEqual(p.stables.count(), 1)
p.stables.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adflmm", editor, new_state, project_state)
self.assertTableNotExists("test_adflmm_pony_stables")
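    # Adding a ManyToManyField creates a hidden through table instead of a
    # column on the model, which is what the table/column asserts above check;
    # the generated table is roughly (sketch, types and constraints vary by
    # backend):
    #
    #     CREATE TABLE test_adflmm_pony_stables (
    #         id integer PRIMARY KEY,
    #         pony_id integer REFERENCES test_adflmm_pony (id),
    #         stable_id integer REFERENCES test_adflmm_stable (id)
    #     );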
def test_alter_field_m2m(self):
project_state = self.set_up_test_model("test_alflmm", second_model=True)
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies"))
])
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertFalse(Pony._meta.get_field('stables').blank)
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AlterField(
"Pony", "stables", models.ManyToManyField(to="Stable", related_name="ponies", blank=True)
)
])
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertTrue(Pony._meta.get_field('stables').blank)
def test_repoint_field_m2m(self):
project_state = self.set_up_test_model("test_alflmm", second_model=True, third_model=True)
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AddField("Pony", "places", models.ManyToManyField("Stable", related_name="ponies"))
])
Pony = project_state.apps.get_model("test_alflmm", "Pony")
project_state = self.apply_operations("test_alflmm", project_state, operations=[
migrations.AlterField("Pony", "places", models.ManyToManyField(to="Van", related_name="ponies"))
])
# Ensure the new field actually works
Pony = project_state.apps.get_model("test_alflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.places.create()
self.assertEqual(p.places.count(), 1)
p.places.all().delete()
def test_remove_field_m2m(self):
project_state = self.set_up_test_model("test_rmflmm", second_model=True)
project_state = self.apply_operations("test_rmflmm", project_state, operations=[
migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies"))
])
self.assertTableExists("test_rmflmm_pony_stables")
with_field_state = project_state.clone()
operations = [migrations.RemoveField("Pony", "stables")]
project_state = self.apply_operations("test_rmflmm", project_state, operations=operations)
self.assertTableNotExists("test_rmflmm_pony_stables")
# And test reversal
self.unapply_operations("test_rmflmm", with_field_state, operations=operations)
self.assertTableExists("test_rmflmm_pony_stables")
def test_remove_field_m2m_with_through(self):
project_state = self.set_up_test_model("test_rmflmmwt", second_model=True)
self.assertTableNotExists("test_rmflmmwt_ponystables")
project_state = self.apply_operations("test_rmflmmwt", project_state, operations=[
migrations.CreateModel("PonyStables", fields=[
("pony", models.ForeignKey('test_rmflmmwt.Pony', models.CASCADE)),
("stable", models.ForeignKey('test_rmflmmwt.Stable', models.CASCADE)),
]),
migrations.AddField(
"Pony", "stables",
models.ManyToManyField("Stable", related_name="ponies", through='test_rmflmmwt.PonyStables')
)
])
self.assertTableExists("test_rmflmmwt_ponystables")
operations = [migrations.RemoveField("Pony", "stables"), migrations.DeleteModel("PonyStables")]
self.apply_operations("test_rmflmmwt", project_state, operations=operations)
def test_remove_field(self):
"""
Tests the RemoveField operation.
"""
project_state = self.set_up_test_model("test_rmfl")
# Test the state alteration
operation = migrations.RemoveField("Pony", "pink")
self.assertEqual(operation.describe(), "Remove field pink from Pony")
new_state = project_state.clone()
operation.state_forwards("test_rmfl", new_state)
self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 2)
# Test the database alteration
self.assertColumnExists("test_rmfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_rmfl", editor, project_state, new_state)
self.assertColumnNotExists("test_rmfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmfl", editor, new_state, project_state)
self.assertColumnExists("test_rmfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveField")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'name': 'pink'})
def test_remove_fk(self):
"""
Tests the RemoveField operation on a foreign key.
"""
project_state = self.set_up_test_model("test_rfk", related_model=True)
self.assertColumnExists("test_rfk_rider", "pony_id")
operation = migrations.RemoveField("Rider", "pony")
new_state = project_state.clone()
operation.state_forwards("test_rfk", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_rfk", editor, project_state, new_state)
self.assertColumnNotExists("test_rfk_rider", "pony_id")
with connection.schema_editor() as editor:
operation.database_backwards("test_rfk", editor, new_state, project_state)
self.assertColumnExists("test_rfk_rider", "pony_id")
def test_alter_model_table(self):
"""
Tests the AlterModelTable operation.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony_2")
self.assertEqual(operation.describe(), "Rename table for Pony to test_almota_pony_2")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony_2")
# Test the database alteration
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableNotExists("test_almota_pony")
self.assertTableExists("test_almota_pony_2")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_almota", editor, new_state, project_state)
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelTable")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'table': "test_almota_pony_2"})
def test_alter_model_table_none(self):
"""
Tests the AlterModelTable operation if the table name is set to None.
"""
operation = migrations.AlterModelTable("Pony", None)
self.assertEqual(operation.describe(), "Rename table for Pony to (default)")
def test_alter_model_table_noop(self):
"""
Tests the AlterModelTable operation if the table name is not changed.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony")
# Test the database alteration
self.assertTableExists("test_almota_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableExists("test_almota_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_almota", editor, new_state, project_state)
self.assertTableExists("test_almota_pony")
def test_alter_model_table_m2m(self):
"""
AlterModelTable should rename auto-generated M2M tables.
"""
app_label = "test_talflmltlm2m"
pony_db_table = 'pony_foo'
project_state = self.set_up_test_model(app_label, second_model=True, db_table=pony_db_table)
# Add the M2M field
first_state = project_state.clone()
operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable"))
operation.state_forwards(app_label, first_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, first_state)
original_m2m_table = "%s_%s" % (pony_db_table, "stables")
new_m2m_table = "%s_%s" % (app_label, "pony_stables")
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
# Rename the Pony db_table which should also rename the m2m table.
second_state = first_state.clone()
operation = migrations.AlterModelTable(name='pony', table=None)
operation.state_forwards(app_label, second_state)
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_forwards(app_label, editor, first_state, second_state)
self.assertTableExists(new_m2m_table)
self.assertTableNotExists(original_m2m_table)
# And test reversal
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_backwards(app_label, editor, second_state, first_state)
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
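    # Auto-created M2M tables are named '<db_table>_<field name>', so resetting
    # Pony's db_table to the default moves the through table as computed above:
    #
    #     original_m2m_table == 'pony_foo_stables'
    #     new_m2m_table == 'test_talflmltlm2m_pony_stables'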
def test_alter_field(self):
"""
Tests the AlterField operation.
"""
project_state = self.set_up_test_model("test_alfl")
# Test the state alteration
operation = migrations.AlterField("Pony", "pink", models.IntegerField(null=True))
self.assertEqual(operation.describe(), "Alter field pink on Pony")
new_state = project_state.clone()
operation.state_forwards("test_alfl", new_state)
self.assertIs(project_state.models["test_alfl", "pony"].get_field_by_name("pink").null, False)
self.assertIs(new_state.models["test_alfl", "pony"].get_field_by_name("pink").null, True)
# Test the database alteration
self.assertColumnNotNull("test_alfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alfl", editor, project_state, new_state)
self.assertColumnNull("test_alfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alfl", editor, new_state, project_state)
self.assertColumnNotNull("test_alfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_alter_field_pk(self):
"""
        Tests the AlterField operation on primary keys (for things like
        PostgreSQL's SERIAL weirdness).
"""
project_state = self.set_up_test_model("test_alflpk")
# Test the state alteration
operation = migrations.AlterField("Pony", "id", models.IntegerField(primary_key=True))
new_state = project_state.clone()
operation.state_forwards("test_alflpk", new_state)
self.assertIsInstance(project_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.AutoField)
self.assertIsInstance(new_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.IntegerField)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpk", editor, project_state, new_state)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alflpk", editor, new_state, project_state)
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_field_pk_fk(self):
"""
        Tests that the AlterField operation on primary keys changes any FKs pointing to it.
"""
project_state = self.set_up_test_model("test_alflpkfk", related_model=True)
# Test the state alteration
operation = migrations.AlterField("Pony", "id", models.FloatField(primary_key=True))
new_state = project_state.clone()
operation.state_forwards("test_alflpkfk", new_state)
self.assertIsInstance(project_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.AutoField)
self.assertIsInstance(new_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.FloatField)
def assertIdTypeEqualsFkType():
with connection.cursor() as cursor:
id_type, id_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_pony")
if c.name == "id"
][0]
fk_type, fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_rider")
if c.name == "pony_id"
][0]
self.assertEqual(id_type, fk_type)
self.assertEqual(id_null, fk_null)
assertIdTypeEqualsFkType()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpkfk", editor, project_state, new_state)
assertIdTypeEqualsFkType()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alflpkfk", editor, new_state, project_state)
assertIdTypeEqualsFkType()
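    # The database type of an FK column follows the primary key it references:
    # retyping Pony.id from AutoField to FloatField must retype Rider.pony_id
    # in the same schema change, which assertIdTypeEqualsFkType() verifies by
    # comparing the introspected type_code (and null_ok) of both columns.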
def test_alter_field_reloads_state_on_fk_target_changes(self):
"""
If AlterField doesn't reload state appropriately, the second AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = 'alter_alter_field_reloads_state_on_fk_target_changes'
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel('Rider', fields=[
('id', models.CharField(primary_key=True, max_length=100)),
]),
migrations.CreateModel('Pony', fields=[
('id', models.CharField(primary_key=True, max_length=100)),
('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE)),
]),
migrations.CreateModel('PonyRider', fields=[
('id', models.AutoField(primary_key=True)),
('pony', models.ForeignKey('%s.Pony' % app_label, models.CASCADE)),
]),
])
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.AlterField('Rider', 'id', models.CharField(primary_key=True, max_length=99)),
migrations.AlterField('Pony', 'id', models.CharField(primary_key=True, max_length=99)),
])
def test_alter_field_reloads_state_on_fk_with_to_field_target_changes(self):
"""
If AlterField doesn't reload state appropriately, the second AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = 'alter_alter_field_reloads_state_on_fk_with_to_field_target_changes'
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel('Rider', fields=[
('id', models.CharField(primary_key=True, max_length=100)),
('slug', models.CharField(unique=True, max_length=100)),
]),
migrations.CreateModel('Pony', fields=[
('id', models.CharField(primary_key=True, max_length=100)),
('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE, to_field='slug')),
('slug', models.CharField(unique=True, max_length=100)),
]),
migrations.CreateModel('PonyRider', fields=[
('id', models.AutoField(primary_key=True)),
('pony', models.ForeignKey('%s.Pony' % app_label, models.CASCADE, to_field='slug')),
]),
])
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.AlterField('Rider', 'slug', models.CharField(unique=True, max_length=99)),
migrations.AlterField('Pony', 'slug', models.CharField(unique=True, max_length=99)),
])
def test_rename_field_reloads_state_on_fk_target_changes(self):
"""
If RenameField doesn't reload state appropriately, the AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = 'alter_rename_field_reloads_state_on_fk_target_changes'
project_state = self.apply_operations(app_label, ProjectState(), operations=[
migrations.CreateModel('Rider', fields=[
('id', models.CharField(primary_key=True, max_length=100)),
]),
migrations.CreateModel('Pony', fields=[
('id', models.CharField(primary_key=True, max_length=100)),
('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE)),
]),
migrations.CreateModel('PonyRider', fields=[
('id', models.AutoField(primary_key=True)),
('pony', models.ForeignKey('%s.Pony' % app_label, models.CASCADE)),
]),
])
project_state = self.apply_operations(app_label, project_state, operations=[
migrations.RenameField('Rider', 'id', 'id2'),
migrations.AlterField('Pony', 'id', models.CharField(primary_key=True, max_length=99)),
], atomic=connection.features.supports_atomic_references_rename)
def test_rename_field(self):
"""
Tests the RenameField operation.
"""
project_state = self.set_up_test_model("test_rnfl", unique_together=True, index_together=True)
# Test the state alteration
operation = migrations.RenameField("Pony", "pink", "blue")
self.assertEqual(operation.describe(), "Rename field pink on Pony to blue")
new_state = project_state.clone()
operation.state_forwards("test_rnfl", new_state)
self.assertIn("blue", [n for n, f in new_state.models["test_rnfl", "pony"].fields])
self.assertNotIn("pink", [n for n, f in new_state.models["test_rnfl", "pony"].fields])
# Make sure the unique_together has the renamed column too
self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['unique_together'][0])
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['unique_together'][0])
# Make sure the index_together has the renamed column too
self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['index_together'][0])
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['index_together'][0])
# Test the database alteration
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnfl", editor, project_state, new_state)
self.assertColumnExists("test_rnfl_pony", "blue")
self.assertColumnNotExists("test_rnfl_pony", "pink")
# Ensure the unique constraint has been ported over
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_rnfl_pony")
# Ensure the index constraint has been ported over
self.assertIndexExists("test_rnfl_pony", ["weight", "blue"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rnfl", editor, new_state, project_state)
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
# Ensure the index constraint has been reset
self.assertIndexExists("test_rnfl_pony", ["weight", "pink"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameField")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'old_name': "pink", 'new_name': "blue"})
def test_rename_missing_field(self):
state = ProjectState()
state.add_model(ModelState('app', 'model', []))
with self.assertRaisesMessage(FieldDoesNotExist, "app.model has no field named 'field'"):
migrations.RenameField('model', 'field', 'new_field').state_forwards('app', state)
def test_rename_referenced_field_state_forward(self):
state = ProjectState()
state.add_model(ModelState('app', 'Model', [
('id', models.AutoField(primary_key=True)),
('field', models.IntegerField(unique=True)),
]))
state.add_model(ModelState('app', 'OtherModel', [
('id', models.AutoField(primary_key=True)),
('fk', models.ForeignKey('Model', models.CASCADE, to_field='field')),
('fo', models.ForeignObject('Model', models.CASCADE, from_fields=('fk',), to_fields=('field',))),
]))
operation = migrations.RenameField('Model', 'field', 'renamed')
new_state = state.clone()
operation.state_forwards('app', new_state)
self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].remote_field.field_name, 'renamed')
self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].from_fields, ['self'])
self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].to_fields, ('renamed',))
self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].from_fields, ('fk',))
self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].to_fields, ('renamed',))
operation = migrations.RenameField('OtherModel', 'fk', 'renamed_fk')
new_state = state.clone()
operation.state_forwards('app', new_state)
self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].remote_field.field_name, 'renamed')
self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].from_fields, ('self',))
self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].to_fields, ('renamed',))
self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].from_fields, ('renamed_fk',))
self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].to_fields, ('renamed',))
def test_alter_unique_together(self):
"""
Tests the AlterUniqueTogether operation.
"""
project_state = self.set_up_test_model("test_alunto")
# Test the state alteration
operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")])
self.assertEqual(operation.describe(), "Alter unique_together for Pony (1 constraint(s))")
new_state = project_state.clone()
operation.state_forwards("test_alunto", new_state)
self.assertEqual(len(project_state.models["test_alunto", "pony"].options.get("unique_together", set())), 0)
self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1)
# Make sure we can insert duplicate rows
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alunto", editor, project_state, new_state)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alunto", editor, new_state, project_state)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test flat unique_together
operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight"))
operation.state_forwards("test_alunto", new_state)
self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterUniqueTogether")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'unique_together': {("pink", "weight")}})
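    # Both the nested form and the flat 2-tuple above describe the same single
    # constraint; AlterUniqueTogether normalizes either into a set of tuples,
    # which is what deconstruct() reports:
    #
    #     migrations.AlterUniqueTogether('Pony', [('pink', 'weight')])
    #     migrations.AlterUniqueTogether('Pony', ('pink', 'weight'))
    #     # both deconstruct with {'unique_together': {('pink', 'weight')}}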
def test_alter_unique_together_remove(self):
operation = migrations.AlterUniqueTogether("Pony", None)
self.assertEqual(operation.describe(), "Alter unique_together for Pony (0 constraint(s))")
def test_add_index(self):
"""
Test the AddIndex operation.
"""
project_state = self.set_up_test_model("test_adin")
msg = (
"Indexes passed to AddIndex operations require a name argument. "
"<Index: fields='pink'> doesn't have one."
)
with self.assertRaisesMessage(ValueError, msg):
migrations.AddIndex("Pony", models.Index(fields=["pink"]))
index = models.Index(fields=["pink"], name="test_adin_pony_pink_idx")
operation = migrations.AddIndex("Pony", index)
self.assertEqual(operation.describe(), "Create index test_adin_pony_pink_idx on field(s) pink of model Pony")
new_state = project_state.clone()
operation.state_forwards("test_adin", new_state)
# Test the database alteration
self.assertEqual(len(new_state.models["test_adin", "pony"].options['indexes']), 1)
self.assertIndexNotExists("test_adin_pony", ["pink"])
with connection.schema_editor() as editor:
operation.database_forwards("test_adin", editor, project_state, new_state)
self.assertIndexExists("test_adin_pony", ["pink"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adin", editor, new_state, project_state)
self.assertIndexNotExists("test_adin_pony", ["pink"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'index': index})
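    # An Index passed to AddIndex must carry an explicit name (hence the
    # ValueError above); a minimal valid form looks like:
    #
    #     migrations.AddIndex('Pony', models.Index(fields=['pink'], name='pony_pink_idx'))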
def test_remove_index(self):
"""
Test the RemoveIndex operation.
"""
project_state = self.set_up_test_model("test_rmin", multicol_index=True)
self.assertTableExists("test_rmin_pony")
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
operation = migrations.RemoveIndex("Pony", "pony_test_idx")
self.assertEqual(operation.describe(), "Remove index pony_test_idx from Pony")
new_state = project_state.clone()
operation.state_forwards("test_rmin", new_state)
# Test the state alteration
self.assertEqual(len(new_state.models["test_rmin", "pony"].options['indexes']), 0)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_rmin", editor, project_state, new_state)
self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmin", editor, new_state, project_state)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'name': "pony_test_idx"})
        # Also test dropping a field that has an index (SQLite table remake issue)
operations = [
migrations.RemoveIndex("Pony", "pony_test_idx"),
migrations.RemoveField("Pony", "pink"),
]
self.assertColumnExists("test_rmin_pony", "pink")
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# Test database alteration
new_state = project_state.clone()
self.apply_operations('test_rmin', new_state, operations=operations)
self.assertColumnNotExists("test_rmin_pony", "pink")
self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
# And test reversal
self.unapply_operations("test_rmin", project_state, operations=operations)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
def test_add_index_state_forwards(self):
project_state = self.set_up_test_model('test_adinsf')
index = models.Index(fields=['pink'], name='test_adinsf_pony_pink_idx')
old_model = project_state.apps.get_model('test_adinsf', 'Pony')
new_state = project_state.clone()
operation = migrations.AddIndex('Pony', index)
operation.state_forwards('test_adinsf', new_state)
new_model = new_state.apps.get_model('test_adinsf', 'Pony')
self.assertIsNot(old_model, new_model)
def test_remove_index_state_forwards(self):
project_state = self.set_up_test_model('test_rminsf')
index = models.Index(fields=['pink'], name='test_rminsf_pony_pink_idx')
migrations.AddIndex('Pony', index).state_forwards('test_rminsf', project_state)
old_model = project_state.apps.get_model('test_rminsf', 'Pony')
new_state = project_state.clone()
operation = migrations.RemoveIndex('Pony', 'test_rminsf_pony_pink_idx')
operation.state_forwards('test_rminsf', new_state)
new_model = new_state.apps.get_model('test_rminsf', 'Pony')
self.assertIsNot(old_model, new_model)
def test_alter_field_with_index(self):
"""
        Test the AlterField operation with an index to ensure indexes created
        via Meta.indexes aren't dropped by the SQLite table remake.
"""
project_state = self.set_up_test_model("test_alflin", index=True)
operation = migrations.AlterField("Pony", "pink", models.IntegerField(null=True))
new_state = project_state.clone()
operation.state_forwards("test_alflin", new_state)
# Test the database alteration
self.assertColumnNotNull("test_alflin_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alflin", editor, project_state, new_state)
# Index hasn't been dropped
self.assertIndexExists("test_alflin_pony", ["pink"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alflin", editor, new_state, project_state)
# Ensure the index is still there
self.assertIndexExists("test_alflin_pony", ["pink"])
def test_alter_index_together(self):
"""
Tests the AlterIndexTogether operation.
"""
project_state = self.set_up_test_model("test_alinto")
# Test the state alteration
operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
self.assertEqual(operation.describe(), "Alter index_together for Pony (1 constraint(s))")
new_state = project_state.clone()
operation.state_forwards("test_alinto", new_state)
self.assertEqual(len(project_state.models["test_alinto", "pony"].options.get("index_together", set())), 0)
self.assertEqual(len(new_state.models["test_alinto", "pony"].options.get("index_together", set())), 1)
# Make sure there's no matching index
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alinto", editor, project_state, new_state)
self.assertIndexExists("test_alinto_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alinto", editor, new_state, project_state)
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterIndexTogether")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'index_together': {("pink", "weight")}})
def test_alter_index_together_remove(self):
operation = migrations.AlterIndexTogether("Pony", None)
self.assertEqual(operation.describe(), "Alter index_together for Pony (0 constraint(s))")
@skipUnlessDBFeature('supports_table_check_constraints')
def test_add_constraint(self):
project_state = self.set_up_test_model("test_addconstraint")
gt_check = models.Q(pink__gt=2)
gt_constraint = models.CheckConstraint(check=gt_check, name="test_add_constraint_pony_pink_gt_2")
gt_operation = migrations.AddConstraint("Pony", gt_constraint)
self.assertEqual(
gt_operation.describe(), "Create constraint test_add_constraint_pony_pink_gt_2 on model Pony"
)
# Test the state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_addconstraint", new_state)
self.assertEqual(len(new_state.models["test_addconstraint", "pony"].options["constraints"]), 1)
Pony = new_state.apps.get_model("test_addconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test the database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards("test_addconstraint", editor, project_state, new_state)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=1.0)
# Add another one.
lt_check = models.Q(pink__lt=100)
lt_constraint = models.CheckConstraint(check=lt_check, name="test_add_constraint_pony_pink_lt_100")
lt_operation = migrations.AddConstraint("Pony", lt_constraint)
lt_operation.state_forwards("test_addconstraint", new_state)
self.assertEqual(len(new_state.models["test_addconstraint", "pony"].options["constraints"]), 2)
Pony = new_state.apps.get_model("test_addconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 2)
with connection.schema_editor() as editor:
lt_operation.database_forwards("test_addconstraint", editor, project_state, new_state)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=100, weight=1.0)
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards("test_addconstraint", editor, new_state, project_state)
Pony.objects.create(pink=1, weight=1.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'constraint': gt_constraint})
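    # Each CheckConstraint pairs a Q expression with a required name; with both
    # operations applied, the table enforces roughly (sketch, exact SQL is
    # backend-dependent):
    #
    #     CHECK ("pink" > 2) and CHECK ("pink" < 100)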
@skipUnlessDBFeature('supports_table_check_constraints')
def test_remove_constraint(self):
project_state = self.set_up_test_model("test_removeconstraint", constraints=[
models.CheckConstraint(check=models.Q(pink__gt=2), name="test_remove_constraint_pony_pink_gt_2"),
models.CheckConstraint(check=models.Q(pink__lt=100), name="test_remove_constraint_pony_pink_lt_100"),
])
gt_operation = migrations.RemoveConstraint("Pony", "test_remove_constraint_pony_pink_gt_2")
self.assertEqual(
gt_operation.describe(), "Remove constraint test_remove_constraint_pony_pink_gt_2 from model Pony"
)
# Test state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_removeconstraint", new_state)
self.assertEqual(len(new_state.models["test_removeconstraint", "pony"].options['constraints']), 1)
Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards("test_removeconstraint", editor, project_state, new_state)
Pony.objects.create(pink=1, weight=1.0).delete()
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=100, weight=1.0)
# Remove the other one.
lt_operation = migrations.RemoveConstraint("Pony", "test_remove_constraint_pony_pink_lt_100")
lt_operation.state_forwards("test_removeconstraint", new_state)
self.assertEqual(len(new_state.models["test_removeconstraint", "pony"].options['constraints']), 0)
Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor:
lt_operation.database_forwards("test_removeconstraint", editor, project_state, new_state)
Pony.objects.create(pink=100, weight=1.0).delete()
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards("test_removeconstraint", editor, new_state, project_state)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=1.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': "Pony", 'name': "test_remove_constraint_pony_pink_gt_2"})
def test_add_partial_unique_constraint(self):
project_state = self.set_up_test_model('test_addpartialuniqueconstraint')
partial_unique_constraint = models.UniqueConstraint(
fields=['pink'],
condition=models.Q(weight__gt=5),
name='test_constraint_pony_pink_for_weight_gt_5_uniq',
)
operation = migrations.AddConstraint('Pony', partial_unique_constraint)
self.assertEqual(
operation.describe(),
'Create constraint test_constraint_pony_pink_for_weight_gt_5_uniq '
'on model Pony'
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards('test_addpartialuniqueconstraint', new_state)
self.assertEqual(len(new_state.models['test_addpartialuniqueconstraint', 'pony'].options['constraints']), 1)
Pony = new_state.apps.get_model('test_addpartialuniqueconstraint', 'Pony')
self.assertEqual(len(Pony._meta.constraints), 1)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards('test_addpartialuniqueconstraint', editor, project_state, new_state)
# Test constraint works
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards('test_addpartialuniqueconstraint', editor, new_state, project_state)
# Test constraint doesn't work
Pony.objects.create(pink=1, weight=7.0)
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], 'AddConstraint')
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'model_name': 'Pony', 'constraint': partial_unique_constraint})
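    # The condition makes this a partial unique index, which only backends
    # with supports_partial_indexes can enforce; elsewhere the constraint is
    # skipped at the database level, hence the feature-gated branch above.
    # Roughly equivalent SQL on a supporting backend (sketch):
    #
    #     CREATE UNIQUE INDEX test_constraint_pony_pink_for_weight_gt_5_uniq
    #         ON test_addpartialuniqueconstraint_pony (pink) WHERE weight > 5;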
def test_remove_partial_unique_constraint(self):
project_state = self.set_up_test_model('test_removepartialuniqueconstraint', constraints=[
models.UniqueConstraint(
fields=['pink'],
condition=models.Q(weight__gt=5),
name='test_constraint_pony_pink_for_weight_gt_5_uniq',
),
])
gt_operation = migrations.RemoveConstraint('Pony', 'test_constraint_pony_pink_for_weight_gt_5_uniq')
self.assertEqual(
gt_operation.describe(), 'Remove constraint test_constraint_pony_pink_for_weight_gt_5_uniq from model Pony'
)
# Test state alteration
new_state = project_state.clone()
gt_operation.state_forwards('test_removepartialuniqueconstraint', new_state)
self.assertEqual(len(new_state.models['test_removepartialuniqueconstraint', 'pony'].options['constraints']), 0)
Pony = new_state.apps.get_model('test_removepartialuniqueconstraint', 'Pony')
self.assertEqual(len(Pony._meta.constraints), 0)
# Test database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards('test_removepartialuniqueconstraint', editor, project_state, new_state)
# Test constraint doesn't work
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
Pony.objects.create(pink=1, weight=7.0).delete()
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards('test_removepartialuniqueconstraint', editor, new_state, project_state)
# Test constraint works
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], 'RemoveConstraint')
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {
'model_name': 'Pony',
'name': 'test_constraint_pony_pink_for_weight_gt_5_uniq',
})
def test_alter_model_options(self):
"""
Tests the AlterModelOptions operation.
"""
project_state = self.set_up_test_model("test_almoop")
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {"permissions": [("can_groom", "Can groom")]})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 0)
self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 1)
self.assertEqual(new_state.models["test_almoop", "pony"].options["permissions"][0][0], "can_groom")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'options': {"permissions": [("can_groom", "Can groom")]}})
def test_alter_model_options_emptying(self):
"""
The AlterModelOptions operation removes keys from the dict (#23121)
"""
project_state = self.set_up_test_model("test_almoop", options=True)
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 1)
self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 0)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Pony", 'options': {}})
def test_alter_order_with_respect_to(self):
"""
Tests the AlterOrderWithRespectTo operation.
"""
project_state = self.set_up_test_model("test_alorwrtto", related_model=True)
# Test the state alteration
operation = migrations.AlterOrderWithRespectTo("Rider", "pony")
self.assertEqual(operation.describe(), "Set order_with_respect_to on Rider to pony")
new_state = project_state.clone()
operation.state_forwards("test_alorwrtto", new_state)
self.assertIsNone(
project_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None)
)
self.assertEqual(
new_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None),
"pony"
)
        # Make sure there's no _order column yet
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# Create some rows before alteration
rendered_state = project_state.apps
pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(weight=50)
rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=1)
rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=2)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alorwrtto", editor, project_state, new_state)
self.assertColumnExists("test_alorwrtto_rider", "_order")
# Check for correct value in rows
updated_riders = new_state.apps.get_model("test_alorwrtto", "Rider").objects.all()
self.assertEqual(updated_riders[0]._order, 0)
self.assertEqual(updated_riders[1]._order, 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alorwrtto", editor, new_state, project_state)
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterOrderWithRespectTo")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {'name': "Rider", 'order_with_respect_to': "pony"})
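    # AlterOrderWithRespectTo adds a hidden integer '_order' column; rows that
    # existed before the alteration are backfilled with 0, hence both asserts
    # above. The declarative equivalent on the model is (illustrative):
    #
    #     class Rider(models.Model):
    #         pony = models.ForeignKey('Pony', models.CASCADE)
    #
    #         class Meta:
    #             order_with_respect_to = 'pony'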
def test_alter_model_managers(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_almoma")
# Test the state alteration
operation = migrations.AlterModelManagers(
"Pony",
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
]
)
self.assertEqual(operation.describe(), "Change managers on Pony")
managers = project_state.models["test_almoma", "pony"].managers
self.assertEqual(managers, [])
new_state = project_state.clone()
operation.state_forwards("test_almoma", new_state)
self.assertIn(("test_almoma", "pony"), new_state.models)
managers = new_state.models["test_almoma", "pony"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
rendered_state = new_state.apps
model = rendered_state.get_model('test_almoma', 'pony')
self.assertIsInstance(model.food_qs, models.Manager)
self.assertIsInstance(model.food_mgr, FoodManager)
self.assertIsInstance(model.food_mgr_kwargs, FoodManager)
def test_alter_model_managers_emptying(self):
"""
        The managers on a model can be emptied.
"""
project_state = self.set_up_test_model("test_almomae", manager_model=True)
# Test the state alteration
operation = migrations.AlterModelManagers("Food", managers=[])
self.assertEqual(operation.describe(), "Change managers on Food")
self.assertIn(("test_almomae", "food"), project_state.models)
managers = project_state.models["test_almomae", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
new_state = project_state.clone()
operation.state_forwards("test_almomae", new_state)
managers = new_state.models["test_almomae", "food"].managers
self.assertEqual(managers, [])
def test_alter_fk(self):
"""
Creating and then altering an FK works correctly
and deals with the pending SQL (#23091)
"""
project_state = self.set_up_test_model("test_alfk")
# Test adding and then altering the FK in one go
create_operation = migrations.CreateModel(
name="Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
)
create_state = project_state.clone()
create_operation.state_forwards("test_alfk", create_state)
alter_operation = migrations.AlterField(
model_name='Rider',
name='pony',
field=models.ForeignKey("Pony", models.CASCADE, editable=False),
)
alter_state = create_state.clone()
alter_operation.state_forwards("test_alfk", alter_state)
with connection.schema_editor() as editor:
create_operation.database_forwards("test_alfk", editor, project_state, create_state)
alter_operation.database_forwards("test_alfk", editor, create_state, alter_state)
def test_alter_fk_non_fk(self):
"""
Altering an FK to a non-FK works (#23244)
"""
# Test the state alteration
operation = migrations.AlterField(
model_name="Rider",
name="pony",
field=models.FloatField(),
)
project_state, new_state = self.make_test_state("test_afknfk", operation, related_model=True)
# Test the database alteration
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_afknfk", editor, project_state, new_state)
self.assertColumnExists("test_afknfk_rider", "pony")
self.assertColumnNotExists("test_afknfk_rider", "pony_id")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_afknfk", editor, new_state, project_state)
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
def test_run_sql(self):
"""
Tests the RunSQL operation.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
# Use a multi-line string with a comment to test splitting on SQLite and MySQL respectively
"CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n"
"INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'i love ponies'); -- this is magic!\n"
"INSERT INTO i_love_ponies (id, special_thing) VALUES (2, 'i love django');\n"
"UPDATE i_love_ponies SET special_thing = 'Ponies' WHERE special_thing LIKE '%%ponies';"
"UPDATE i_love_ponies SET special_thing = 'Django' WHERE special_thing LIKE '%django';",
# Run delete queries to test for parameter substitution failure
# reported in #23426
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';"
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';"
"DROP TABLE i_love_ponies",
state_operations=[migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))])],
)
self.assertEqual(operation.describe(), "Raw SQL operation")
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_runsql", new_state)
self.assertEqual(len(new_state.models["test_runsql", "somethingelse"].fields), 1)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test SQL collection
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql))
operation.database_backwards("test_runsql", editor, project_state, new_state)
self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql))
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertTableExists("i_love_ponies")
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 2)
cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'")
self.assertEqual(cursor.fetchall()[0][0], 1)
cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'")
self.assertEqual(cursor.fetchall()[0][0], 1)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards("test_runsql", editor, new_state, project_state)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunSQL")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["reverse_sql", "sql", "state_operations"])
# And elidable reduction
self.assertIs(False, operation.reduce(operation, []))
elidable_operation = migrations.RunSQL('SELECT 1 FROM void;', elidable=True)
self.assertEqual(elidable_operation.reduce(operation, []), [operation])
def test_run_sql_params(self):
"""
#23426 - RunSQL should accept parameters.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"],
["DROP TABLE i_love_ponies"],
)
param_operation = migrations.RunSQL(
# forwards
(
"INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');",
["INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);", ['Ponies']],
("INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);", (3, 'Python',)),
),
# backwards
[
"DELETE FROM i_love_ponies WHERE special_thing = 'Django';",
["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None],
("DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;", [3, 'Python']),
]
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
new_state = project_state.clone()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
# Test parameter passing
with connection.schema_editor() as editor:
param_operation.database_forwards("test_runsql", editor, project_state, new_state)
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 3)
with connection.schema_editor() as editor:
param_operation.database_backwards("test_runsql", editor, new_state, project_state)
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_runsql", editor, new_state, project_state)
self.assertTableNotExists("i_love_ponies")
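    # The accepted statement forms exercised above: a plain string, a
    # (sql, params) 2-tuple with a params list/tuple, or a 2-tuple whose
    # params value is None for no substitution (illustrative table name):
    #
    #     migrations.RunSQL([
    #         "DELETE FROM t WHERE x = 'a';",              # no params
    #         ("DELETE FROM t WHERE x = %s;", ['b']),      # positional params
    #         ("DELETE FROM t WHERE x = 'c';", None),      # explicitly no params
    #     ])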
def test_run_sql_params_invalid(self):
"""
        #23426 - RunSQL should fail when a statement tuple has an incorrect
        number of elements.
"""
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
operation = migrations.RunSQL(
# forwards
[
["INSERT INTO foo (bar) VALUES ('buz');"]
],
# backwards
(
("DELETE FROM foo WHERE bar = 'buz';", 'invalid', 'parameter count'),
),
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 1"):
operation.database_forwards("test_runsql", editor, project_state, new_state)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 3"):
operation.database_backwards("test_runsql", editor, new_state, project_state)
def test_run_sql_noop(self):
"""
#24098 - Tests no-op RunSQL operations.
"""
operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, None, None)
operation.database_backwards("test_runsql", editor, None, None)
def test_run_python(self):
"""
        Tests the RunPython operation.
"""
project_state = self.set_up_test_model("test_runpython", mti_model=True)
# Create the operation
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
def inner_method_reverse(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.filter(pink=1, weight=3.55).delete()
Pony.objects.filter(weight=5).delete()
operation = migrations.RunPython(inner_method, reverse_code=inner_method_reverse)
self.assertEqual(operation.describe(), "Raw Python operation")
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards("test_runpython", new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2)
# Now test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0)
# Now test we can't use a string
with self.assertRaisesMessage(ValueError, 'RunPython must be supplied with a callable'):
migrations.RunPython("print 'ahahaha'")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code", "reverse_code"])
# Also test reversal fails, with an operation identical to above but without reverse_code set
no_reverse_operation = migrations.RunPython(inner_method)
self.assertFalse(no_reverse_operation.reversible)
with connection.schema_editor() as editor:
no_reverse_operation.database_forwards("test_runpython", editor, project_state, new_state)
with self.assertRaises(NotImplementedError):
no_reverse_operation.database_backwards("test_runpython", editor, new_state, project_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2)
def create_ponies(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
pony1 = Pony.objects.create(pink=1, weight=3.55)
self.assertIsNot(pony1.pk, None)
pony2 = Pony.objects.create(weight=5)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_ponies)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code"])
def create_shetlandponies(models, schema_editor):
ShetlandPony = models.get_model("test_runpython", "ShetlandPony")
pony1 = ShetlandPony.objects.create(weight=4.0)
self.assertIsNot(pony1.pk, None)
pony2 = ShetlandPony.objects.create(weight=5.0)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_shetlandponies)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6)
self.assertEqual(project_state.apps.get_model("test_runpython", "ShetlandPony").objects.count(), 2)
# And elidable reduction
self.assertIs(False, operation.reduce(operation, []))
elidable_operation = migrations.RunPython(inner_method, elidable=True)
self.assertEqual(elidable_operation.reduce(operation, []), [operation])
def test_run_python_atomic(self):
"""
Tests the RunPython operation correctly handles the "atomic" keyword
"""
project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True)
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpythonatomic", "Pony")
Pony.objects.create(pink=1, weight=3.55)
raise ValueError("Adrian hates ponies.")
# Verify atomicity when applying.
atomic_migration = Migration("test", "test_runpythonatomic")
atomic_migration.operations = [migrations.RunPython(inner_method, reverse_code=inner_method)]
non_atomic_migration = Migration("test", "test_runpythonatomic")
non_atomic_migration.operations = [migrations.RunPython(inner_method, reverse_code=inner_method, atomic=False)]
# If we're a fully-transactional database, both versions should rollback
if connection.features.can_rollback_ddl:
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
# Otherwise, the non-atomic operation should leave a row there
else:
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 1)
# Reset object count to zero and verify atomicity when unapplying.
project_state.apps.get_model("test_runpythonatomic", "Pony").objects.all().delete()
# On a fully-transactional database, both versions rollback.
if connection.features.can_rollback_ddl:
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.unapply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.unapply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
# Otherwise, the non-atomic operation leaves a row there.
else:
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.unapply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.unapply(project_state, editor)
self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 1)
# Verify deconstruction.
definition = non_atomic_migration.operations[0].deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["atomic", "code", "reverse_code"])
def test_run_python_related_assignment(self):
"""
#24282 - Model changes to a FK reverse side update the model
on the FK side as well.
"""
def inner_method(models, schema_editor):
Author = models.get_model("test_authors", "Author")
Book = models.get_model("test_books", "Book")
author = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author)
create_author = migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
("author", models.ForeignKey("test_authors.Author", models.CASCADE))
],
options={},
)
add_hometown = migrations.AddField(
"Author",
"hometown",
models.CharField(max_length=100),
)
create_old_man = migrations.RunPython(inner_method, inner_method)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_authors", new_state)
create_author.database_forwards("test_authors", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_books", new_state)
create_book.database_forwards("test_books", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
add_hometown.state_forwards("test_authors", new_state)
add_hometown.database_forwards("test_authors", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_old_man.state_forwards("test_books", new_state)
create_old_man.database_forwards("test_books", editor, project_state, new_state)
def test_model_with_bigautofield(self):
"""
A model with BigAutoField can be created.
"""
def create_data(models, schema_editor):
Author = models.get_model("test_author", "Author")
Book = models.get_model("test_book", "Book")
author1 = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author1)
Book.objects.create(id=2 ** 33, title="A farewell to arms", author=author1)
author2 = Author.objects.create(id=2 ** 33, name="Remarque")
Book.objects.create(title="All quiet on the western front", author=author2)
Book.objects.create(title="Arc de Triomphe", author=author2)
create_author = migrations.CreateModel(
"Author",
[
("id", models.BigAutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.BigAutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
("author", models.ForeignKey(to="test_author.Author", on_delete=models.CASCADE))
],
options={},
)
fill_data = migrations.RunPython(create_data)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_author", new_state)
create_author.database_forwards("test_author", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_book", new_state)
create_book.database_forwards("test_book", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_data.state_forwards("fill_data", new_state)
fill_data.database_forwards("fill_data", editor, project_state, new_state)
def test_autofield_foreignfield_growth(self):
"""
A field may be migrated from AutoField to BigAutoField.
"""
def create_initial_data(models, schema_editor):
Article = models.get_model("test_article", "Article")
Blog = models.get_model("test_blog", "Blog")
blog = Blog.objects.create(name="web development done right")
Article.objects.create(name="Frameworks", blog=blog)
Article.objects.create(name="Programming Languages", blog=blog)
def create_big_data(models, schema_editor):
Article = models.get_model("test_article", "Article")
Blog = models.get_model("test_blog", "Blog")
blog2 = Blog.objects.create(name="Frameworks", id=2 ** 33)
Article.objects.create(name="Django", blog=blog2)
Article.objects.create(id=2 ** 33, name="Django2", blog=blog2)
create_blog = migrations.CreateModel(
"Blog",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_article = migrations.CreateModel(
"Article",
[
("id", models.AutoField(primary_key=True)),
("blog", models.ForeignKey(to="test_blog.Blog", on_delete=models.CASCADE)),
("name", models.CharField(max_length=100)),
("data", models.TextField(default="")),
],
options={},
)
fill_initial_data = migrations.RunPython(create_initial_data, create_initial_data)
fill_big_data = migrations.RunPython(create_big_data, create_big_data)
grow_article_id = migrations.AlterField("Article", "id", models.BigAutoField(primary_key=True))
grow_blog_id = migrations.AlterField("Blog", "id", models.BigAutoField(primary_key=True))
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_blog.state_forwards("test_blog", new_state)
create_blog.database_forwards("test_blog", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_article.state_forwards("test_article", new_state)
create_article.database_forwards("test_article", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_initial_data.state_forwards("fill_initial_data", new_state)
fill_initial_data.database_forwards("fill_initial_data", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
grow_article_id.state_forwards("test_article", new_state)
grow_article_id.database_forwards("test_article", editor, project_state, new_state)
state = new_state.clone()
article = state.apps.get_model("test_article.Article")
self.assertIsInstance(article._meta.pk, models.BigAutoField)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
grow_blog_id.state_forwards("test_blog", new_state)
grow_blog_id.database_forwards("test_blog", editor, project_state, new_state)
state = new_state.clone()
blog = state.apps.get_model("test_blog.Blog")
self.assertIsInstance(blog._meta.pk, models.BigAutoField)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_big_data.state_forwards("fill_big_data", new_state)
fill_big_data.database_forwards("fill_big_data", editor, project_state, new_state)
def test_run_python_noop(self):
"""
#24098 - Tests no-op RunPython operations.
"""
project_state = ProjectState()
new_state = project_state.clone()
operation = migrations.RunPython(migrations.RunPython.noop, migrations.RunPython.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runpython", editor, project_state, new_state)
operation.database_backwards("test_runpython", editor, new_state, project_state)
def test_separate_database_and_state(self):
"""
Tests the SeparateDatabaseAndState operation.
"""
project_state = self.set_up_test_model("test_separatedatabaseandstate")
# Create the operation
database_operation = migrations.RunSQL(
"CREATE TABLE i_love_ponies (id int, special_thing int);",
"DROP TABLE i_love_ponies;"
)
state_operation = migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))])
operation = migrations.SeparateDatabaseAndState(
state_operations=[state_operation],
database_operations=[database_operation]
)
self.assertEqual(operation.describe(), "Custom state/database change combination")
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_separatedatabaseandstate", new_state)
self.assertEqual(len(new_state.models["test_separatedatabaseandstate", "somethingelse"].fields), 1)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_separatedatabaseandstate", editor, project_state, new_state)
self.assertTableExists("i_love_ponies")
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards("test_separatedatabaseandstate", editor, new_state, project_state)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "SeparateDatabaseAndState")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["database_operations", "state_operations"])
def test_separate_database_and_state2(self):
"""
A complex SeparateDatabaseAndState operation: Multiple operations both
for state and database. Verify the state dependencies within each list
and that state ops don't affect the database.
"""
app_label = "test_separatedatabaseandstate2"
project_state = self.set_up_test_model(app_label)
# Create the operation
database_operations = [
migrations.CreateModel(
"ILovePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveponies"},
),
migrations.CreateModel(
"ILoveMorePonies",
# We use IntegerField and not AutoField because
# the model is going to be deleted immediately
# and with an AutoField this fails on Oracle
[("id", models.IntegerField(primary_key=True))],
options={"db_table": "ilovemoreponies"},
),
migrations.DeleteModel("ILoveMorePonies"),
migrations.CreateModel(
"ILoveEvenMorePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveevenmoreponies"},
),
]
state_operations = [
migrations.CreateModel(
"SomethingElse",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingelse"},
),
migrations.DeleteModel("SomethingElse"),
migrations.CreateModel(
"SomethingCompletelyDifferent",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingcompletelydifferent"},
),
]
operation = migrations.SeparateDatabaseAndState(
state_operations=state_operations,
database_operations=database_operations,
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
def assertModelsAndTables(after_db):
# Tables and models exist, or don't, as they should:
self.assertNotIn((app_label, "somethingelse"), new_state.models)
self.assertEqual(len(new_state.models[app_label, "somethingcompletelydifferent"].fields), 1)
self.assertNotIn((app_label, "iloveponiesonies"), new_state.models)
self.assertNotIn((app_label, "ilovemoreponies"), new_state.models)
self.assertNotIn((app_label, "iloveevenmoreponies"), new_state.models)
self.assertTableNotExists("somethingelse")
self.assertTableNotExists("somethingcompletelydifferent")
self.assertTableNotExists("ilovemoreponies")
if after_db:
self.assertTableExists("iloveponies")
self.assertTableExists("iloveevenmoreponies")
else:
self.assertTableNotExists("iloveponies")
self.assertTableNotExists("iloveevenmoreponies")
assertModelsAndTables(after_db=False)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertModelsAndTables(after_db=True)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertModelsAndTables(after_db=False)
class SwappableOperationTests(OperationTestBase):
"""
    Key operations ignore swappable models. (There's no need to replicate
    every operation here; the swap-checking logic lives in a common base
    class.)
"""
available_apps = ['migrations']
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_create_ignore_swapped(self):
"""
        The CreateModel operation ignores swapped models.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
options={
"swappable": "TEST_SWAP_MODEL",
},
)
# Test the state alteration (it should still be there!)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crigsw", new_state)
self.assertEqual(new_state.models["test_crigsw", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crigsw", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crigsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crigsw", editor, project_state, new_state)
self.assertTableNotExists("test_crigsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crigsw", editor, new_state, project_state)
self.assertTableNotExists("test_crigsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_delete_ignore_swapped(self):
"""
        The DeleteModel operation ignores swapped models.
"""
operation = migrations.DeleteModel("Pony")
project_state, new_state = self.make_test_state("test_dligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_dligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dligsw", editor, project_state, new_state)
self.assertTableNotExists("test_dligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dligsw", editor, new_state, project_state)
self.assertTableNotExists("test_dligsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_add_field_ignore_swapped(self):
"""
        The AddField operation ignores swapped models.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
project_state, new_state = self.make_test_state("test_adfligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_adfligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_adfligsw", editor, project_state, new_state)
self.assertTableNotExists("test_adfligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adfligsw", editor, new_state, project_state)
self.assertTableNotExists("test_adfligsw_pony")
@override_settings(TEST_SWAP_MODEL='migrations.SomeFakeModel')
def test_indexes_ignore_swapped(self):
"""
Add/RemoveIndex operations ignore swapped models.
"""
operation = migrations.AddIndex('Pony', models.Index(fields=['pink'], name='my_name_idx'))
project_state, new_state = self.make_test_state('test_adinigsw', operation)
with connection.schema_editor() as editor:
# No database queries should be run for swapped models
operation.database_forwards('test_adinigsw', editor, project_state, new_state)
operation.database_backwards('test_adinigsw', editor, new_state, project_state)
operation = migrations.RemoveIndex('Pony', models.Index(fields=['pink'], name='my_name_idx'))
project_state, new_state = self.make_test_state("test_rminigsw", operation)
with connection.schema_editor() as editor:
operation.database_forwards('test_rminigsw', editor, project_state, new_state)
operation.database_backwards('test_rminigsw', editor, new_state, project_state)
class TestCreateModel(SimpleTestCase):
def test_references_model_mixin(self):
CreateModel('name', [], bases=(Mixin, models.Model)).references_model('other_model')
class FieldOperationTests(SimpleTestCase):
def test_references_model(self):
operation = FieldOperation('MoDel', 'field', models.ForeignKey('Other', models.CASCADE))
# Model name match.
self.assertIs(operation.references_model('mOdEl'), True)
# Referenced field.
self.assertIs(operation.references_model('oTher'), True)
# Doesn't reference.
self.assertIs(operation.references_model('Whatever'), False)
def test_references_field_by_name(self):
operation = FieldOperation('MoDel', 'field', models.BooleanField(default=False))
self.assertIs(operation.references_field('model', 'field'), True)
def test_references_field_by_remote_field_model(self):
operation = FieldOperation('Model', 'field', models.ForeignKey('Other', models.CASCADE))
self.assertIs(operation.references_field('Other', 'whatever'), True)
self.assertIs(operation.references_field('Missing', 'whatever'), False)
def test_references_field_by_from_fields(self):
operation = FieldOperation(
'Model', 'field', models.fields.related.ForeignObject('Other', models.CASCADE, ['from'], ['to'])
)
self.assertIs(operation.references_field('Model', 'from'), True)
self.assertIs(operation.references_field('Model', 'to'), False)
self.assertIs(operation.references_field('Other', 'from'), False)
        self.assertIs(operation.references_field('Other', 'to'), True)
def test_references_field_by_to_fields(self):
operation = FieldOperation('Model', 'field', models.ForeignKey('Other', models.CASCADE, to_field='field'))
self.assertIs(operation.references_field('Other', 'field'), True)
self.assertIs(operation.references_field('Other', 'whatever'), False)
self.assertIs(operation.references_field('Missing', 'whatever'), False)
def test_references_field_by_through(self):
operation = FieldOperation('Model', 'field', models.ManyToManyField('Other', through='Through'))
self.assertIs(operation.references_field('Other', 'whatever'), True)
self.assertIs(operation.references_field('Through', 'whatever'), True)
self.assertIs(operation.references_field('Missing', 'whatever'), False)
def test_reference_field_by_through_fields(self):
operation = FieldOperation(
'Model', 'field', models.ManyToManyField('Other', through='Through', through_fields=('first', 'second'))
)
self.assertIs(operation.references_field('Other', 'whatever'), True)
self.assertIs(operation.references_field('Through', 'whatever'), False)
self.assertIs(operation.references_field('Through', 'first'), True)
self.assertIs(operation.references_field('Through', 'second'), True)
20dfc0930c15b5087d88392e7e1f344d009e52aafedd4b2214191e2d1b08cdf4
import os
import sys
import tempfile
import unittest
import warnings
from io import StringIO
from unittest import mock
from django.apps import apps
from django.contrib.sites.models import Site
from django.core import management
from django.core.files.temp import NamedTemporaryFile
from django.core.management import CommandError
from django.core.management.commands.dumpdata import ProxyModelWarning
from django.core.serializers.base import ProgressBar
from django.db import IntegrityError, connection
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (
Article, Category, NaturalKeyThing, PrimaryKeyUUIDModel, ProxySpy, Spy,
Tag, Visa,
)
class TestCaseFixtureLoadingTests(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def testClassFixtures(self):
"Test case has installed 3 fixture objects"
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests):
"""
Make sure that subclasses can remove fixtures from parent class (#21089).
"""
fixtures = []
def testClassFixtures(self):
"There were no fixture objects installed"
self.assertEqual(Article.objects.count(), 0)
class DumpDataAssertMixin:
def _dumpdata_assert(self, args, output, format='json', filename=None,
natural_foreign_keys=False, natural_primary_keys=False,
use_base_manager=False, exclude_list=[], primary_keys=''):
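        # Run dumpdata with the given options, read the payload from the
        # --output file when filename is set (otherwise from captured
        # stdout), and compare using a format-aware assertion.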
new_io = StringIO()
filename = filename and os.path.join(tempfile.gettempdir(), filename)
management.call_command(
'dumpdata',
*args,
format=format,
stdout=new_io,
stderr=new_io,
output=filename,
use_natural_foreign_keys=natural_foreign_keys,
use_natural_primary_keys=natural_primary_keys,
use_base_manager=use_base_manager,
exclude=exclude_list,
primary_keys=primary_keys,
)
if filename:
with open(filename) as f:
command_output = f.read()
os.remove(filename)
else:
command_output = new_io.getvalue().strip()
if format == "json":
self.assertJSONEqual(command_output, output)
elif format == "xml":
self.assertXMLEqual(command_output, output)
else:
self.assertEqual(command_output, output)
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
def test_loading_and_dumping(self):
apps.clear_cache()
Site.objects.all().delete()
# Load fixture 1. Single JSON file, with two objects.
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Try just dumping the contents of fixtures.Category
self._dumpdata_assert(
['fixtures.Category'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
'"title": "News Stories"}}]'
)
# ...and just fixtures.Article
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# ...and both
self._dumpdata_assert(
['fixtures.Category', 'fixtures.Article'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
'"title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has '
'no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", '
'"fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify a specific model twice
self._dumpdata_assert(
['fixtures.Article', 'fixtures.Article'],
(
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
)
# Specify a dump that specifies Article both explicitly and implicitly
self._dumpdata_assert(
['fixtures.Article', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify a dump that specifies Article both explicitly and implicitly,
# but lists the app first (#22025).
self._dumpdata_assert(
['fixtures', 'fixtures.Article'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Same again, but specify in the reverse order
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no '
'place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields":'
' {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify one model from one application, and an entire other application.
self._dumpdata_assert(
['fixtures.Category', 'sites'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": '
'"example.com"}}]'
)
# Load fixture 2. JSON file imported by default. Overwrites some existing objects
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
# Load fixture 3, XML format.
management.call_command('loaddata', 'fixture3.xml', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture6.json', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "law">',
], ordered=False)
# Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture7.xml', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
'<Tag: <Article: Django conquers world!> tagged "django">',
'<Tag: <Article: Django conquers world!> tagged "world domination">',
], ordered=False)
# Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture8.json', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user>',
'<Visa: Prince >'
], ordered=False)
# Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture9.xml', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user, Can delete user>',
'<Visa: Artist formerly known as "Prince" Can change user>'
], ordered=False)
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# By default, you get raw keys on dumpdata
self._dumpdata_assert(
['fixtures.book'],
'[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]'
)
# But you can get natural keys if you ask for them and they are available
self._dumpdata_assert(
['fixtures.book'],
'[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
natural_foreign_keys=True
)
# You can also omit the primary keys for models that we can get later with natural keys.
self._dumpdata_assert(
['fixtures.person'],
'[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane '
'Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as '
'\\"Prince\\""}, "model": "fixtures.person"}]',
natural_primary_keys=True
)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is '
'great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, '
'"model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": '
'"2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML '
'identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": '
'"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
'3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
'"legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", '
'"article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": '
'{"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, '
'"model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": '
'"fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", '
'"fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", '
'"fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], '
'["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": '
'"fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", '
'"user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person":'
' ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, '
'{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
natural_foreign_keys=True
)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(
['fixtures'],
'<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
'model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field '
'type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" '
'model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it '
'is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object '
'pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!'
'</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object '
'pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading '
'cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field>'
'</object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field>'
'<field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures'
'</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3'
'</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal'
'</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>'
'fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" '
'name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" '
'name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" '
'rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field '
'type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag">'
'<field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" '
'name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field>'
'<field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" '
'model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object>'
'<object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli'
'</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
'Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field '
'to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field>'
'<field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user'
'</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user'
'</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user'
'</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" '
'model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane'
' Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel">'
'<object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object>'
'<natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field>'
'</object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" '
'rel="ManyToOneRel"><natural>Artist formerly known as "Prince"</natural></field><field '
'to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural>'
'<natural>auth</natural><natural>user</natural></object></field></object><object pk="1" '
'model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field '
'to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as '
'"Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object>'
'</django-objects>',
format='xml', natural_foreign_keys=True
)
def test_dumpdata_with_excludes(self):
# Load fixture1 which has a site, two articles, and a category
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1.json', verbosity=0)
# Excluding fixtures app should only leave sites
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
exclude_list=['fixtures'],
)
# Excluding fixtures.Article/Book should leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
'{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book']
)
# Excluding fixtures and fixtures.Article/Book should be a no-op
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
'{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book']
)
# Excluding sites and fixtures.Article/Book should only leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book', 'sites']
)
# Excluding a bogus app should throw an error
with self.assertRaisesMessage(management.CommandError, "No installed app with label 'foo_app'."):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
# Excluding a bogus model should throw an error
with self.assertRaisesMessage(management.CommandError, "Unknown model: fixtures.FooModel"):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])
@unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support '?' in filenames.")
def test_load_fixture_with_special_characters(self):
management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>'])
def test_dumpdata_with_filtering_manager(self):
spy1 = Spy.objects.create(name='Paul')
spy2 = Spy.objects.create(name='Alex', cover_blown=True)
        self.assertQuerysetEqual(Spy.objects.all(), ['<Spy: Paul>'])
# Use the default manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk
)
# Dump using Django's base manager. Should return all objects,
# even those normally filtered by the manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": '
'"fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk),
use_base_manager=True
)
def test_dumpdata_with_pks(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}]',
primary_keys='2'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
'',
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures.Article', 'fixtures.category'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
def test_dumpdata_with_uuid_pks(self):
m1 = PrimaryKeyUUIDModel.objects.create()
m2 = PrimaryKeyUUIDModel.objects.create()
output = StringIO()
management.call_command(
'dumpdata', 'fixtures.PrimaryKeyUUIDModel', '--pks', ', '.join([str(m1.id), str(m2.id)]),
stdout=output,
)
result = output.getvalue()
self.assertIn('"pk": "%s"' % m1.id, result)
self.assertIn('"pk": "%s"' % m2.id, result)
def test_dumpdata_with_file_output(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]',
filename='dumpdata.json'
)
def test_dumpdata_progressbar(self):
"""
Dumpdata shows a progress bar on the command line when --output is set,
stdout is a tty, and verbosity > 0.
"""
management.call_command('loaddata', 'fixture1.json', verbosity=0)
new_io = StringIO()
new_io.isatty = lambda: True
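        # Faking isatty() makes the captured stream look like a terminal,
        # one of the three conditions for rendering the progress bar.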
with NamedTemporaryFile() as file:
options = {
'format': 'json',
'stdout': new_io,
'stderr': new_io,
'output': file.name,
}
management.call_command('dumpdata', 'fixtures', **options)
self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n'))
# Test no progress bar when verbosity = 0
options['verbosity'] = 0
new_io = StringIO()
new_io.isatty = lambda: True
options.update({'stdout': new_io, 'stderr': new_io})
management.call_command('dumpdata', 'fixtures', **options)
self.assertEqual(new_io.getvalue(), '')
def test_dumpdata_proxy_without_concrete(self):
"""
A warning is displayed if a proxy model is dumped without its concrete
parent.
"""
ProxySpy.objects.create(name='Paul')
msg = "fixtures.ProxySpy is a proxy model and won't be serialized."
with self.assertWarnsMessage(ProxyModelWarning, msg):
self._dumpdata_assert(['fixtures.ProxySpy'], '[]')
def test_dumpdata_proxy_with_concrete(self):
"""
A warning isn't displayed if a proxy model is dumped with its concrete
parent.
"""
spy = ProxySpy.objects.create(name='Paul')
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
self._dumpdata_assert(
['fixtures.ProxySpy', 'fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy.pk
)
self.assertEqual(len(warning_list), 0)
def test_compress_format_loading(self):
# Load fixture 4 (compressed), using format specification
management.call_command('loaddata', 'fixture4.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
])
def test_compressed_specified_loading(self):
# Load fixture 5 (compressed), using format *and* compression specification
management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_compressed_loading(self):
# Load fixture 5 (compressed), only compression specification
management.call_command('loaddata', 'fixture5.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_ambiguous_compressed_fixture(self):
# The name "fixture5" is ambiguous, so loading it will raise an error
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture5', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])
def test_db_loading(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0)
management.call_command('loaddata', 'db_fixture_2', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_loaddata_error_message(self):
"""
Loading a fixture which contains an invalid object outputs an error
message which contains the pk of the object that triggered the error.
"""
# MySQL needs a little prodding to reject invalid data.
# This won't affect other tests because the database connection
# is closed at the end of each test.
if connection.vendor == 'mysql':
connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
with self.assertRaises(IntegrityError) as cm:
management.call_command('loaddata', 'invalid.json', verbosity=0)
self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])
@unittest.skipUnless(connection.vendor == 'postgresql', 'psycopg2 prohibits null characters in data.')
def test_loaddata_null_characters_on_postgresql(self):
msg = (
'Could not load fixtures.Article(pk=2): '
'A string literal cannot contain NUL (0x00) characters.'
)
with self.assertRaisesMessage(ValueError, msg):
management.call_command('loaddata', 'null_character_in_field_value.json')
def test_loaddata_app_option(self):
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_1' found."):
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp")
self.assertQuerysetEqual(Article.objects.all(), [])
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures")
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
])
def test_loaddata_verbosity_three(self):
output = StringIO()
management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output)
command_output = output.getvalue()
self.assertIn(
"\rProcessed 1 object(s).\rProcessed 2 object(s)."
"\rProcessed 3 object(s).\rProcessed 4 object(s).\n",
command_output
)
def test_loading_using(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0, database='default')
management.call_command('loaddata', 'db_fixture_2', verbosity=0, database='default')
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_unmatched_identifier_loading(self):
# Try to load db fixture 3. This won't load because the database identifier doesn't match
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
management.call_command('loaddata', 'db_fixture_3', verbosity=0)
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
management.call_command('loaddata', 'db_fixture_3', verbosity=0, database='default')
self.assertQuerysetEqual(Article.objects.all(), [])
def test_output_formats(self):
# Load back in fixture 1, we need the articles from it
management.call_command('loaddata', 'fixture1', verbosity=0)
# Try to load fixture 6 using format discovery
management.call_command('loaddata', 'fixture6', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Time to reform copyright> tagged "copyright">',
'<Tag: <Article: Time to reform copyright> tagged "law">'
], ordered=False)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": '
'"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
'3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
'"law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django '
'Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, '
'{"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]',
natural_foreign_keys=True
)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(
['fixtures'],
'<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
'model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field>'
'<field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" '
'model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field>'
'<field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" '
'model="fixtures.tag"><field type="CharField" name="name">copyright</field><field '
'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
'<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
'</object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field '
'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
'<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
'</object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt'
'</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane '
'Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
'Prince</field></object></django-objects>',
format='xml', natural_foreign_keys=True
)
def test_loading_with_exclude_app(self):
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1', exclude=['fixtures'], verbosity=0)
self.assertFalse(Article.objects.exists())
self.assertFalse(Category.objects.exists())
self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])
def test_loading_with_exclude_model(self):
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1', exclude=['fixtures.Article'], verbosity=0)
self.assertFalse(Article.objects.exists())
self.assertQuerysetEqual(Category.objects.all(), ['<Category: News Stories>'])
self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])
def test_exclude_option_errors(self):
"""Excluding a bogus app or model should raise an error."""
msg = "No installed app with label 'foo_app'."
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', 'fixture1', exclude=['foo_app'], verbosity=0)
msg = "Unknown model: fixtures.FooModel"
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', 'fixture1', exclude=['fixtures.FooModel'], verbosity=0)
def test_stdin_without_format(self):
"""Reading from stdin raises an error if format isn't specified."""
msg = '--format must be specified when reading from stdin.'
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', '-', verbosity=0)
def test_loading_stdin(self):
"""Loading fixtures from stdin with json and xml."""
tests_dir = os.path.dirname(__file__)
fixture_json = os.path.join(tests_dir, 'fixtures', 'fixture1.json')
fixture_xml = os.path.join(tests_dir, 'fixtures', 'fixture3.xml')
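        # A fixture label of '-' makes loaddata read from stdin, so the
        # serialization format can't be discovered from a file extension and
        # --format must be given explicitly.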
with mock.patch('django.core.management.commands.loaddata.sys.stdin', open(fixture_json)):
management.call_command('loaddata', '--format=json', '-', verbosity=0)
self.assertEqual(Article.objects.count(), 2)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
with mock.patch('django.core.management.commands.loaddata.sys.stdin', open(fixture_xml)):
management.call_command('loaddata', '--format=xml', '-', verbosity=0)
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Time to reform copyright>',
'<Article: Poker on TV is great!>',
])
class NonexistentFixtureTests(TestCase):
"""
Custom class to limit fixture dirs.
"""
def test_loaddata_not_existent_fixture_file(self):
stdout_output = StringIO()
with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
management.call_command('loaddata', 'this_fixture_doesnt_exist', stdout=stdout_output)
@mock.patch('django.db.connection.enable_constraint_checking')
@mock.patch('django.db.connection.disable_constraint_checking')
def test_nonexistent_fixture_no_constraint_checking(
self, disable_constraint_checking, enable_constraint_checking):
"""
        If no fixtures match the loaddata command, constraint checks on the
        database shouldn't be disabled. This is performance-critical on MSSQL.
"""
with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
management.call_command('loaddata', 'this_fixture_doesnt_exist', verbosity=0)
disable_constraint_checking.assert_not_called()
enable_constraint_checking.assert_not_called()
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
available_apps = [
'fixtures',
'django.contrib.sites',
]
@skipUnlessDBFeature('supports_forward_references')
def test_format_discovery(self):
# Load fixture 1 again, using format discovery
management.call_command('loaddata', 'fixture1', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
        # Try to load fixture 2 using format discovery; this will fail
        # because there are two fixtures named 'fixture2' in the fixtures
        # directory.
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture2', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
        # The object list is unaffected.
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Load fixture 4 (compressed), using format discovery
management.call_command('loaddata', 'fixture4', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
class ForwardReferenceTests(TestCase):
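    """
    Fixtures may contain forward references: objects that refer to other
    objects defined later in the same fixture file.
    """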
def test_forward_reference_fk(self):
management.call_command('loaddata', 'forward_reference_fk.json', verbosity=0)
self.assertEqual(NaturalKeyThing.objects.count(), 2)
t1, t2 = NaturalKeyThing.objects.all()
self.assertEqual(t1.other_thing, t2)
self.assertEqual(t2.other_thing, t1)
def test_forward_reference_m2m(self):
management.call_command('loaddata', 'forward_reference_m2m.json', verbosity=0)
self.assertEqual(NaturalKeyThing.objects.count(), 3)
t1 = NaturalKeyThing.objects.get_by_natural_key('t1')
self.assertQuerysetEqual(
t1.other_things.order_by('key'),
['<NaturalKeyThing: t2>', '<NaturalKeyThing: t3>']
)
|
7341af364c06ad4c12686c4854bbe2ebc1c1d2027c57bedf324ec4a80ffc88e2 | import datetime
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg, Count, DecimalField, DurationField, F, FloatField, Func, IntegerField,
Max, Min, Sum, Value,
)
from django.db.models.expressions import Case, When
from django.test import TestCase
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone
from .models import Author, Book, Publisher, Store
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2))
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
'Using an aggregate in order_by() without also including it in '
'annotate() is not allowed: Avg(F(book__rating)'
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values('age').order_by(Avg('book__rating'))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(vals, {'age__sum': 254})
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(vals, {'friends__age__avg': Approximate(34.07, places=2)})
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(vals, {'authors__age__avg': Approximate(38.2857, places=2)})
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(vals, {'book__rating__avg': 4.0})
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(vals, {'publisher__num_awards__sum': 30})
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(vals, {'book__price__sum': Decimal('270.27')})
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(vals, {'books__authors__age__max': 57})
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(vals, {'book__publisher__num_awards__min': 1})
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(vals, {'amazon_mean': Approximate(4.08, places=2)})
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = Book.objects.annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(self.b1.id, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = Book.objects.select_related('contact').annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(self.b1.id, "159059725", 447, "Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
]
)
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg('authors__age'))
.values('pk', 'isbn', 'mean_age')
)
self.assertEqual(
list(books), [
{
"pk": self.b1.pk,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books),
[{'name': 'The Definitive Guide to Django: Web Development Done Right'}],
)
books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
]
)
books = (
Book.objects
.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_count_star(self):
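        # Count('*') should compile to COUNT(*) rather than counting a
        # specific column.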
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]['sql']
self.assertIn('SELECT COUNT(*) ', sql)
def test_count_distinct_expression(self):
aggs = Book.objects.aggregate(
distinct_ratings=Count(Case(When(pages__gt=300, then='rating')), distinct=True),
)
self.assertEqual(aggs['distinct_ratings'], 4)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
excluded from the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(list(qs), [{'rating': 4.0, 'count': 2}])
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
included in the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(
list(qs), [
{'rating': 4.0, 'count': 1},
{'rating': 4.0, 'count': 2},
]
)
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{'rating': 4.5, 'oldest': 35},
{'rating': 3.0, 'oldest': 45},
{'rating': 4.0, 'oldest': 57},
{'rating': 5.0, 'oldest': 57},
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{'rating': 5.0, 'oldest': 57},
{'rating': 4.0, 'oldest': 57},
{'rating': 3.0, 'oldest': 45},
{'rating': 4.5, 'oldest': 35},
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg('duration', output_field=DurationField())),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg('duration')),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum('duration', output_field=DurationField())),
{'duration__sum': datetime.timedelta(days=3)}
)
def test_sum_distinct_aggregate(self):
"""
Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[self.b5, self.b6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum('age'))
self.assertEqual(age_sum['age__sum'], 103)
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers,
['Apress', 'Prentice Hall', 'Expensive Publisher'],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = (
Publisher.objects
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerysetEqual(
publishers,
['Apress', 'Prentice Hall', 'Expensive Publisher'],
lambda p: p.name,
)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers,
['Apress', 'Sams', 'Prentice Hall', 'Morgan Kaufmann'],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers,
['Sams', 'Morgan Kaufmann', 'Expensive Publisher'],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = (
Author.objects
.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerysetEqual(authors, ['Brad Dayley'], lambda a: a.name)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(publishers, ['Apress', 'Prentice Hall'], lambda p: p.name)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name)
books = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
)
self.assertQuerysetEqual(
books,
['Artificial Intelligence: A Modern Approach'],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
).exclude(earliest_book=None).order_by("earliest_book").values(
'earliest_book',
'num_awards',
'id',
'name',
)
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': self.p4.id,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': self.p3.id,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': self.p1.id,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': self.p2.id,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(list(books), [(self.b1.id, '159059725', 34.5)])
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(list(books), [('159059725',)])
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(list(books), [(34.5,)])
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
.dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
        returned twice because there are books from 2008 with different
        numbers of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
self.assertEqual(max_rating['max_rating'], 5)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id')
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3})
def test_ticket17424(self):
"""
Doing exclude() on a foreign model after annotate() doesn't crash.
"""
all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
        # The value doesn't matter; we just need any negative
        # constraint on a related model that's a no-op.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
        Aggregation over a sliced queryset works correctly.
"""
qs = Book.objects.all().order_by('-rating')[0:3]
vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
        Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE, or
        select_related() columns.
"""
qs = Book.objects.all().select_for_update().order_by(
'pk').select_related('publisher').annotate(max_pk=Max('pk'))
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg('max_pk'))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql'].lower()
self.assertNotIn('for update', qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r'order by (\w+)', qstr),
[', '.join(f[1][0] for f in forced_ordering).lower()]
)
else:
self.assertNotIn('order by', qstr)
self.assertEqual(qstr.count(' join '), 0)
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i), name="none", pages=10, rating=4.0,
price=9999.98, contact=a1, publisher=p1, pubdate=thedate)
book = Book.objects.aggregate(price_sum=Sum('price'))
self.assertEqual(book['price_sum'], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, 'fail is not an aggregate expression'):
Book.objects.aggregate(fail=F('price'))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_missing_output_field_raises_error(self):
with self.assertRaisesMessage(FieldError, 'Cannot resolve expression type, unknown output_field'):
Book.objects.annotate(val=Max(2)).first()
def test_annotation_expressions(self):
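        # Summing a combined F() expression should give the same results as
        # combining two separate Sum() aggregates.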
authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
for qs in (authors, authors2):
self.assertQuerysetEqual(
qs, [
('Adrian Holovaty', 132),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 129),
('James Bennett', 63),
('Jeffrey Forcier', 128),
('Paul Bissex', 120),
('Peter Norvig', 103),
('Stuart Russell', 103),
('Wesley J. Chun', 176)
],
lambda a: (a.name, a.combined_ages)
)
def test_aggregation_expressions(self):
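        # Sum('age') and Count() both have an integer output_field, so the
        # division is truncated to 37; Avg() returns a float instead.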
a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
a3 = Author.objects.aggregate(av_age=Avg('age'))
self.assertEqual(a1, {'av_age': 37})
self.assertEqual(a2, {'av_age': 37})
self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price']
self.assertIsInstance(v, Decimal)
self.assertEqual(v, Approximate(Decimal('47.39'), places=2))
def test_order_of_precedence(self):
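        # Standard Python operator precedence applies when combining
        # aggregates: (Avg + 2) * 3 versus Avg + (2 * 3).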
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
self.assertEqual(p1, {'avg_price': Approximate(Decimal('148.18'), places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
self.assertEqual(p2, {'avg_price': Approximate(Decimal('53.39'), places=2)})
def test_combine_different_types(self):
msg = 'Expression contains mixed types. You must set output_field.'
qs = Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price'))
with self.assertRaisesMessage(FieldError, msg):
qs.first()
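        # Evaluating the queryset a second time must raise the same error.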
with self.assertRaisesMessage(FieldError, msg):
qs.first()
b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField())).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=FloatField())).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=DecimalField())).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
Author.objects.annotate(Sum(F('age') + F('friends__age')))
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum('age') / Count('age'))
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(
combined_ages=Sum(F('age') + F('friends__age')))
age = qs.aggregate(max_combined_age=Max('combined_ages'))
self.assertEqual(age['max_combined_age'], 176)
age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age=Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age'], 954)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
def test_values_annotation_with_expression(self):
        # Ensure the F() is promoted to the GROUP BY clause.
qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['another_age'], 68)
qs = qs.annotate(friend_count=Count('friends'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['friend_count'], 2)
qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
name="Adrian Holovaty").order_by('-combined_age')
self.assertEqual(
list(qs), [
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 63
}
]
)
vals = qs.values('name', 'combined_age')
self.assertEqual(
list(vals), [
{'name': 'Adrian Holovaty', 'combined_age': 69},
{'name': 'Adrian Holovaty', 'combined_age': 63},
]
)
def test_annotate_values_aggregate(self):
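        # Aggregating over an annotated alias of a field should match
        # aggregating over the field itself.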
alias_age = Author.objects.annotate(
age_alias=F('age')
).values(
'age_alias',
).aggregate(sum_age=Sum('age_alias'))
age = Author.objects.values('age').aggregate(sum_age=Sum('age'))
self.assertEqual(alias_age['sum_age'], age['sum_age'])
def test_annotate_over_annotate(self):
author = Author.objects.annotate(
age_alias=F('age')
).annotate(
sum_age=Sum('age_alias')
).get(name="Adrian Holovaty")
other_author = Author.objects.annotate(
sum_age=Sum('age')
).get(name="Adrian Holovaty")
self.assertEqual(author.sum_age, other_author.sum_age)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super().as_sql(compiler, connection)
with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price'))
def test_multi_arg_aggregate(self):
class MyMax(Max):
output_field = DecimalField()
def as_sql(self, compiler, connection):
copy = self.copy()
copy.set_source_expressions(copy.get_source_expressions()[0:1])
return super(MyMax, copy).as_sql(compiler, connection)
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Book.objects.aggregate(MyMax('pages', 'price'))
with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
Book.objects.annotate(MyMax('pages', 'price'))
Book.objects.aggregate(max_field=MyMax('pages', 'price'))
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = {'function': self.function.lower(), 'expressions': sql, 'distinct': ''}
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, 'as_' + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra['function'] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, 'as_' + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = {'function': 'MAX', 'expressions': '2', 'distinct': ''}
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, 'as_' + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('MAX('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values('rating').aggregate(
double_max_rating=Max('rating') + Max('rating'))
self.assertEqual(max_rating['double_max_rating'], 5 * 2)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id') + 5
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3 + 5})
def test_expression_on_aggregation(self):
# Create a plain expression
class Greatest(Func):
function = 'GREATEST'
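            # SQLite has no GREATEST function, but its multi-argument MAX()
            # scalar function behaves the same way.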
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='MAX', **extra_context)
qs = Publisher.objects.annotate(
price_or_median=Greatest(Avg('book__rating', output_field=DecimalField()), Avg('book__price'))
).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = Publisher.objects.annotate(
rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
output_field=FloatField())
).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs2, [1, 3], lambda v: v.num_awards)
def test_arguments_must_be_expressions(self):
msg = 'QuerySet.aggregate() received non-expression(s): %s.'
with self.assertRaisesMessage(TypeError, msg % FloatField()):
Book.objects.aggregate(FloatField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.aggregate(is_book=True)
with self.assertRaisesMessage(TypeError, msg % ', '.join([str(FloatField()), 'True'])):
Book.objects.aggregate(FloatField(), Avg('price'), is_book=True)
|
d2be807ee54a053f692a3effd6b2a57906b4a2137f4e34611e4c27fb6a5744ea | import os
import unittest
import warnings
from io import StringIO
from unittest import mock
from django.conf import settings
from django.contrib.staticfiles.finders import get_finder, get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage
from django.db import connection, connections, models, router
from django.forms import EmailField, IntegerField
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.html import HTMLParseError, parse_html
from django.test.utils import (
CaptureQueriesContext, TestContextDecorator, isolate_apps,
override_settings, setup_test_environment,
)
from django.urls import NoReverseMatch, path, reverse, reverse_lazy
from .models import Car, Person, PossessedCar
from .views import empty_response
class SkippingTestCase(SimpleTestCase):
def _assert_skipping(self, func, expected_exc, msg=None):
try:
if msg is not None:
self.assertRaisesMessage(expected_exc, msg, func)
else:
self.assertRaises(expected_exc, func)
except unittest.SkipTest:
self.fail('%s should not result in a skipped test.' % func.__name__)
def test_skip_unless_db_feature(self):
"""
Testing the django.test.skipUnlessDBFeature decorator.
"""
        # Total hack, but it works; we just want an attribute that's always true.
@skipUnlessDBFeature("__class__")
def test_func():
raise ValueError
@skipUnlessDBFeature("notprovided")
def test_func2():
raise ValueError
@skipUnlessDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipUnlessDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
self._assert_skipping(test_func, ValueError)
self._assert_skipping(test_func2, unittest.SkipTest)
self._assert_skipping(test_func3, ValueError)
self._assert_skipping(test_func4, unittest.SkipTest)
class SkipTestCase(SimpleTestCase):
@skipUnlessDBFeature('missing')
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase('test_foo').test_foo,
ValueError,
"skipUnlessDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase) "
"as SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database."
)
def test_skip_if_db_feature(self):
"""
Testing the django.test.skipIfDBFeature decorator.
"""
@skipIfDBFeature("__class__")
def test_func():
raise ValueError
@skipIfDBFeature("notprovided")
def test_func2():
raise ValueError
@skipIfDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipIfDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
@skipIfDBFeature("notprovided", "notprovided")
def test_func5():
raise ValueError
self._assert_skipping(test_func, unittest.SkipTest)
self._assert_skipping(test_func2, ValueError)
self._assert_skipping(test_func3, unittest.SkipTest)
self._assert_skipping(test_func4, unittest.SkipTest)
self._assert_skipping(test_func5, ValueError)
class SkipTestCase(SimpleTestCase):
@skipIfDBFeature('missing')
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase('test_foo').test_foo,
ValueError,
"skipIfDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase) "
"as SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database."
)
class SkippingClassTestCase(TestCase):
def test_skip_class_unless_db_feature(self):
@skipUnlessDBFeature("__class__")
class NotSkippedTests(TestCase):
def test_dummy(self):
return
@skipUnlessDBFeature("missing")
@skipIfDBFeature("__class__")
class SkippedTests(TestCase):
def test_will_be_skipped(self):
self.fail("We should never arrive here.")
@skipIfDBFeature("__dict__")
class SkippedTestsSubclass(SkippedTests):
pass
test_suite = unittest.TestSuite()
test_suite.addTest(NotSkippedTests('test_dummy'))
try:
test_suite.addTest(SkippedTests('test_will_be_skipped'))
test_suite.addTest(SkippedTestsSubclass('test_will_be_skipped'))
except unittest.SkipTest:
self.fail('SkipTest should not be raised here.')
result = unittest.TextTestRunner(stream=StringIO()).run(test_suite)
self.assertEqual(result.testsRun, 3)
self.assertEqual(len(result.skipped), 2)
self.assertEqual(result.skipped[0][1], 'Database has feature(s) __class__')
self.assertEqual(result.skipped[1][1], 'Database has feature(s) __class__')
def test_missing_default_databases(self):
@skipIfDBFeature('missing')
class MissingDatabases(SimpleTestCase):
def test_assertion_error(self):
pass
suite = unittest.TestSuite()
try:
suite.addTest(MissingDatabases('test_assertion_error'))
except unittest.SkipTest:
self.fail("SkipTest should not be raised at this stage")
runner = unittest.TextTestRunner(stream=StringIO())
msg = (
"skipIfDBFeature cannot be used on <class 'test_utils.tests."
"SkippingClassTestCase.test_missing_default_databases.<locals>."
"MissingDatabases'> as it doesn't allow queries against the "
"'default' database."
)
with self.assertRaisesMessage(ValueError, msg):
runner.run(suite)
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertNumQueriesTests(TestCase):
def test_assert_num_queries(self):
def test_func():
raise ValueError
with self.assertRaises(ValueError):
self.assertNumQueries(2, test_func)
def test_assert_num_queries_with_client(self):
person = Person.objects.create(name='test')
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
def test_func():
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.assertNumQueries(2, test_func)
@unittest.skipUnless(
connection.vendor != 'sqlite' or not connection.is_in_memory_db(),
'For SQLite in-memory tests, closing the connection destroys the database.'
)
class AssertNumQueriesUponConnectionTests(TransactionTestCase):
available_apps = []
def test_ignores_connection_configuration_queries(self):
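        # Queries issued while (re)establishing the connection shouldn't be
        # counted by assertNumQueries.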
real_ensure_connection = connection.ensure_connection
connection.close()
def make_configuration_query():
is_opening_connection = connection.connection is None
real_ensure_connection()
if is_opening_connection:
                # Avoid infinite recursion. Creating a cursor calls
                # ensure_connection(), which is currently mocked by this
                # method.
connection.cursor().execute('SELECT 1' + connection.features.bare_select_suffix)
ensure_connection = 'django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection'
with mock.patch(ensure_connection, side_effect=make_configuration_query):
with self.assertNumQueries(1):
list(Car.objects.all())
class AssertQuerysetEqualTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.p1 = Person.objects.create(name='p1')
cls.p2 = Person.objects.create(name='p2')
def test_ordered(self):
self.assertQuerysetEqual(
Person.objects.all().order_by('name'),
[repr(self.p1), repr(self.p2)]
)
def test_unordered(self):
self.assertQuerysetEqual(
Person.objects.all().order_by('name'),
[repr(self.p2), repr(self.p1)],
ordered=False
)
def test_transform(self):
self.assertQuerysetEqual(
Person.objects.all().order_by('name'),
[self.p1.pk, self.p2.pk],
transform=lambda x: x.pk
)
def test_undefined_order(self):
# Using an unordered queryset with more than one ordered value
# is an error.
msg = 'Trying to compare non-ordered queryset against more than one ordered values'
with self.assertRaisesMessage(ValueError, msg):
self.assertQuerysetEqual(
Person.objects.all(),
[repr(self.p1), repr(self.p2)]
)
# No error for one value.
self.assertQuerysetEqual(
Person.objects.filter(name='p1'),
[repr(self.p1)]
)
def test_repeated_values(self):
"""
        assertQuerysetEqual checks the number of appearances of each item
        when used with the ordered=False option.
"""
batmobile = Car.objects.create(name='Batmobile')
k2000 = Car.objects.create(name='K 2000')
PossessedCar.objects.bulk_create([
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
])
with self.assertRaises(AssertionError):
self.assertQuerysetEqual(
self.p1.cars.all(),
[repr(batmobile), repr(k2000)],
ordered=False
)
self.assertQuerysetEqual(
self.p1.cars.all(),
[repr(batmobile)] * 2 + [repr(k2000)] * 4,
ordered=False
)
@override_settings(ROOT_URLCONF='test_utils.urls')
class CaptureQueriesContextManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.person_pk = str(Person.objects.create(name='test').pk)
def test_simple(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
with CaptureQueriesContext(connection) as captured_queries:
pass
self.assertEqual(0, len(captured_queries))
def test_within(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
def test_nested(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.count()
with CaptureQueriesContext(connection) as nested_captured_queries:
Person.objects.count()
self.assertEqual(1, len(nested_captured_queries))
self.assertEqual(2, len(captured_queries))
def test_failure(self):
with self.assertRaises(TypeError):
with CaptureQueriesContext(connection):
raise TypeError
def test_with_client(self):
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 2)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
self.assertIn(self.person_pk, captured_queries[1]['sql'])
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertNumQueriesContextManagerTests(TestCase):
def test_simple(self):
with self.assertNumQueries(0):
pass
with self.assertNumQueries(1):
Person.objects.count()
with self.assertNumQueries(2):
Person.objects.count()
Person.objects.count()
def test_failure(self):
with self.assertRaises(AssertionError) as exc_info:
with self.assertNumQueries(2):
Person.objects.count()
exc_lines = str(exc_info.exception).split('\n')
self.assertEqual(exc_lines[0], '1 != 2 : 1 queries executed, 2 expected')
self.assertEqual(exc_lines[1], 'Captured queries were:')
self.assertTrue(exc_lines[2].startswith('1.')) # queries are numbered
with self.assertRaises(TypeError):
with self.assertNumQueries(4000):
raise TypeError
def test_with_client(self):
person = Person.objects.create(name="test")
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(2):
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertTemplateUsedContextManagerTests(SimpleTestCase):
def test_usage(self):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/base.html')
with self.assertTemplateUsed(template_name='template_used/base.html'):
render_to_string('template_used/base.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/include.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/extends.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/base.html')
render_to_string('template_used/base.html')
def test_nested_usage(self):
with self.assertTemplateUsed('template_used/base.html'):
with self.assertTemplateUsed('template_used/include.html'):
render_to_string('template_used/include.html')
with self.assertTemplateUsed('template_used/extends.html'):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/extends.html')
with self.assertTemplateUsed('template_used/base.html'):
with self.assertTemplateUsed('template_used/alternative.html'):
render_to_string('template_used/alternative.html')
render_to_string('template_used/base.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/extends.html')
with self.assertTemplateNotUsed('template_used/base.html'):
render_to_string('template_used/alternative.html')
render_to_string('template_used/base.html')
def test_not_used(self):
with self.assertTemplateNotUsed('template_used/base.html'):
pass
with self.assertTemplateNotUsed('template_used/alternative.html'):
pass
def test_error_message(self):
msg = 'template_used/base.html was not rendered. No template was rendered.'
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed('template_used/base.html'):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name='template_used/base.html'):
pass
msg2 = (
'template_used/base.html was not rendered. Following templates '
'were rendered: template_used/alternative.html'
)
with self.assertRaisesMessage(AssertionError, msg2):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/alternative.html')
with self.assertRaisesMessage(AssertionError, 'No templates used to render the response'):
response = self.client.get('/test_utils/no_template_used/')
self.assertTemplateUsed(response, 'template_used/base.html')
def test_failure(self):
msg = 'response and/or template_name argument must be provided'
with self.assertRaisesMessage(TypeError, msg):
with self.assertTemplateUsed():
pass
msg = 'No templates used to render the response'
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(''):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(''):
render_to_string('template_used/base.html')
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name=''):
pass
msg = (
'template_used/base.html was not rendered. Following '
'templates were rendered: template_used/alternative.html'
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/alternative.html')
def test_assert_used_on_http_response(self):
response = HttpResponse()
error_msg = (
'assertTemplateUsed() and assertTemplateNotUsed() are only '
'usable on responses fetched using the Django test Client.'
)
with self.assertRaisesMessage(ValueError, error_msg):
self.assertTemplateUsed(response, 'template.html')
with self.assertRaisesMessage(ValueError, error_msg):
self.assertTemplateNotUsed(response, 'template.html')
class HTMLEqualTests(SimpleTestCase):
def test_html_parser(self):
element = parse_html('<div><p>Hello</p></div>')
self.assertEqual(len(element.children), 1)
self.assertEqual(element.children[0].name, 'p')
self.assertEqual(element.children[0].children[0], 'Hello')
parse_html('<p>')
parse_html('<p attr>')
dom = parse_html('<p>foo')
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.name, 'p')
self.assertEqual(dom[0], 'foo')
def test_parse_html_in_script(self):
parse_html('<script>var a = "<p" + ">";</script>')
parse_html('''
<script>
var js_sha_link='<p>***</p>';
</script>
''')
        # Script content is parsed as text.
dom = parse_html('''
<script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
''')
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")
def test_self_closing_tags(self):
self_closing_tags = (
'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame',
'base', 'col',
)
for tag in self_closing_tags:
dom = parse_html('<p>Hello <%s> world</p>' % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], 'Hello')
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], 'world')
dom = parse_html('<p>Hello <%s /> world</p>' % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], 'Hello')
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], 'world')
def test_simple_equal_html(self):
self.assertHTMLEqual('', '')
self.assertHTMLEqual('<p></p>', '<p></p>')
self.assertHTMLEqual('<p></p>', ' <p> </p> ')
self.assertHTMLEqual(
'<div><p>Hello</p></div>',
'<div><p>Hello</p></div>')
self.assertHTMLEqual(
'<div><p>Hello</p></div>',
'<div> <p>Hello</p> </div>')
self.assertHTMLEqual(
'<div>\n<p>Hello</p></div>',
'<div><p>Hello</p></div>\n')
self.assertHTMLEqual(
'<div><p>Hello\nWorld !</p></div>',
'<div><p>Hello World\n!</p></div>')
self.assertHTMLEqual(
'<p>Hello World !</p>',
'<p>Hello World\n\n!</p>')
self.assertHTMLEqual('<p> </p>', '<p></p>')
self.assertHTMLEqual('<p/>', '<p></p>')
self.assertHTMLEqual('<p />', '<p></p>')
self.assertHTMLEqual('<input checked>', '<input checked="checked">')
self.assertHTMLEqual('<p>Hello', '<p> Hello')
self.assertHTMLEqual('<p>Hello</p>World', '<p>Hello</p> World')
def test_ignore_comments(self):
self.assertHTMLEqual(
'<div>Hello<!-- this is a comment --> World!</div>',
'<div>Hello World!</div>')
def test_unequal_html(self):
self.assertHTMLNotEqual('<p>Hello</p>', '<p>Hello!</p>')
self.assertHTMLNotEqual('<p>foobar</p>', '<p>foo bar</p>')
self.assertHTMLNotEqual('<p>foo bar</p>', '<p>foo bar</p>')
        self.assertHTMLNotEqual('<p>foo nbsp</p>', '<p>foo&nbsp;</p>')
        self.assertHTMLNotEqual('<p>foo #20</p>', '<p>foo&#20;</p>')
self.assertHTMLNotEqual(
'<p><span>Hello</span><span>World</span></p>',
'<p><span>Hello</span>World</p>')
self.assertHTMLNotEqual(
'<p><span>Hello</span>World</p>',
'<p><span>Hello</span><span>World</span></p>')
def test_attributes(self):
self.assertHTMLEqual(
'<input type="text" id="id_name" />',
'<input id="id_name" type="text" />')
self.assertHTMLEqual(
'''<input type='text' id="id_name" />''',
'<input id="id_name" type="text" />')
self.assertHTMLNotEqual(
'<input type="text" id="id_name" />',
'<input type="password" id="id_name" />')
def test_complex_examples(self):
self.assertHTMLEqual(
"""<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""",
"""
<tr><th>
<label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" value="John" id="id_first_name" />
</td></tr>
<tr><th>
<label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
</td></tr>
<tr><th>
<label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
</td></tr>
""")
self.assertHTMLEqual(
"""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p>
This is a valid paragraph
<div> this is a div AFTER the p</div>
</body>
</html>""", """
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p> This is a valid paragraph
<!-- browsers would close the p tag here -->
<div> this is a div AFTER the p</div>
</p> <!-- this is invalid HTML parsing, but it should make no
difference in most cases -->
</body>
</html>""")
def test_html_contain(self):
# equal html contains each other
dom1 = parse_html('<p>foo')
dom2 = parse_html('<p>foo</p>')
self.assertIn(dom1, dom2)
self.assertIn(dom2, dom1)
dom2 = parse_html('<div><p>foo</p></div>')
self.assertIn(dom1, dom2)
self.assertNotIn(dom2, dom1)
self.assertNotIn('<p>foo</p>', dom2)
self.assertIn('foo', dom2)
        # Containment also works for fragments without a single root element.
dom1 = parse_html('<p>foo</p><p>bar</p>')
dom2 = parse_html('<p>foo</p><p>bar</p>')
self.assertIn(dom1, dom2)
dom1 = parse_html('<p>foo</p>')
self.assertIn(dom1, dom2)
dom1 = parse_html('<p>bar</p>')
self.assertIn(dom1, dom2)
dom1 = parse_html('<div><p>foo</p><p>bar</p></div>')
self.assertIn(dom2, dom1)
def test_count(self):
# equal html contains each other one time
dom1 = parse_html('<p>foo')
dom2 = parse_html('<p>foo</p>')
self.assertEqual(dom1.count(dom2), 1)
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo</p><p>bar</p>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo foo</p><p>foo</p>')
self.assertEqual(dom2.count('foo'), 3)
dom2 = parse_html('<p class="bar">foo</p>')
self.assertEqual(dom2.count('bar'), 0)
self.assertEqual(dom2.count('class'), 0)
self.assertEqual(dom2.count('p'), 0)
self.assertEqual(dom2.count('o'), 2)
dom2 = parse_html('<p>foo</p><p>foo</p>')
self.assertEqual(dom2.count(dom1), 2)
dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<div><div><p>foo</p></div></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo<p>foo</p></p>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo<p>bar</p></p>')
self.assertEqual(dom2.count(dom1), 0)
# html with a root element contains the same html with no root element
dom1 = parse_html('<p>foo</p><p>bar</p>')
dom2 = parse_html('<div><p>foo</p><p>bar</p></div>')
self.assertEqual(dom2.count(dom1), 1)
def test_parsing_errors(self):
with self.assertRaises(AssertionError):
self.assertHTMLEqual('<p>', '')
with self.assertRaises(AssertionError):
self.assertHTMLEqual('', '<p>')
error_msg = (
"First argument is not valid HTML:\n"
"('Unexpected end tag `div` (Line 1, Column 6)', (1, 6))"
)
with self.assertRaisesMessage(AssertionError, error_msg):
self.assertHTMLEqual('< div></ div>', '<div></div>')
with self.assertRaises(HTMLParseError):
parse_html('</p>')
def test_contains_html(self):
response = HttpResponse('''<body>
This is a form: <form method="get">
<input type="text" name="Hello" />
</form></body>''')
self.assertNotContains(response, "<input name='Hello' type='text'>")
self.assertContains(response, '<form method="get">')
self.assertContains(response, "<input name='Hello' type='text'>", html=True)
self.assertNotContains(response, '<form method="get">', html=True)
invalid_response = HttpResponse('''<body <bad>>''')
with self.assertRaises(AssertionError):
self.assertContains(invalid_response, '<p></p>')
with self.assertRaises(AssertionError):
self.assertContains(response, '<p "whats" that>')
def test_unicode_handling(self):
response = HttpResponse('<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>')
self.assertContains(
response,
'<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>',
html=True
)
class JSONEqualTests(SimpleTestCase):
def test_simple_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_unordered(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz", "attr1": "foo"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(json1, json2)
def test_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONEqual(valid_json, invalid_json)
def test_simple_not_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
self.assertJSONNotEqual(json1, json2)
def test_simple_not_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(json1, json2)
def test_not_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(valid_json, invalid_json)
class XMLEqualTests(SimpleTestCase):
def test_simple_equal(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_unordered(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raise(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raises_message(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
msg = '''{xml1} != {xml2}
- <elem attr1='a' />
+ <elem attr2='b' attr1='a' />
? ++++++++++
'''.format(xml1=repr(xml1), xml2=repr(xml2))
with self.assertRaisesMessage(AssertionError, msg):
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal(self):
xml1 = "<elem attr1='a' attr2='c' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLNotEqual(xml1, xml2)
def test_simple_not_equal_raise(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml1, xml2)
def test_parsing_errors(self):
        xml_invalid = "<elem attr1='a attr2='b' />"
        xml2 = "<elem attr2='b' attr1='a' />"
        with self.assertRaises(AssertionError):
            self.assertXMLNotEqual(xml_invalid, xml2)
def test_comment_root(self):
xml1 = "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />"
xml2 = "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_with_leading_or_trailing_whitespace(self):
xml1 = "<elem>foo</elem> \t\n"
xml2 = " \t\n<elem>foo</elem>"
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal_with_whitespace_in_the_middle(self):
xml1 = "<elem>foo</elem><elem>bar</elem>"
xml2 = "<elem>foo</elem> <elem>bar</elem>"
self.assertXMLNotEqual(xml1, xml2)
class SkippingExtraTests(TestCase):
fixtures = ['should_not_be_loaded.json']
# HACK: This depends on internals of our TestCase subclasses
def __call__(self, result=None):
        # Detect fixture loading by counting SQL queries; it should be zero.
with self.assertNumQueries(0):
super().__call__(result)
@unittest.skip("Fixture loading should not be performed for skipped tests.")
def test_fixtures_are_skipped(self):
pass
class AssertRaisesMsgTest(SimpleTestCase):
def test_assert_raises_message(self):
msg = "'Expected message' not found in 'Unexpected message'"
# context manager form of assertRaisesMessage()
with self.assertRaisesMessage(AssertionError, msg):
with self.assertRaisesMessage(ValueError, "Expected message"):
raise ValueError("Unexpected message")
# callable form
def func():
raise ValueError("Unexpected message")
with self.assertRaisesMessage(AssertionError, msg):
self.assertRaisesMessage(ValueError, "Expected message", func)
def test_special_re_chars(self):
"""assertRaisesMessage shouldn't interpret RE special chars."""
def func1():
raise ValueError("[.*x+]y?")
with self.assertRaisesMessage(ValueError, "[.*x+]y?"):
func1()
class AssertWarnsMessageTests(SimpleTestCase):
def test_context_manager(self):
with self.assertWarnsMessage(UserWarning, 'Expected message'):
warnings.warn('Expected message', UserWarning)
def test_context_manager_failure(self):
msg = "Expected message' not found in 'Unexpected message'"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertWarnsMessage(UserWarning, 'Expected message'):
warnings.warn('Unexpected message', UserWarning)
def test_callable(self):
def func():
warnings.warn('Expected message', UserWarning)
self.assertWarnsMessage(UserWarning, 'Expected message', func)
def test_special_re_chars(self):
def func1():
warnings.warn('[.*x+]y?', UserWarning)
with self.assertWarnsMessage(UserWarning, '[.*x+]y?'):
func1()
class AssertFieldOutputTests(SimpleTestCase):
def test_assert_field_output(self):
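        # assertFieldOutput(fieldclass, valid, invalid) maps valid inputs to
        # their expected cleaned values and invalid inputs to the expected
        # lists of error messages.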
error_invalid = ['Enter a valid email address.']
self.assertFieldOutput(EmailField, {'[email protected]': '[email protected]'}, {'aaa': error_invalid})
with self.assertRaises(AssertionError):
self.assertFieldOutput(EmailField, {'[email protected]': '[email protected]'}, {'aaa': error_invalid + ['Another error']})
with self.assertRaises(AssertionError):
self.assertFieldOutput(EmailField, {'[email protected]': 'Wrong output'}, {'aaa': error_invalid})
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField, {'[email protected]': '[email protected]'}, {'aaa': ['Come on, gimme some well formatted data, dude.']}
)
def test_custom_required_message(self):
class MyCustomField(IntegerField):
default_error_messages = {
'required': 'This is really required.',
}
self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertURLEqualTests(SimpleTestCase):
def test_equal(self):
valid_tests = (
('http://example.com/?', 'http://example.com/'),
('http://example.com/?x=1&', 'http://example.com/?x=1'),
('http://example.com/?x=1&y=2', 'http://example.com/?y=2&x=1'),
('http://example.com/?x=1&y=2', 'http://example.com/?y=2&x=1'),
('http://example.com/?x=1&y=2&a=1&a=2', 'http://example.com/?a=1&a=2&y=2&x=1'),
('/path/to/?x=1&y=2&z=3', '/path/to/?z=3&y=2&x=1'),
('?x=1&y=2&z=3', '?z=3&y=2&x=1'),
('/test_utils/no_template_used/', reverse_lazy('no_template_used')),
)
for url1, url2 in valid_tests:
with self.subTest(url=url1):
self.assertURLEqual(url1, url2)
def test_not_equal(self):
invalid_tests = (
# Protocol must be the same.
('http://example.com/', 'https://example.com/'),
('http://example.com/?x=1&x=2', 'https://example.com/?x=2&x=1'),
('http://example.com/?x=1&y=bar&x=2', 'https://example.com/?y=bar&x=2&x=1'),
# Parameters of the same name must be in the same order.
('/path/to?a=1&a=2', '/path/to/?a=2&a=1')
)
for url1, url2 in invalid_tests:
with self.subTest(url=url1), self.assertRaises(AssertionError):
self.assertURLEqual(url1, url2)
def test_message(self):
msg = (
"Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual('http://example.com/?x=1&x=2', 'https://example.com/?x=2&x=1')
def test_msg_prefix(self):
msg = (
"Prefix: Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
'http://example.com/?x=1&x=2', 'https://example.com/?x=2&x=1',
msg_prefix='Prefix: ',
)
class FirstUrls:
urlpatterns = [path('first/', empty_response, name='first')]
class SecondUrls:
urlpatterns = [path('second/', empty_response, name='second')]
class SetupTestEnvironmentTests(SimpleTestCase):
def test_setup_test_environment_calling_more_than_once(self):
with self.assertRaisesMessage(RuntimeError, "setup_test_environment() was already called"):
setup_test_environment()
def test_allowed_hosts(self):
for type_ in (list, tuple):
with self.subTest(type_=type_):
allowed_hosts = type_('*')
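                # list('*') == ['*'] and tuple('*') == ('*',), so both types
                # yield a one-element container. setup_test_environment()
                # refuses to run twice while _TestState.saved_data is set, so
                # deleting it from the mocked class lets it run again here.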
with mock.patch('django.test.utils._TestState') as x:
del x.saved_data
with self.settings(ALLOWED_HOSTS=allowed_hosts):
setup_test_environment()
self.assertEqual(settings.ALLOWED_HOSTS, ['*', 'testserver'])
class OverrideSettingsTests(SimpleTestCase):
# #21518 -- If neither override_settings nor a setting_changed receiver
# clears the URL cache between tests, then one of test_first or
# test_second will fail.
@override_settings(ROOT_URLCONF=FirstUrls)
def test_urlconf_first(self):
reverse('first')
@override_settings(ROOT_URLCONF=SecondUrls)
def test_urlconf_second(self):
reverse('second')
def test_urlconf_cache(self):
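        # Reversing only succeeds while the matching ROOT_URLCONF override is
        # active; the URL cache is cleared when each override is entered and
        # exited.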
with self.assertRaises(NoReverseMatch):
reverse('first')
with self.assertRaises(NoReverseMatch):
reverse('second')
with override_settings(ROOT_URLCONF=FirstUrls):
self.client.get(reverse('first'))
with self.assertRaises(NoReverseMatch):
reverse('second')
with override_settings(ROOT_URLCONF=SecondUrls):
with self.assertRaises(NoReverseMatch):
reverse('first')
self.client.get(reverse('second'))
self.client.get(reverse('first'))
with self.assertRaises(NoReverseMatch):
reverse('second')
with self.assertRaises(NoReverseMatch):
reverse('first')
with self.assertRaises(NoReverseMatch):
reverse('second')
def test_override_media_root(self):
"""
Overriding the MEDIA_ROOT setting should be reflected in the
base_location attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, '')
with self.settings(MEDIA_ROOT='test_value'):
self.assertEqual(default_storage.base_location, 'test_value')
def test_override_media_url(self):
"""
Overriding the MEDIA_URL setting should be reflected in the
base_url attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, '')
with self.settings(MEDIA_URL='/test_value/'):
self.assertEqual(default_storage.base_url, '/test_value/')
def test_override_file_upload_permissions(self):
"""
Overriding the FILE_UPLOAD_PERMISSIONS setting should be reflected in
the file_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertIsNone(default_storage.file_permissions_mode)
with self.settings(FILE_UPLOAD_PERMISSIONS=0o777):
self.assertEqual(default_storage.file_permissions_mode, 0o777)
def test_override_file_upload_directory_permissions(self):
"""
Overriding the FILE_UPLOAD_DIRECTORY_PERMISSIONS setting should be
reflected in the directory_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertIsNone(default_storage.directory_permissions_mode)
with self.settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777):
self.assertEqual(default_storage.directory_permissions_mode, 0o777)
def test_override_database_routers(self):
"""
Overriding DATABASE_ROUTERS should update the master router.
"""
test_routers = [object()]
with self.settings(DATABASE_ROUTERS=test_routers):
self.assertEqual(router.routers, test_routers)
def test_override_static_url(self):
"""
Overriding the STATIC_URL setting should be reflected in the
base_url attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_URL='/test/'):
self.assertEqual(staticfiles_storage.base_url, '/test/')
def test_override_static_root(self):
"""
Overriding the STATIC_ROOT setting should be reflected in the
location attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_ROOT='/tmp/test'):
self.assertEqual(staticfiles_storage.location, os.path.abspath('/tmp/test'))
def test_override_staticfiles_storage(self):
"""
Overriding the STATICFILES_STORAGE setting should be reflected in
the value of django.contrib.staticfiles.storage.staticfiles_storage.
"""
new_class = 'ManifestStaticFilesStorage'
new_storage = 'django.contrib.staticfiles.storage.' + new_class
with self.settings(STATICFILES_STORAGE=new_storage):
self.assertEqual(staticfiles_storage.__class__.__name__, new_class)
def test_override_staticfiles_finders(self):
"""
Overriding the STATICFILES_FINDERS setting should be reflected in
the return value of django.contrib.staticfiles.finders.get_finders.
"""
current = get_finders()
self.assertGreater(len(list(current)), 1)
finders = ['django.contrib.staticfiles.finders.FileSystemFinder']
with self.settings(STATICFILES_FINDERS=finders):
self.assertEqual(len(list(get_finders())), len(finders))
def test_override_staticfiles_dirs(self):
"""
Overriding the STATICFILES_DIRS setting should be reflected in
the locations attribute of the
django.contrib.staticfiles.finders.FileSystemFinder instance.
"""
finder = get_finder('django.contrib.staticfiles.finders.FileSystemFinder')
test_path = '/tmp/test'
expected_location = ('', test_path)
self.assertNotIn(expected_location, finder.locations)
with self.settings(STATICFILES_DIRS=[test_path]):
finder = get_finder('django.contrib.staticfiles.finders.FileSystemFinder')
self.assertIn(expected_location, finder.locations)
class TestBadSetUpTestData(TestCase):
"""
An exception in setUpTestData() shouldn't leak a transaction which would
cascade across the rest of the test suite.
"""
class MyException(Exception):
pass
@classmethod
def setUpClass(cls):
try:
super().setUpClass()
except cls.MyException:
cls._in_atomic_block = connection.in_atomic_block
@classmethod
    def tearDownClass(cls):
        # Override to avoid a second cls._rollback_atomics(), which would fail.
# Normal setUpClass() methods won't have exception handling so this
# method wouldn't typically be run.
pass
@classmethod
def setUpTestData(cls):
# Simulate a broken setUpTestData() method.
raise cls.MyException()
def test_failure_in_setUpTestData_should_rollback_transaction(self):
# setUpTestData() should call _rollback_atomics() so that the
# transaction doesn't leak.
self.assertFalse(self._in_atomic_block)
class DisallowedDatabaseQueriesTests(SimpleTestCase):
def test_disallowed_database_connections(self):
expected_message = (
"Database connections to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(AssertionError, expected_message):
connection.connect()
with self.assertRaisesMessage(AssertionError, expected_message):
connection.temporary_connection()
def test_disallowed_database_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(AssertionError, expected_message):
Car.objects.first()
def test_disallowed_database_chunked_cursor_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(AssertionError, expected_message):
next(Car.objects.iterator())
class AllowedDatabaseQueriesTests(SimpleTestCase):
databases = {'default'}
def test_allowed_database_queries(self):
Car.objects.first()
def test_allowed_database_chunked_cursor_queries(self):
next(Car.objects.iterator(), None)
class DatabaseAliasTests(SimpleTestCase):
def setUp(self):
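        # The tests below reassign the class-level databases attribute, so
        # restore the original value after each test.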
self.addCleanup(setattr, self.__class__, 'databases', self.databases)
def test_no_close_match(self):
self.__class__.databases = {'void'}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'void' which is not defined "
"in settings.DATABASES."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_close_match(self):
self.__class__.databases = {'defualt'}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'defualt' which is not defined "
"in settings.DATABASES. Did you mean 'default'?"
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_match(self):
self.__class__.databases = {'default', 'other'}
self.assertEqual(self._validate_databases(), frozenset({'default', 'other'}))
def test_all(self):
self.__class__.databases = '__all__'
self.assertEqual(self._validate_databases(), frozenset(connections))
@isolate_apps('test_utils', attr_name='class_apps')
class IsolatedAppsTests(SimpleTestCase):
def test_installed_apps(self):
self.assertEqual([app_config.label for app_config in self.class_apps.get_app_configs()], ['test_utils'])
def test_class_decoration(self):
class ClassDecoration(models.Model):
pass
self.assertEqual(ClassDecoration._meta.apps, self.class_apps)
@isolate_apps('test_utils', kwarg_name='method_apps')
def test_method_decoration(self, method_apps):
class MethodDecoration(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
def test_context_manager(self):
with isolate_apps('test_utils') as context_apps:
class ContextManager(models.Model):
pass
self.assertEqual(ContextManager._meta.apps, context_apps)
@isolate_apps('test_utils', kwarg_name='method_apps')
def test_nested(self, method_apps):
class MethodDecoration(models.Model):
pass
with isolate_apps('test_utils') as context_apps:
class ContextManager(models.Model):
pass
with isolate_apps('test_utils') as nested_context_apps:
class NestedContextManager(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
self.assertEqual(ContextManager._meta.apps, context_apps)
self.assertEqual(NestedContextManager._meta.apps, nested_context_apps)
class DoNothingDecorator(TestContextDecorator):
def enable(self):
pass
def disable(self):
pass
class TestContextDecoratorTests(SimpleTestCase):
@mock.patch.object(DoNothingDecorator, 'disable')
def test_exception_in_setup(self, mock_disable):
"""An exception is setUp() is reraised after disable() is called."""
class ExceptionInSetUp(unittest.TestCase):
def setUp(self):
raise NotImplementedError('reraised')
decorator = DoNothingDecorator()
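        # Applying the decorator to a test class returns the class with
        # setUp() wrapped so that enable() runs first and disable() runs if
        # setUp() raises; instantiating it yields a ready-to-run test case.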
decorated_test_class = decorator.__call__(ExceptionInSetUp)()
self.assertFalse(mock_disable.called)
with self.assertRaisesMessage(NotImplementedError, 'reraised'):
decorated_test_class.setUp()
self.assertTrue(mock_disable.called)
import logging
from contextlib import contextmanager
from io import StringIO
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.core import mail
from django.core.exceptions import PermissionDenied
from django.core.files.temp import NamedTemporaryFile
from django.core.management import color
from django.http.multipartparser import MultiPartParserError
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.utils.log import (
DEFAULT_LOGGING, AdminEmailHandler, CallbackFilter, RequireDebugFalse,
RequireDebugTrue, ServerFormatter,
)
from . import views
from .logconfig import MyEmailBackend
# Logging config that predates the use of filters with mail_admins.
OLD_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
class LoggingFiltersTest(SimpleTestCase):
def test_require_debug_false_filter(self):
"""
Test the RequireDebugFalse filter class.
"""
filter_ = RequireDebugFalse()
with self.settings(DEBUG=True):
self.assertIs(filter_.filter("record is not used"), False)
with self.settings(DEBUG=False):
self.assertIs(filter_.filter("record is not used"), True)
def test_require_debug_true_filter(self):
"""
Test the RequireDebugTrue filter class.
"""
filter_ = RequireDebugTrue()
with self.settings(DEBUG=True):
self.assertIs(filter_.filter("record is not used"), True)
with self.settings(DEBUG=False):
self.assertIs(filter_.filter("record is not used"), False)
class SetupDefaultLoggingMixin:
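    # Apply Django's DEFAULT_LOGGING for the duration of the test class and
    # restore the project's LOGGING configuration afterwards.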
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._logging = settings.LOGGING
logging.config.dictConfig(DEFAULT_LOGGING)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
logging.config.dictConfig(cls._logging)
class DefaultLoggingTests(SetupDefaultLoggingMixin, LoggingCaptureMixin, SimpleTestCase):
def test_django_logger(self):
"""
        The 'django' base logger only outputs anything when DEBUG=True.
"""
self.logger.error("Hey, this is an error.")
self.assertEqual(self.logger_output.getvalue(), '')
with self.settings(DEBUG=True):
self.logger.error("Hey, this is an error.")
self.assertEqual(self.logger_output.getvalue(), 'Hey, this is an error.\n')
@override_settings(DEBUG=True)
def test_django_logger_warning(self):
self.logger.warning('warning')
self.assertEqual(self.logger_output.getvalue(), 'warning\n')
@override_settings(DEBUG=True)
def test_django_logger_info(self):
self.logger.info('info')
self.assertEqual(self.logger_output.getvalue(), 'info\n')
@override_settings(DEBUG=True)
def test_django_logger_debug(self):
self.logger.debug('debug')
self.assertEqual(self.logger_output.getvalue(), '')
class LoggingAssertionMixin:
def assertLogsRequest(self, url, level, msg, status_code, logger='django.request', exc_class=None):
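        # Issue a GET to url and assert that exactly one record is logged
        # with the given message, status_code and, if provided, exc_class.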
with self.assertLogs(logger, level) as cm:
try:
self.client.get(url)
except views.UncaughtException:
pass
self.assertEqual(
len(cm.records), 1,
"Wrong number of calls for logger %r in %r level." % (logger, level)
)
record = cm.records[0]
self.assertEqual(record.getMessage(), msg)
self.assertEqual(record.status_code, status_code)
if exc_class:
self.assertIsNotNone(record.exc_info)
self.assertEqual(record.exc_info[0], exc_class)
@override_settings(DEBUG=True, ROOT_URLCONF='logging_tests.urls')
class HandlerLoggingTests(SetupDefaultLoggingMixin, LoggingAssertionMixin, LoggingCaptureMixin, SimpleTestCase):
def test_page_found_no_warning(self):
self.client.get('/innocent/')
self.assertEqual(self.logger_output.getvalue(), '')
def test_redirect_no_warning(self):
self.client.get('/redirect/')
self.assertEqual(self.logger_output.getvalue(), '')
def test_page_not_found_warning(self):
self.assertLogsRequest(
url='/does_not_exist/',
level='WARNING',
status_code=404,
msg='Not Found: /does_not_exist/',
)
def test_page_not_found_raised(self):
self.assertLogsRequest(
url='/does_not_exist_raised/',
level='WARNING',
status_code=404,
msg='Not Found: /does_not_exist_raised/',
)
def test_uncaught_exception(self):
self.assertLogsRequest(
url='/uncaught_exception/',
level='ERROR',
status_code=500,
msg='Internal Server Error: /uncaught_exception/',
exc_class=views.UncaughtException,
)
def test_internal_server_error(self):
self.assertLogsRequest(
url='/internal_server_error/',
level='ERROR',
status_code=500,
msg='Internal Server Error: /internal_server_error/',
)
def test_internal_server_error_599(self):
self.assertLogsRequest(
url='/internal_server_error/?status=599',
level='ERROR',
status_code=599,
msg='Unknown Status Code: /internal_server_error/',
)
def test_permission_denied(self):
self.assertLogsRequest(
url='/permission_denied/',
level='WARNING',
status_code=403,
msg='Forbidden (Permission denied): /permission_denied/',
exc_class=PermissionDenied,
)
def test_multi_part_parser_error(self):
self.assertLogsRequest(
url='/multi_part_parser_error/',
level='WARNING',
status_code=400,
msg='Bad request (Unable to parse request body): /multi_part_parser_error/',
exc_class=MultiPartParserError,
)
@override_settings(
DEBUG=True,
USE_I18N=True,
LANGUAGES=[('en', 'English')],
MIDDLEWARE=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='logging_tests.urls_i18n',
)
class I18nLoggingTests(SetupDefaultLoggingMixin, LoggingCaptureMixin, SimpleTestCase):
def test_i18n_page_found_no_warning(self):
self.client.get('/exists/')
self.client.get('/en/exists/')
self.assertEqual(self.logger_output.getvalue(), '')
def test_i18n_page_not_found_warning(self):
self.client.get('/this_does_not/')
self.client.get('/en/nor_this/')
self.assertEqual(self.logger_output.getvalue(), 'Not Found: /this_does_not/\nNot Found: /en/nor_this/\n')
class CallbackFilterTest(SimpleTestCase):
def test_sense(self):
f_false = CallbackFilter(lambda r: False)
f_true = CallbackFilter(lambda r: True)
self.assertEqual(f_false.filter("record"), False)
self.assertEqual(f_true.filter("record"), True)
def test_passes_on_record(self):
collector = []
def _callback(record):
collector.append(record)
return True
f = CallbackFilter(_callback)
f.filter("a record")
self.assertEqual(collector, ["a record"])
class AdminEmailHandlerTest(SimpleTestCase):
logger = logging.getLogger('django')
request_factory = RequestFactory()
def get_admin_email_handler(self, logger):
# AdminEmailHandler does not get filtered out
# even with DEBUG=True.
admin_email_handler = [
h for h in logger.handlers
if h.__class__.__name__ == "AdminEmailHandler"
][0]
return admin_email_handler
def test_fail_silently(self):
admin_email_handler = self.get_admin_email_handler(self.logger)
self.assertTrue(admin_email_handler.connection().fail_silently)
@override_settings(
ADMINS=[('whatever admin', '[email protected]')],
EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
)
def test_accepts_args(self):
"""
User-supplied arguments and the EMAIL_SUBJECT_PREFIX setting are used
to compose the email subject (#16736).
"""
message = "Custom message that says '%s' and '%s'"
token1 = 'ping'
token2 = 'pong'
admin_email_handler = self.get_admin_email_handler(self.logger)
# Backup then override original filters
orig_filters = admin_email_handler.filters
try:
admin_email_handler.filters = []
self.logger.error(message, token1, token2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
self.assertEqual(mail.outbox[0].subject,
"-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
finally:
# Restore original filters
admin_email_handler.filters = orig_filters
@override_settings(
ADMINS=[('whatever admin', '[email protected]')],
EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
INTERNAL_IPS=['127.0.0.1'],
)
def test_accepts_args_and_request(self):
"""
The subject is also handled if being passed a request object.
"""
message = "Custom message that says '%s' and '%s'"
token1 = 'ping'
token2 = 'pong'
admin_email_handler = self.get_admin_email_handler(self.logger)
# Backup then override original filters
orig_filters = admin_email_handler.filters
try:
admin_email_handler.filters = []
request = self.request_factory.get('/')
self.logger.error(
message, token1, token2,
extra={
'status_code': 403,
'request': request,
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
self.assertEqual(mail.outbox[0].subject,
"-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
finally:
# Restore original filters
admin_email_handler.filters = orig_filters
@override_settings(
ADMINS=[('admin', '[email protected]')],
EMAIL_SUBJECT_PREFIX='',
DEBUG=False,
)
def test_subject_accepts_newlines(self):
"""
Newlines in email reports' subjects are escaped to prevent
AdminErrorHandler from failing (#17281).
"""
message = 'Message \r\n with newlines'
expected_subject = 'ERROR: Message \\r\\n with newlines'
self.assertEqual(len(mail.outbox), 0)
self.logger.error(message)
self.assertEqual(len(mail.outbox), 1)
self.assertNotIn('\n', mail.outbox[0].subject)
self.assertNotIn('\r', mail.outbox[0].subject)
self.assertEqual(mail.outbox[0].subject, expected_subject)
@override_settings(
ADMINS=[('admin', '[email protected]')],
DEBUG=False,
)
def test_uses_custom_email_backend(self):
"""
Refs #19325
"""
message = 'All work and no play makes Jack a dull boy'
admin_email_handler = self.get_admin_email_handler(self.logger)
mail_admins_called = {'called': False}
def my_mail_admins(*args, **kwargs):
connection = kwargs['connection']
self.assertIsInstance(connection, MyEmailBackend)
mail_admins_called['called'] = True
# Monkeypatches
orig_mail_admins = mail.mail_admins
orig_email_backend = admin_email_handler.email_backend
mail.mail_admins = my_mail_admins
admin_email_handler.email_backend = (
'logging_tests.logconfig.MyEmailBackend')
try:
self.logger.error(message)
self.assertTrue(mail_admins_called['called'])
finally:
# Revert Monkeypatches
mail.mail_admins = orig_mail_admins
admin_email_handler.email_backend = orig_email_backend
@override_settings(
ADMINS=[('whatever admin', '[email protected]')],
)
def test_emit_non_ascii(self):
"""
#23593 - AdminEmailHandler should allow Unicode characters in the
request.
"""
handler = self.get_admin_email_handler(self.logger)
record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
url_path = '/º'
record.request = self.request_factory.get(url_path)
handler.emit(record)
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.to, ['[email protected]'])
self.assertEqual(msg.subject, "[Django] ERROR (EXTERNAL IP): message")
self.assertIn("Report at %s" % url_path, msg.body)
@override_settings(
MANAGERS=[('manager', '[email protected]')],
DEBUG=False,
)
def test_customize_send_mail_method(self):
class ManagerEmailHandler(AdminEmailHandler):
def send_mail(self, subject, message, *args, **kwargs):
mail.mail_managers(subject, message, *args, connection=self.connection(), **kwargs)
handler = ManagerEmailHandler()
record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
self.assertEqual(len(mail.outbox), 0)
handler.emit(record)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host_doesnt_crash(self):
admin_email_handler = self.get_admin_email_handler(self.logger)
old_include_html = admin_email_handler.include_html
# Text email
admin_email_handler.include_html = False
try:
self.client.get('/', HTTP_HOST='evil.com')
finally:
admin_email_handler.include_html = old_include_html
# HTML email
admin_email_handler.include_html = True
try:
self.client.get('/', HTTP_HOST='evil.com')
finally:
admin_email_handler.include_html = old_include_html
class SettingsConfigTest(AdminScriptTestCase):
"""
Accessing settings in a custom logging handler does not trigger
a circular import error.
"""
def setUp(self):
super().setUp()
log_config = """{
'version': 1,
'handlers': {
'custom_handler': {
'level': 'INFO',
'class': 'logging_tests.logconfig.MyHandler',
}
}
}"""
self.write_settings('settings.py', sdict={'LOGGING': log_config})
def test_circular_dependency(self):
        # 'check' is just an example command used to trigger settings configuration.
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
self.assertOutput(out, "System check identified no issues (0 silenced).")
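# Sentinel logging configurator: LOGGING_CONFIG below points at this function
# so the test can observe whether django.setup() invoked it.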
def dictConfig(config):
dictConfig.called = True
dictConfig.called = False
class SetupConfigureLogging(SimpleTestCase):
"""
Calling django.setup() initializes the logging configuration.
"""
@override_settings(
LOGGING_CONFIG='logging_tests.tests.dictConfig',
LOGGING=OLD_LOGGING,
)
def test_configure_initializes_logging(self):
from django import setup
setup()
self.assertTrue(dictConfig.called)
@override_settings(DEBUG=True, ROOT_URLCONF='logging_tests.urls')
class SecurityLoggerTest(LoggingAssertionMixin, SimpleTestCase):
def test_suspicious_operation_creates_log_message(self):
self.assertLogsRequest(
url='/suspicious/',
level='ERROR',
msg='dubious',
status_code=400,
logger='django.security.SuspiciousOperation',
)
def test_suspicious_operation_uses_sublogger(self):
self.assertLogsRequest(
url='/suspicious_spec/',
level='ERROR',
msg='dubious',
status_code=400,
logger='django.security.DisallowedHost',
)
@override_settings(
ADMINS=[('admin', '[email protected]')],
DEBUG=False,
)
def test_suspicious_email_admins(self):
self.client.get('/suspicious/')
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Report at /suspicious/', mail.outbox[0].body)
class SettingsCustomLoggingTest(AdminScriptTestCase):
"""
    Logging defaults are still applied when using a custom callable in
    LOGGING_CONFIG (i.e., logging.config.fileConfig).
"""
def setUp(self):
super().setUp()
logging_conf = """
[loggers]
keys=root
[handlers]
keys=stream
[formatters]
keys=simple
[logger_root]
handlers=stream
[handler_stream]
class=StreamHandler
formatter=simple
args=(sys.stdout,)
[formatter_simple]
format=%(message)s
"""
self.temp_file = NamedTemporaryFile()
self.temp_file.write(logging_conf.encode())
self.temp_file.flush()
self.write_settings('settings.py', sdict={
'LOGGING_CONFIG': '"logging.config.fileConfig"',
'LOGGING': 'r"%s"' % self.temp_file.name,
})
def tearDown(self):
self.temp_file.close()
def test_custom_logging(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
self.assertOutput(out, "System check identified no issues (0 silenced).")
class LogFormattersTests(SimpleTestCase):
def test_server_formatter_styles(self):
color_style = color.make_style('')
formatter = ServerFormatter()
formatter.style = color_style
log_msg = 'log message'
status_code_styles = [
(200, 'HTTP_SUCCESS'),
(100, 'HTTP_INFO'),
(304, 'HTTP_NOT_MODIFIED'),
(300, 'HTTP_REDIRECT'),
(404, 'HTTP_NOT_FOUND'),
(400, 'HTTP_BAD_REQUEST'),
(500, 'HTTP_SERVER_ERROR'),
]
for status_code, style in status_code_styles:
record = logging.makeLogRecord({'msg': log_msg, 'status_code': status_code})
self.assertEqual(formatter.format(record), getattr(color_style, style)(log_msg))
record = logging.makeLogRecord({'msg': log_msg})
self.assertEqual(formatter.format(record), log_msg)
def test_server_formatter_default_format(self):
server_time = '2016-09-25 10:20:30'
log_msg = 'log message'
logger = logging.getLogger('django.server')
@contextmanager
def patch_django_server_logger():
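            # Temporarily swap the django.server handler's stream so the
            # formatted output can be captured.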
old_stream = logger.handlers[0].stream
new_stream = StringIO()
logger.handlers[0].stream = new_stream
            try:
                yield new_stream
            finally:
                logger.handlers[0].stream = old_stream
with patch_django_server_logger() as logger_output:
logger.info(log_msg, extra={'server_time': server_time})
self.assertEqual('[%s] %s\n' % (server_time, log_msg), logger_output.getvalue())
with patch_django_server_logger() as logger_output:
logger.info(log_msg)
self.assertRegex(logger_output.getvalue(), r'^\[[-:,.\s\d]+\] %s' % log_msg)
import os
from io import StringIO
from unittest import mock
from admin_scripts.tests import AdminScriptTestCase
from django.apps import apps
from django.core import management
from django.core.management import BaseCommand, CommandError, find_commands
from django.core.management.utils import (
find_command, get_random_secret_key, is_ignored_path,
normalize_path_patterns, popen_wrapper,
)
from django.db import connection
from django.test import SimpleTestCase, override_settings
from django.test.utils import captured_stderr, extend_sys_path
from django.utils import translation
from .management.commands import dance
# A minimal set of apps to avoid system checks running on all apps.
@override_settings(
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'user_commands',
],
)
class CommandTests(SimpleTestCase):
def test_command(self):
out = StringIO()
management.call_command('dance', stdout=out)
self.assertIn("I don't feel like dancing Rock'n'Roll.\n", out.getvalue())
def test_command_style(self):
out = StringIO()
management.call_command('dance', style='Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
# Passing options as arguments also works (thanks argparse)
management.call_command('dance', '--style', 'Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
def test_language_preserved(self):
out = StringIO()
with translation.override('fr'):
management.call_command('dance', stdout=out)
self.assertEqual(translation.get_language(), 'fr')
def test_explode(self):
""" An unknown command raises CommandError """
with self.assertRaisesMessage(CommandError, "Unknown command: 'explode'"):
management.call_command(('explode',))
def test_system_exit(self):
""" Exception raised in a command should raise CommandError with
call_command, but SystemExit when run from command line
"""
with self.assertRaises(CommandError):
management.call_command('dance', example="raise")
dance.Command.requires_system_checks = False
try:
with captured_stderr() as stderr, self.assertRaises(SystemExit):
management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
finally:
dance.Command.requires_system_checks = True
self.assertIn("CommandError", stderr.getvalue())
def test_no_translations_deactivate_translations(self):
"""
When the Command handle method is decorated with @no_translations,
translations are deactivated inside the command.
"""
current_locale = translation.get_language()
with translation.override('pl'):
result = management.call_command('no_translations', stdout=StringIO())
self.assertIsNone(result)
self.assertEqual(translation.get_language(), current_locale)
def test_find_command_without_PATH(self):
"""
find_command should still work when the PATH environment variable
doesn't exist (#22256).
"""
current_path = os.environ.pop('PATH', None)
try:
self.assertIsNone(find_command('_missing_'))
finally:
if current_path is not None:
os.environ['PATH'] = current_path
def test_discover_commands_in_eggs(self):
"""
Management commands can also be loaded from Python eggs.
"""
egg_dir = '%s/eggs' % os.path.dirname(__file__)
egg_name = '%s/basic.egg' % egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['commandegg']):
cmds = find_commands(os.path.join(apps.get_app_config('commandegg').path, 'management'))
self.assertEqual(cmds, ['eggcommand'])
def test_call_command_option_parsing(self):
"""
When passing the long option name to call_command, the available option
key is the option dest name (#22985).
"""
out = StringIO()
management.call_command('dance', stdout=out, opt_3=True)
self.assertIn("option3", out.getvalue())
self.assertNotIn("opt_3", out.getvalue())
self.assertNotIn("opt-3", out.getvalue())
def test_call_command_option_parsing_non_string_arg(self):
"""
It should be possible to pass non-string arguments to call_command.
"""
out = StringIO()
management.call_command('dance', 1, verbosity=0, stdout=out)
self.assertIn("You passed 1 as a positional argument.", out.getvalue())
    def test_calling_a_command_with_only_empty_parameter_should_end_gracefully(self):
out = StringIO()
management.call_command('hal', "--empty", stdout=out)
self.assertIn("Dave, I can't do that.\n", out.getvalue())
def test_calling_command_with_app_labels_and_parameters_should_be_ok(self):
out = StringIO()
management.call_command('hal', 'myapp', "--verbosity", "3", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok(self):
out = StringIO()
management.call_command('hal', "--verbosity", "3", "myapp", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error(self):
with self.assertRaises(CommandError):
management.call_command('hal', stdout=StringIO())
def test_output_transaction(self):
output = management.call_command('transaction', stdout=StringIO(), no_color=True)
self.assertTrue(output.strip().startswith(connection.ops.start_transaction_sql()))
self.assertTrue(output.strip().endswith(connection.ops.end_transaction_sql()))
def test_call_command_no_checks(self):
"""
By default, call_command should not trigger the check framework, unless
specifically asked.
"""
self.counter = 0
def patched_check(self_, **kwargs):
self.counter += 1
saved_check = BaseCommand.check
BaseCommand.check = patched_check
try:
management.call_command("dance", verbosity=0)
self.assertEqual(self.counter, 0)
management.call_command("dance", verbosity=0, skip_checks=False)
self.assertEqual(self.counter, 1)
finally:
BaseCommand.check = saved_check
def test_check_migrations(self):
requires_migrations_checks = dance.Command.requires_migrations_checks
self.assertIs(requires_migrations_checks, False)
try:
with mock.patch.object(BaseCommand, 'check_migrations') as check_migrations:
management.call_command('dance', verbosity=0)
self.assertFalse(check_migrations.called)
dance.Command.requires_migrations_checks = True
management.call_command('dance', verbosity=0)
self.assertTrue(check_migrations.called)
finally:
dance.Command.requires_migrations_checks = requires_migrations_checks
def test_call_command_unrecognized_option(self):
msg = (
'Unknown option(s) for dance command: unrecognized. Valid options '
'are: example, force_color, help, integer, no_color, opt_3, '
'option3, pythonpath, settings, skip_checks, stderr, stdout, '
'style, traceback, verbosity, version.'
)
with self.assertRaisesMessage(TypeError, msg):
management.call_command('dance', unrecognized=1)
msg = (
'Unknown option(s) for dance command: unrecognized, unrecognized2. '
'Valid options are: example, force_color, help, integer, no_color, '
'opt_3, option3, pythonpath, settings, skip_checks, stderr, '
'stdout, style, traceback, verbosity, version.'
)
with self.assertRaisesMessage(TypeError, msg):
management.call_command('dance', unrecognized=1, unrecognized2=1)
def test_call_command_with_required_parameters_in_options(self):
out = StringIO()
management.call_command('required_option', need_me='foo', needme2='bar', stdout=out)
self.assertIn('need_me', out.getvalue())
self.assertIn('needme2', out.getvalue())
def test_call_command_with_required_parameters_in_mixed_options(self):
out = StringIO()
management.call_command('required_option', '--need-me=foo', needme2='bar', stdout=out)
self.assertIn('need_me', out.getvalue())
self.assertIn('needme2', out.getvalue())
def test_command_add_arguments_after_common_arguments(self):
out = StringIO()
management.call_command('common_args', stdout=out)
self.assertIn('Detected that --version already exists', out.getvalue())
def test_subparser(self):
out = StringIO()
management.call_command('subparser', 'foo', 12, stdout=out)
self.assertIn('bar', out.getvalue())
def test_subparser_invalid_option(self):
msg = "Error: invalid choice: 'test' (choose from 'foo')"
with self.assertRaisesMessage(CommandError, msg):
management.call_command('subparser', 'test', 12)
def test_create_parser_kwargs(self):
"""BaseCommand.create_parser() passes kwargs to CommandParser."""
epilog = 'some epilog text'
parser = BaseCommand().create_parser('prog_name', 'subcommand', epilog=epilog)
self.assertEqual(parser.epilog, epilog)
class CommandRunTests(AdminScriptTestCase):
"""
Tests that need to run by simulating the command line, not by call_command.
"""
def test_script_prefix_set_in_commands(self):
self.write_settings('settings.py', apps=['user_commands'], sdict={
'ROOT_URLCONF': '"user_commands.urls"',
'FORCE_SCRIPT_NAME': '"/PREFIX/"',
})
out, err = self.run_manage(['reverse_url'])
self.assertNoOutput(err)
self.assertEqual(out.strip(), '/PREFIX/some/url/')
def test_disallowed_abbreviated_options(self):
"""
To avoid conflicts with custom options, commands don't allow
abbreviated forms of the --setting and --pythonpath options.
"""
self.write_settings('settings.py', apps=['user_commands'])
out, err = self.run_manage(['set_option', '--set', 'foo'])
self.assertNoOutput(err)
self.assertEqual(out.strip(), 'Set foo')
class UtilsTests(SimpleTestCase):
    def test_nonexistent_external_program(self):
msg = 'Error executing a_42_command_that_doesnt_exist_42'
with self.assertRaisesMessage(CommandError, msg):
popen_wrapper(['a_42_command_that_doesnt_exist_42'])
def test_get_random_secret_key(self):
key = get_random_secret_key()
self.assertEqual(len(key), 50)
for char in key:
self.assertIn(char, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
def test_is_ignored_path_true(self):
patterns = (
['foo/bar/baz'],
['baz'],
['*/baz'],
['*'],
['b?z'],
['[abc]az'],
['*/ba[!z]/baz'],
)
for ignore_patterns in patterns:
with self.subTest(ignore_patterns=ignore_patterns):
self.assertIs(is_ignored_path('foo/bar/baz', ignore_patterns=ignore_patterns), True)
def test_is_ignored_path_false(self):
self.assertIs(is_ignored_path('foo/bar/baz', ignore_patterns=['foo/bar/bat', 'bar', 'flub/blub']), False)
def test_normalize_path_patterns_truncates_wildcard_base(self):
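        # A trailing '/*' is truncated to its base path ('foo/bar/*' ->
        # 'foo/bar'); other patterns pass through normalize_path_patterns()
        # unchanged apart from os.path.normcase().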
expected = [os.path.normcase(p) for p in ['foo/bar', 'bar/*/']]
self.assertEqual(normalize_path_patterns(['foo/bar/*', 'bar/*/']), expected)
import json
from django.db.models.expressions import F, Value
from django.test.utils import Approximate
from . import PostgreSQLTestCase
from .models import AggregateTestModel, StatTestModel
try:
from django.contrib.postgres.aggregates import (
ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, JSONBAgg,
RegrAvgX, RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope,
RegrSXX, RegrSXY, RegrSYY, StatAggregate, StringAgg,
)
except ImportError:
pass # psycopg2 is not installed
class TestGeneralAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo1', integer_field=0)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo2', integer_field=1)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo4', integer_field=2)
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo3', integer_field=0)
def test_array_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo4', 'Foo3']})
def test_array_agg_charfield_ordering(self):
ordering_test_cases = (
(F('char_field').desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
(F('char_field').asc(), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
(F('char_field'), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
([F('boolean_field'), F('char_field').desc()], ['Foo4', 'Foo2', 'Foo3', 'Foo1']),
((F('boolean_field'), F('char_field').desc()), ['Foo4', 'Foo2', 'Foo3', 'Foo1']),
('char_field', ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
('-char_field', ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg('char_field', ordering=ordering)
)
self.assertEqual(values, {'arrayagg': expected_output})
def test_array_agg_integerfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]})
def test_array_agg_integerfield_ordering(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg('integer_field', ordering=F('integer_field').desc())
)
self.assertEqual(values, {'arrayagg': [2, 1, 0, 0]})
def test_array_agg_booleanfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': [True, False, False, True]})
def test_array_agg_booleanfield_ordering(self):
ordering_test_cases = (
(F('boolean_field').asc(), [False, False, True, True]),
(F('boolean_field').desc(), [True, True, False, False]),
(F('boolean_field'), [False, False, True, True]),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg('boolean_field', ordering=ordering)
)
self.assertEqual(values, {'arrayagg': expected_output})
def test_array_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': []})
def test_array_agg_lookups(self):
aggr1 = AggregateTestModel.objects.create()
aggr2 = AggregateTestModel.objects.create()
StatTestModel.objects.create(related_field=aggr1, int1=1, int2=0)
StatTestModel.objects.create(related_field=aggr1, int1=2, int2=0)
StatTestModel.objects.create(related_field=aggr2, int1=3, int2=0)
StatTestModel.objects.create(related_field=aggr2, int1=4, int2=0)
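        # array__overlap matches rows whose aggregated array shares at least
        # one element with the given list (PostgreSQL's && operator).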
qs = StatTestModel.objects.values('related_field').annotate(
array=ArrayAgg('int1')
).filter(array__overlap=[2]).values_list('array', flat=True)
self.assertCountEqual(qs.get(), [1, 2])
def test_bit_and_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 1})
def test_bit_and_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': None})
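    # SQL aggregates return NULL when applied to zero rows, which surfaces in
    # Python as None -- hence this and the following empty-result tests.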
def test_bit_or_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 0})
def test_bit_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': None})
def test_bool_and_general(self):
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': False})
def test_bool_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': None})
def test_bool_or_general(self):
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': True})
def test_bool_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': None})
def test_string_agg_requires_delimiter(self):
with self.assertRaises(TypeError):
AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field'))
def test_string_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo4;Foo3'})
def test_string_agg_charfield_ordering(self):
ordering_test_cases = (
(F('char_field').desc(), 'Foo4;Foo3;Foo2;Foo1'),
(F('char_field').asc(), 'Foo1;Foo2;Foo3;Foo4'),
(F('char_field'), 'Foo1;Foo2;Foo3;Foo4'),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg('char_field', delimiter=';', ordering=ordering)
)
self.assertEqual(values, {'stringagg': expected_output})
def test_string_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': ''})
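    # Unlike BitAnd/BoolOr above, ArrayAgg and StringAgg map an empty result
    # to [] and '' respectively rather than None; this appears to be a
    # Django-side substitution for the NULL the database itself returns.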
def test_orderable_agg_alternative_fields(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg('integer_field', ordering=F('char_field').asc())
)
self.assertEqual(values, {'arrayagg': [0, 1, 0, 2]})
def test_json_agg(self):
values = AggregateTestModel.objects.aggregate(jsonagg=JSONBAgg('char_field'))
self.assertEqual(values, {'jsonagg': ['Foo1', 'Foo2', 'Foo4', 'Foo3']})
def test_json_agg_empty(self):
values = AggregateTestModel.objects.none().aggregate(jsonagg=JSONBAgg('integer_field'))
self.assertEqual(values, json.loads('{"jsonagg": []}'))
class TestAggregateDistinct(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
AggregateTestModel.objects.create(char_field='Foo')
AggregateTestModel.objects.create(char_field='Foo')
AggregateTestModel.objects.create(char_field='Bar')
def test_string_agg_distinct_false(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=False))
self.assertEqual(values['stringagg'].count('Foo'), 2)
self.assertEqual(values['stringagg'].count('Bar'), 1)
def test_string_agg_distinct_true(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=True))
self.assertEqual(values['stringagg'].count('Foo'), 1)
self.assertEqual(values['stringagg'].count('Bar'), 1)
def test_array_agg_distinct_false(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field', distinct=False))
self.assertEqual(sorted(values['arrayagg']), ['Bar', 'Foo', 'Foo'])
def test_array_agg_distinct_true(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field', distinct=True))
self.assertEqual(sorted(values['arrayagg']), ['Bar', 'Foo'])
class TestStatisticsAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
StatTestModel.objects.create(
int1=1,
int2=3,
related_field=AggregateTestModel.objects.create(integer_field=0),
)
StatTestModel.objects.create(
int1=2,
int2=2,
related_field=AggregateTestModel.objects.create(integer_field=1),
)
StatTestModel.objects.create(
int1=3,
int2=1,
related_field=AggregateTestModel.objects.create(integer_field=2),
)
# Tests for base class (StatAggregate)
def test_missing_arguments_raises_exception(self):
with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'):
StatAggregate(x=None, y=None)
def test_correct_source_expressions(self):
func = StatAggregate(x='test', y=13)
self.assertIsInstance(func.source_expressions[0], Value)
self.assertIsInstance(func.source_expressions[1], F)
def test_alias_is_required(self):
class SomeFunc(StatAggregate):
function = 'TEST'
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1'))
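    # The alias is normally supplied as the keyword in aggregate(); a minimal
    # sketch of the working form (as used throughout the tests below):
    #   StatTestModel.objects.aggregate(func=SomeFunc(y='int2', x='int1'))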
# Test aggregates
def test_corr_general(self):
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': -1.0})
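    # Why -1.0: the fixture satisfies int2 = 4 - int1 exactly, i.e. the points
    # (1, 3), (2, 2), (3, 1) lie on a line with negative slope, so the Pearson
    # correlation is exactly -1.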
def test_corr_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': None})
def test_covar_pop_general(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)})
def test_covar_pop_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': None})
def test_covar_pop_sample(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': -1.0})
def test_covar_pop_sample_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': None})
def test_regr_avgx_general(self):
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': 2.0})
def test_regr_avgx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': None})
def test_regr_avgy_general(self):
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': 2.0})
def test_regr_avgy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': None})
def test_regr_count_general(self):
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 3})
def test_regr_count_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 0})
def test_regr_intercept_general(self):
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': 4})
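    # Worked derivation for the fixture: x-mean = y-mean = 2 and the
    # least-squares slope is -1 (see test_regr_slope_general below), so the
    # intercept is y-mean - slope * x-mean = 2 - (-1) * 2 = 4.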
def test_regr_intercept_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': None})
def test_regr_r2_general(self):
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': 1})
def test_regr_r2_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': None})
def test_regr_slope_general(self):
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': -1})
def test_regr_slope_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': None})
def test_regr_sxx_general(self):
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': 2.0})
def test_regr_sxx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': None})
def test_regr_sxy_general(self):
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': -2.0})
def test_regr_sxy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': None})
def test_regr_syy_general(self):
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': 2.0})
def test_regr_syy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': None})
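    # Worked sums behind the three tests above, with means x-bar = y-bar = 2:
    #   SXX = sum((x - 2)**2) = 1 + 0 + 1 = 2
    #   SYY = sum((y - 2)**2) = 1 + 0 + 1 = 2
    #   SXY = sum((x - 2) * (y - 2)) = -1 + 0 + -1 = -2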
def test_regr_avgx_with_related_obj_and_number_as_argument(self):
"""
        A more complex test to check that a JOIN on a field and a
        number as an argument work as expected.
"""
values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field'))
self.assertEqual(values, {'complex_regravgx': 1.0})
|
d17a7fc6546601d80b31107d6c5b90b5cf8178f5369c4f6263e5a8d4508d87cf | import os
import subprocess
import sys
from . import PostgreSQLSimpleTestCase
class PostgresIntegrationTests(PostgreSQLSimpleTestCase):
def test_check(self):
result = subprocess.run(
[sys.executable, '-m', 'django', 'check', '--settings', 'integration_settings'],
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
cwd=os.path.dirname(__file__),
)
stderr = '\n'.join([e.decode() for e in result.stderr.splitlines()])
self.assertEqual(result.returncode, 0, msg=stderr)
|
ad1c08108bd2a0199374cc3fdace11a14b9e9facea43b219405b8c99e83e131d | """
Unit tests for reverse URL lookups.
"""
import sys
import threading
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, Resolver404, ResolverMatch, URLPattern, URLResolver,
get_callable, get_resolver, get_urlconf, include, path, re_path, resolve,
reverse, reverse_lazy,
)
from django.urls.resolvers import RegexPattern
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}),
(
'/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/view_class/42/37/', 'inc-view-class', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-view-class',
views.view_class_instance, (), {'arg1': '42', 'arg2': '37'}
),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, (), {'arg2': '37'}),
(
'/included/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
(
'/included/12/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
# Unnamed views should have None as the url_name. Regression data for #21157.
(
'/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
(), {'arg1': '42', 'arg2': '37'}
),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
(
'/included/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('42', '37'), {}
),
(
'/included/12/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('12', '42', '37'), {}
),
# Namespaces
(
'/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'inc-ns1', 'inc-ns1:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Nested namespaces
(
'/ns-included1/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-ns1:inc-ns4:inc-ns2:test-ns3',
'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp', 'inc-app:test-ns3',
'inc-app:test-ns3:urlobject-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-app:inc-ns4:inc-ns2:test-ns3',
'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Namespaces capturing variables
(
'/inc70/', 'inner-nothing', 'included_urls', 'inc-ns5', 'inc-ns5:inner-nothing',
views.empty_view, (), {'outer': '70'}
),
(
'/inc78/extra/foobar/', 'inner-extra', 'included_urls', 'inc-ns5', 'inc-ns5:inner-extra',
views.empty_view, (), {'outer': '78', 'extra': 'foobar'}
),
)
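# Illustrative reading of one entry above (a sketch, not an extra test case):
# resolve('/normal/42/37/') returns a ResolverMatch whose view_name is
# 'normal-view', whose func is views.empty_view, and whose kwargs are
# {'arg1': '42', 'arg2': '37'}; ResolverMatchTests below verifies every
# entry in exactly this way.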
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], {'year': 2007, 'month': 5, 'day': 21}),
(
'windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [],
{'drive_name': 'C', 'path': r'Documents and Settings\spam'}
),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('test', '/test/1', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
URLResolver should raise an exception when no urlpatterns exist.
"""
resolver = URLResolver(RegexPattern(r'^$'), settings.ROOT_URLCONF)
with self.assertRaisesMessage(
ImproperlyConfigured,
"The included URLconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see valid patterns in "
"the file then the issue is probably caused by a circular import."
):
getattr(resolver, 'url_patterns')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
with self.subTest(name=name, args=args, kwargs=kwargs):
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(NoReverseMatch, expected)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
with self.assertRaises(NoReverseMatch):
reverse(None)
def test_mixing_args_and_kwargs(self):
msg = "Don't mix *args and **kwargs in call to reverse()!"
with self.assertRaisesMessage(ValueError, msg):
reverse('name', args=['a'], kwargs={'b': 'c'})
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
with self.assertRaisesMessage(NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"):
# this url exists, but requires an argument
reverse("people", args=[])
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_view_not_found_message(self):
msg = (
"Reverse for 'nonexistent-view' not found. 'nonexistent-view' "
"is not a valid view function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('nonexistent-view')
def test_no_args_message(self):
msg = "Reverse for 'places' with no arguments not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places')
def test_illegal_args_message(self):
msg = "Reverse for 'places' with arguments '(1, 2)' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', args=(1, 2))
def test_illegal_kwargs_message(self):
msg = "Reverse for 'places' with keyword arguments '{'arg1': 2}' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', kwargs={'arg1': 2})
class ResolverTests(SimpleTestCase):
def test_resolver_repr(self):
"""
Test repr of URLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced URLconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<URLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
        Verifies that the lazy object returned by reverse_lazy() is coerced to
        text by resolve(). Prior to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
resolver.resolve(proxy_url)
def test_resolver_reverse(self):
resolver = get_resolver('urlpatterns_reverse.named_urls')
test_urls = [
# (name, args, kwargs, expected)
('named-url1', (), {}, ''),
('named-url2', ('arg',), {}, 'extra/arg/'),
('named-url2', (), {'extra': 'arg'}, 'extra/arg/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
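    # Note that URLResolver.reverse() returns the matched path without a
    # leading '/' or script prefix ('' and 'extra/arg/' above); the public
    # reverse() function is what prepends the current script prefix.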
def test_resolver_reverse_conflict(self):
"""
URL pattern name arguments don't need to be unique. The last registered
pattern takes precedence for conflicting names.
"""
resolver = get_resolver('urlpatterns_reverse.named_urls_conflict')
test_urls = [
# (name, args, kwargs, expected)
# Without arguments, the last URL in urlpatterns has precedence.
('name-conflict', (), {}, 'conflict/'),
# With an arg, the last URL in urlpatterns has precedence.
('name-conflict', ('arg',), {}, 'conflict-last/arg/'),
# With a kwarg, other URL patterns can be reversed.
('name-conflict', (), {'first': 'arg'}, 'conflict-first/arg/'),
('name-conflict', (), {'middle': 'arg'}, 'conflict-middle/arg/'),
('name-conflict', (), {'last': 'arg'}, 'conflict-last/arg/'),
# The number and order of the arguments don't interfere with reversing.
('name-conflict', ('arg', 'arg'), {}, 'conflict/arg/arg/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
def test_non_regex(self):
"""
A Resolver404 is raised if resolving doesn't meet the basic
requirements of a path to match - i.e., at the very least, it matches
the root pattern '^/'. Never return None from resolve() to prevent a
TypeError from occurring later (#10834).
"""
test_urls = ['', 'a', '\\', '.']
for path_ in test_urls:
with self.subTest(path=path_):
with self.assertRaises(Resolver404):
resolve(path_)
def test_404_tried_urls_have_names(self):
"""
The list of URLs that come back from a Resolver404 exception contains
a list in the right format for printing out in the DEBUG 404 page with
both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a nonexistent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/nonexistent-url')
url_types_names = [
[{'type': URLPattern, 'name': 'named-url1'}],
[{'type': URLPattern, 'name': 'named-url2'}],
[{'type': URLPattern, 'name': None}],
[{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url3'}],
[{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url4'}],
[{'type': URLResolver}, {'type': URLPattern, 'name': None}],
[{'type': URLResolver}, {'type': URLResolver}],
]
with self.assertRaisesMessage(Resolver404, 'tried') as cm:
resolve('/included/nonexistent-url', urlconf=urls)
e = cm.exception
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
self.assertEqual(
len(e.args[0]['tried']),
len(url_types_names),
'Wrong number of tried URLs returned. Expected %s, got %s.' % (
len(url_types_names), len(e.args[0]['tried'])
)
)
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
with self.subTest(t):
                    self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(
t.name,
e['name'],
'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name)
)
def test_namespaced_view_detail(self):
resolver = get_resolver('urlpatterns_reverse.nested_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view1'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view2'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.View3'))
self.assertFalse(resolver._is_callback('urlpatterns_reverse.nested_urls.blub'))
def test_view_detail_as_method(self):
# Views which have a class name as part of their path.
resolver = get_resolver('urlpatterns_reverse.method_view_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.method_view'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view'))
def test_populate_concurrency(self):
"""
URLResolver._populate() can be called concurrently, but not more
than once per thread (#26888).
"""
resolver = URLResolver(RegexPattern(r'^/'), 'urlpatterns_reverse.urls')
resolver._local.populating = True
thread = threading.Thread(target=resolver._populate)
thread.start()
thread.join()
self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
        alfred = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.force_login(alfred)
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
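    # reverse_lazy defers resolution until the value is coerced to str (as in
    # the %-interpolation above), which is what makes it usable at import
    # time -- e.g. in a hypothetical class attribute such as
    #   success_url = reverse_lazy('some-login-page')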
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
super().setUp()
self.write_settings(
'settings.py',
extra="from django.urls import reverse_lazy\nLOGIN_URL = reverse_lazy('login')",
)
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj:
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
with self.assertRaises(NoReverseMatch):
redirect('not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
        # (previously, the below would result in a UnicodeEncodeError from __import__)
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_reverse_by_path_nested(self):
# Views added to urlpatterns using include() should be reversible.
from .views import nested_view
self.assertEqual(reverse(nested_view), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
with self.assertRaises(NoReverseMatch):
redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
('urlobject-view', [], {}),
('urlobject-view', [37, 42], {}),
('urlobject-view', [], {'arg1': 42, 'arg2': 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_ambiguous_urlpattern(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
('inner-nothing', [], {}),
('inner-nothing', [37, 42], {}),
('inner-nothing', [], {'arg1': 42, 'arg2': 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_non_existent_namespace(self):
"""Nonexistent namespaces raise errors."""
test_urls = [
'blahblah:urlobject-view',
'test-ns1:blahblah:urlobject-view',
]
for name in test_urls:
with self.subTest(name=name):
with self.assertRaises(NoReverseMatch):
reverse(name)
def test_normal_name(self):
"""Normal lookups work as expected."""
test_urls = [
('normal-view', [], {}, '/normal/'),
('normal-view', [37, 42], {}, '/normal/37/42/'),
('normal-view', [], {'arg1': 42, 'arg2': 37}, '/normal/42/37/'),
('special-view', [], {}, '/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_simple_included_name(self):
"""Normal lookups work on names included from other patterns."""
test_urls = [
('included_namespace_urls:inc-normal-view', [], {}, '/included/normal/'),
('included_namespace_urls:inc-normal-view', [37, 42], {}, '/included/normal/37/42/'),
('included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/included/normal/42/37/'),
('included_namespace_urls:inc-special-view', [], {}, '/included/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_object(self):
"""Dynamic URL objects can be found using a namespace."""
test_urls = [
('test-ns1:urlobject-view', [], {}, '/test1/inner/'),
('test-ns1:urlobject-view', [37, 42], {}, '/test1/inner/37/42/'),
('test-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/test1/inner/42/37/'),
('test-ns1:urlobject-special-view', [], {}, '/test1/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object(self):
"""
Dynamic URL objects can return a (pattern, app_name) 2-tuple, and
include() can set the namespace.
"""
test_urls = [
('new-ns1:urlobject-view', [], {}, '/newapp1/inner/'),
('new-ns1:urlobject-view', [37, 42], {}, '/newapp1/inner/37/42/'),
('new-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/newapp1/inner/42/37/'),
('new-ns1:urlobject-special-view', [], {}, '/newapp1/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object_default_namespace(self):
"""
Namespace defaults to app_name when including a (pattern, app_name)
2-tuple.
"""
test_urls = [
('newapp:urlobject-view', [], {}, '/new-default/inner/'),
('newapp:urlobject-view', [37, 42], {}, '/new-default/inner/37/42/'),
('newapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/new-default/inner/42/37/'),
('newapp:urlobject-special-view', [], {}, '/new-default/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_embedded_namespace_object(self):
"""Namespaces can be installed anywhere in the URL pattern tree."""
test_urls = [
('included_namespace_urls:test-ns3:urlobject-view', [], {}, '/included/test3/inner/'),
('included_namespace_urls:test-ns3:urlobject-view', [37, 42], {}, '/included/test3/inner/37/42/'),
(
'included_namespace_urls:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/included/test3/inner/42/37/',
),
('included_namespace_urls:test-ns3:urlobject-special-view', [], {}, '/included/test3/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern(self):
"""Namespaces can be applied to include()'d urlpatterns."""
test_urls = [
('inc-ns1:inc-normal-view', [], {}, '/ns-included1/normal/'),
('inc-ns1:inc-normal-view', [37, 42], {}, '/ns-included1/normal/37/42/'),
('inc-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/ns-included1/normal/42/37/'),
('inc-ns1:inc-special-view', [], {}, '/ns-included1/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_name_pattern(self):
"""
Namespaces can be applied to include()'d urlpatterns that set an
app_name attribute.
"""
test_urls = [
('app-ns1:inc-normal-view', [], {}, '/app-included1/normal/'),
('app-ns1:inc-normal-view', [37, 42], {}, '/app-included1/normal/37/42/'),
('app-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/app-included1/normal/42/37/'),
('app-ns1:inc-special-view', [], {}, '/app-included1/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern_with_variable_prefix(self):
"""
Using include() with namespaces when there is a regex variable in front
of it.
"""
test_urls = [
('inc-outer:inc-normal-view', [], {'outer': 42}, '/ns-outer/42/normal/'),
('inc-outer:inc-normal-view', [42], {}, '/ns-outer/42/normal/'),
('inc-outer:inc-normal-view', [], {'arg1': 37, 'arg2': 4, 'outer': 42}, '/ns-outer/42/normal/37/4/'),
('inc-outer:inc-normal-view', [42, 37, 4], {}, '/ns-outer/42/normal/37/4/'),
('inc-outer:inc-special-view', [], {'outer': 42}, '/ns-outer/42/+%5C$*/'),
('inc-outer:inc-special-view', [42], {}, '/ns-outer/42/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_multiple_namespace_pattern(self):
"""Namespaces can be embedded."""
test_urls = [
('inc-ns1:test-ns3:urlobject-view', [], {}, '/ns-included1/test3/inner/'),
('inc-ns1:test-ns3:urlobject-view', [37, 42], {}, '/ns-included1/test3/inner/37/42/'),
(
'inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/ns-included1/test3/inner/42/37/',
),
('inc-ns1:test-ns3:urlobject-special-view', [], {}, '/ns-included1/test3/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_namespace_pattern(self):
"""Namespaces can be nested."""
test_urls = [
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [37, 42], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/37/42/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/ns-included1/ns-included4/ns-included1/test3/inner/42/37/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view', [], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/',
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object(self):
"""A default application namespace can be used for lookup."""
test_urls = [
('testapp:urlobject-view', [], {}, '/default/inner/'),
('testapp:urlobject-view', [37, 42], {}, '/default/inner/37/42/'),
('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/default/inner/42/37/'),
('testapp:urlobject-special-view', [], {}, '/default/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object_with_default(self):
"""A default application namespace is sensitive to the current app."""
test_urls = [
('testapp:urlobject-view', [], {}, 'test-ns3', '/default/inner/'),
('testapp:urlobject-view', [37, 42], {}, 'test-ns3', '/default/inner/37/42/'),
('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'test-ns3', '/default/inner/42/37/'),
('testapp:urlobject-special-view', [], {}, 'test-ns3', '/default/inner/+%5C$*/'),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_app_lookup_object_without_default(self):
"""
An application namespace without a default is sensitive to the current
app.
"""
test_urls = [
('nodefault:urlobject-view', [], {}, None, '/other2/inner/'),
('nodefault:urlobject-view', [37, 42], {}, None, '/other2/inner/37/42/'),
('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/other2/inner/42/37/'),
('nodefault:urlobject-special-view', [], {}, None, '/other2/inner/+%5C$*/'),
('nodefault:urlobject-view', [], {}, 'other-ns1', '/other1/inner/'),
('nodefault:urlobject-view', [37, 42], {}, 'other-ns1', '/other1/inner/37/42/'),
('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'other-ns1', '/other1/inner/42/37/'),
('nodefault:urlobject-special-view', [], {}, 'other-ns1', '/other1/inner/+%5C$*/'),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_special_chars_namespace(self):
test_urls = [
('special:included_namespace_urls:inc-normal-view', [], {}, '/+%5C$*/included/normal/'),
('special:included_namespace_urls:inc-normal-view', [37, 42], {}, '/+%5C$*/included/normal/37/42/'),
(
'special:included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37},
'/+%5C$*/included/normal/42/37/',
),
('special:included_namespace_urls:inc-special-view', [], {}, '/+%5C$*/included/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespaces_with_variables(self):
"""Namespace prefixes can capture variables."""
test_urls = [
('inc-ns5:inner-nothing', [], {'outer': '70'}, '/inc70/'),
('inc-ns5:inner-extra', [], {'extra': 'foobar', 'outer': '78'}, '/inc78/extra/foobar/'),
('inc-ns5:inner-nothing', ['70'], {}, '/inc70/'),
('inc-ns5:inner-extra', ['78', 'foobar'], {}, '/inc78/extra/foobar/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_app_lookup(self):
"""
        A nested current_app should be split into individual namespaces (#24904).
"""
test_urls = [
('inc-ns1:testapp:urlobject-view', [], {}, None, '/ns-included1/test4/inner/'),
('inc-ns1:testapp:urlobject-view', [37, 42], {}, None, '/ns-included1/test4/inner/37/42/'),
('inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/ns-included1/test4/inner/42/37/'),
('inc-ns1:testapp:urlobject-special-view', [], {}, None, '/ns-included1/test4/inner/+%5C$*/'),
('inc-ns1:testapp:urlobject-view', [], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/'),
('inc-ns1:testapp:urlobject-view', [37, 42], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/37/42/'),
(
'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'inc-ns1:test-ns3',
'/ns-included1/test3/inner/42/37/',
),
(
'inc-ns1:testapp:urlobject-special-view', [], {}, 'inc-ns1:test-ns3',
'/ns-included1/test3/inner/+%5C$*/',
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_current_app_no_partial_match(self):
"""current_app shouldn't be used unless it matches the whole path."""
test_urls = [
('inc-ns1:testapp:urlobject-view', [], {}, 'nonexistent:test-ns3', '/ns-included1/test4/inner/'),
(
'inc-ns1:testapp:urlobject-view', [37, 42], {}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/37/42/',
),
(
'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/42/37/',
),
(
'inc-ns1:testapp:urlobject-special-view', [], {}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/+%5C$*/',
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
"""
Overriding request.urlconf with None will fall back to the default
URLconf.
"""
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
        a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
        a response middleware.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
        a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
        Test reversing a URL from the *default* URLconf from inside
        a streaming response.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
def test_urlconf_is_reset_after_request(self):
"""The URLconf is reset after each request."""
self.assertIsNone(get_urlconf())
with override_settings(MIDDLEWARE=['%s.ChangeURLconfMiddleware' % middleware.__name__]):
self.client.get(reverse('inner'))
self.assertIsNone(get_urlconf())
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = URLResolver(RegexPattern(r'^$'), urlconf)
self.callable_resolver = URLResolver(RegexPattern(r'^$'), urlconf_callables)
def test_named_handlers(self):
handler = (empty_view, {})
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(self.resolver.resolve_error_handler(code), handler)
def test_callable_handlers(self):
handler = (empty_view, {})
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(self.callable_resolver.resolve_error_handler(code), handler)
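    # Both resolvers hand back a (callable, kwargs) 2-tuple; the two URLconfs
    # differ only in whether handler400/404/500 are declared as dotted-path
    # strings or as callables, and resolve_error_handler() normalizes both
    # forms to the same result.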
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_handlers')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
with self.assertRaisesMessage(ValueError, "I don't think I'm getting good"):
self.client.get('/bad_view/')
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if ROOT_URLCONF is None"""
def test_no_handler_exception(self):
msg = (
"The included URLconf 'None' does not appear to have any patterns "
"in it. If you see valid patterns in the file then the issue is "
"probably caused by a circular import."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get('/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
def test_urlpattern_resolve(self):
for path_, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
with self.subTest(path=path_):
# Legacy support for extracting "function, args, kwargs".
match_func, match_args, match_kwargs = resolve(path_)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# ResolverMatch capabilities.
match = resolve(path_)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
def test_repr(self):
self.assertEqual(
repr(resolve('/no_kwargs/42/37/')),
"ResolverMatch(func=urlpatterns_reverse.views.empty_view, "
"args=('42', '37'), kwargs={}, url_name=no-kwargs, app_names=[], "
"namespaces=[], route=^no_kwargs/([0-9]+)/([0-9]+)/$)",
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_noncallable_view(self):
# View is not a callable (explicit import; arbitrary Python object)
with self.assertRaisesMessage(TypeError, 'view must be a callable'):
path('uncallable-object/', views.uncallable)
def test_invalid_regex(self):
# Regex contains an error (refs #6170)
msg = '(regex_error/$" is not a valid regular expression'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
self.assertEqual(get_callable(empty_view), empty_view)
def test_view_does_not_exist(self):
msg = "View does not exist in module urlpatterns_reverse.views."
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
def test_attributeerror_not_hidden(self):
msg = 'I am here to confuse django.urls.get_callable'
with self.assertRaisesMessage(AttributeError, msg):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
def test_non_string_value(self):
msg = "'1' is not a callable or a dot-notation path"
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable(1)
def test_string_without_dot(self):
msg = "Could not import 'test'. The path must be fully qualified."
with self.assertRaisesMessage(ImportError, msg):
get_callable('test')
def test_module_does_not_exist(self):
with self.assertRaisesMessage(ImportError, "No module named 'foo'"):
get_callable('foo.bar')
def test_parent_module_does_not_exist(self):
msg = 'Parent module urlpatterns_reverse.foo does not exist.'
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.foo.bar')
def test_not_callable(self):
msg = (
"Could not import 'urlpatterns_reverse.tests.resolve_test_data'. "
"View is not callable."
)
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.tests.resolve_test_data')
class IncludeTests(SimpleTestCase):
url_patterns = [
path('inner/', views.empty_view, name='urlobject-view'),
re_path(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
re_path(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
]
app_urls = URLObject('inc-app')
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
def test_include_namespace(self):
msg = (
'Specifying a namespace in include() without providing an '
'app_name is not supported.'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include(self.url_patterns, 'namespace')
def test_include_4_tuple(self):
msg = 'Passing a 4-tuple to include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace', 'blah'))
def test_include_3_tuple(self):
msg = 'Passing a 3-tuple to include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'))
def test_include_3_tuple_namespace(self):
msg = 'Cannot override the namespace for a dynamic module that provides a namespace.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'), 'namespace')
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, 'app_name')),
(self.url_patterns, 'app_name', 'app_name')
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, 'app_name'), namespace='namespace'),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_app_name(self):
self.assertEqual(
include(self.app_urls),
(self.app_urls, 'inc-app', 'inc-app')
)
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, 'namespace'),
(self.app_urls, 'inc-app', 'namespace')
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
with self.subTest(url=test_url):
self.assertEqual(resolve(test_url).kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.subTest(url=test_url):
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
test_urls = [
('lookahead-positive', {'city': 'a-city'}, '/lookahead+/a-city/'),
('lookahead-negative', {'city': 'a-city'}, '/lookahead-/a-city/'),
('lookbehind-positive', {'city': 'a-city'}, '/lookbehind+/a-city/'),
('lookbehind-negative', {'city': 'a-city'}, '/lookbehind-/a-city/'),
]
for name, kwargs, expected in test_urls:
with self.subTest(name=name, kwargs=kwargs):
self.assertEqual(reverse(name, kwargs=kwargs), expected)
def test_invalid_reverse(self):
test_urls = [
('lookahead-positive', {'city': 'other-city'}),
('lookahead-negative', {'city': 'not-a-city'}),
('lookbehind-positive', {'city': 'other-city'}),
('lookbehind-negative', {'city': 'not-a-city'}),
]
for name, kwargs in test_urls:
with self.subTest(name=name, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, kwargs=kwargs)
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import tempfile
import threading
import time
import unittest
from unittest import mock
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers,
)
from django.views.decorators.cache import cache_control, cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
KEY_ERRORS_WITH_MEMCACHED_MSG = (
'Cache key contains characters that will cause errors if used with '
'memcached: %r'
)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Nonexistent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_get_many_invalid_key(self):
        msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
        with self.assertWarnsMessage(CacheKeyWarning, msg):
            cache.get_many(['key with spaces'])
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_touch(self):
"""Dummy cache can't do touch()."""
self.assertIs(cache.touch('whatever'), False)
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
self.assertEqual(cache.set_many({'a': 1, 'b': 2}), [])
self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), [])
def test_set_many_invalid_key(self):
        msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
        with self.assertWarnsMessage(CacheKeyWarning, msg):
            cache.set_many({'key with spaces': 'foo'})
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_delete_many_invalid_key(self):
        msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
        with self.assertWarnsMessage(CacheKeyWarning, msg):
            cache.delete_many({'key with spaces': 'foo'})
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
        self.assertIsNone(cache.get_or_set('mykey', None))
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
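# For illustration (hypothetical values): custom_key_func('answer', 'p1', 2)
# returns 'CUSTOM-p1-2-answer'.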
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
    # `params` are test-specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
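# A worked example of the search order above (hypothetical values): with
# base={'BACKEND': 'x.y.SomeCache'} and params={'TIMEOUT': 1}, the 'v2' entry
# becomes {'BACKEND': 'x.y.SomeCache', 'VERSION': 2, 'TIMEOUT': 1}, since
# `params` overrides `_caches_setting_base`, which in turn overrides `base`.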
class BaseCacheTests:
# A common set of tests to apply to all cache backends
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
        # Test for cache key conflicts between caches that share a backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
self.assertEqual(cache.get_many(iter(['a', 'b', 'e'])), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_touch(self):
# cache.touch() updates the timeout.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1', timeout=4), True)
time.sleep(2)
self.assertTrue(cache.has_key('expire1'))
time.sleep(3)
self.assertFalse(cache.has_key('expire1'))
# cache.touch() works without the timeout argument.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1'), True)
time.sleep(2)
self.assertTrue(cache.has_key('expire1'))
self.assertIs(cache.touch('nonexistent'), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_returns_empty_list_on_success(self):
"""set_many() returns an empty list when all keys are inserted."""
failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(failing_keys, [])
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set_many({'key1': 'spam', 'key2': 'eggs', 'key3': 'ham'})
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
        Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
        Passing None as the timeout results in a value that is cached forever.
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
cache.set('key5', 'belgian fries', timeout=1)
cache.touch('key5', timeout=None)
time.sleep(2)
self.assertEqual(cache.get('key5'), 'belgian fries')
def test_zero_timeout(self):
"""
        Passing a timeout of zero results in a value that is not cached.
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
cache.set('key5', 'belgian fries', timeout=5)
cache.touch('key5', timeout=0)
self.assertIsNone(cache.get('key5'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
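        # (range(1, initial_count) creates initial_count - 1 entries.)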
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with self.assertWarnsMessage(CacheKeyWarning, expected_warning):
cache.set(key, 'value')
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
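        # With the default key function, backend keys are built as
        # 'key_prefix:version:key', so the same name stored under version=1
        # and version=2 maps to two independent cache entries.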
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
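        # set_cookie() below attaches a SimpleCookie to the response; response
        # cookies are the historically unpicklable piece that this test pushes
        # through the cache middleware's serialization round trip.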
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
        self.assertIsNone(cache.get_or_set('null', None))
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_callable_returning_none(self):
self.assertIsNone(cache.get_or_set('mykey', lambda: None))
# Previous get_or_set() doesn't store None in the cache.
self.assertEqual(cache.get('mykey', 'default'), 'default')
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super().setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super().tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_get_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2})
cache.set('expired', 'expired', 0.01)
with self.assertNumQueries(1):
self.assertEqual(cache.get_many(['a', 'b']), {'a': 1, 'b': 2})
time.sleep(0.02)
with self.assertNumQueries(2):
self.assertEqual(cache.get_many(['a', 'b', 'expired']), {'a': 1, 'b': 2})
def test_delete_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2, 'c': 3})
with self.assertNumQueries(1):
cache.delete_many(['a', 'b', 'c'])
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
databases = {'default', 'other'}
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable', database='default', verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable', database='other', verbosity=0)
class PicklingSideEffect:
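    """Record whether the cache's internal lock is held during pickling.

    __getstate__() runs while the object is being pickled, so `locked`
    captures whether cache.set()/cache.add() still held the lock then.
    """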
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
self.locked = self.cache._lock.locked()
return {}
limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
OPTIONS={'MAX_ENTRIES': 9},
))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
@limit_locmem_entries
def test_lru_get(self):
"""get() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
@limit_locmem_entries
def test_lru_set(self):
"""set() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(3, 9):
cache.set(key, key, timeout=None)
cache.set(9, 9, timeout=None)
for key in range(3, 10):
self.assertEqual(cache.get(key), key)
for key in range(3):
self.assertIsNone(cache.get(key))
@limit_locmem_entries
def test_lru_incr(self):
"""incr() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
cache.incr(key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key + 1)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
with self.subTest(location=location):
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def test_invalid_key_characters(self):
"""
On memcached, we don't introduce a duplicate key validation
        step (for speed reasons); we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
# when using the ascii protocol.
with self.assertRaises(Exception):
cache.set('key with spaces', 'value')
def test_invalid_key_length(self):
# memcached limits key length to 250
with self.assertRaises(Exception):
cache.set('a' * 251, 'value')
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Some clients (e.g. pylibmc) raise when the value is too large,
# while others (e.g. python-memcached) intentionally return True
# indicating success. This test is primarily checking that the key
# was deleted, so the return/exception behavior for the set()
# itself is not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
def test_set_many_returns_failing_keys(self):
def fail_set_multi(mapping, *args, **kwargs):
return mapping.keys()
with mock.patch('%s.Client.set_multi' % self.client_library_name, side_effect=fail_set_multi):
failing_keys = cache.set_many({'key': 'value'})
self.assertEqual(failing_keys, ['key'])
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
base_params = MemcachedCache_params
client_library_name = 'memcache'
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key in settings.CACHES:
with self.subTest(cache_key=cache_key):
self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'server_max_value_length': 9999},
))
def test_memcached_options(self):
self.assertEqual(cache._cache.server_max_value_length, 9999)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
client_library_name = 'pylibmc'
# libmemcached manages its own connections.
should_disconnect_on_close = False
# By default, pylibmc/libmemcached don't verify keys client-side and so
# this test triggers a server-side bug that causes later tests to fail
# (#19914). The `verify_keys` behavior option could be set to True (which
# would avoid triggering the server-side bug), however this test would
# still fail due to https://github.com/lericson/pylibmc/issues/219.
@unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail")
def test_invalid_key_characters(self):
pass
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super().setUp()
self.dirname = tempfile.mkdtemp()
        # The cache LOCATION cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
    def tearDown(self):
        # Call the parent tearDown() first, as cache.clear() may recreate the
        # cache base directory that is removed below.
        super().tearDown()
        shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
def test_get_does_not_ignore_non_filenotfound_exceptions(self):
with mock.patch('builtins.open', side_effect=OSError):
with self.assertRaises(OSError):
cache.get('foo')
def test_empty_cache_file_considered_expired(self):
cache_file = cache._key_to_file('foo')
with open(cache_file, 'wb') as fh:
fh.write(b'')
with open(cache_file, 'rb') as fh:
self.assertIs(cache._is_expired(fh), True)
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
    Cache settings with TIMEOUT=None create caches that set non-expiring keys.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
        del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
host = 'www.example.com'
path = '/cache/test/'
factory = RequestFactory(HTTP_HOST=host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
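# Hedged sketch of why PrefixedCacheUtils can reuse CacheUtils unchanged:
# KEY_PREFIX only alters the final key built by BaseCache.make_key(), whose
# default key function produces '<prefix>:<version>:<key>'. The helper name
# is illustrative only.
def _key_prefix_sketch():
    cache = caches['default']
    # Under KEY_PREFIX='cacheprefix' this returns 'cacheprefix:1:my-key'.
    return cache.make_key('my-key')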
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
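# Hedged sketch of the behavior both tests above rely on: cache keys treat
# HEAD requests as GET (get_cache_key() defaults to method='GET'), so a
# cached GET response can satisfy a later HEAD request.
def _head_uses_get_key_sketch(factory, path):
    learn_cache_key(factory.get(path), HttpResponse())
    return get_cache_key(factory.head(path)) == get_cache_key(factory.get(path))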
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
        # cache with non-empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # The cached content can be retrieved
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
        # change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
        # change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
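# Hedged sketch of the decorator forms exercised below: cache_page() wraps a
# view in a per-view CacheMiddleware, so timeout, cache alias, and key prefix
# can all differ between decorated views even for the same URL. The helper
# name is illustrative only.
def _decorated_view_sketch():
    short_lived = cache_page(1)(hello_world_view)
    prefixed = cache_page(1, key_prefix='sketch')(hello_world_view)
    return short_lived, prefixed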
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
factory = RequestFactory()
def setUp(self):
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super().tearDown()
def test_constructor(self):
"""
        Ensure the constructor correctly distinguishes between usage of
        CacheMiddleware as middleware vs. as a view decorator, and sets
        attributes appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
        # ... but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
        # ... even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_cached_control_private_not_cached(self):
"""Responses with 'Cache-Control: private' are not cached."""
view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view))
request = self.factory.get('/view/')
response = view_with_private_cache(request, '1')
self.assertEqual(response.content, b'Hello World 1')
response = view_with_private_cache(request, '2')
self.assertEqual(response.content, b'Hello World 2')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
        # The response shouldn't be served from cache on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
    Tests various headers with TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse).
"""
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
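# Hedged sketch of the key layout asserted above: the fragment key is
# 'template.cache.<fragment_name>.<md5 of the urlquoted, ":"-joined vary_on
# values>', so the empty vary_on case hashes the empty string.
def _fragment_key_sketch():
    return make_template_fragment_key('sidebar', ['user42'])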
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
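# Hedged sketch of what test_per_thread verifies: the `caches` handler keeps
# per-thread storage, so each thread lazily builds its own backend instance
# for the same alias.
def _thread_local_caches_sketch():
    seen = []
    def grab():
        seen.append(id(caches['default']))
    for _ in range(2):
        t = threading.Thread(target=grab)
        t.start()
        t.join()
    return seen[0] != seen[1]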
import os
from datetime import datetime
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.html import (
conditional_escape, escape, escapejs, format_html, html_safe, json_script,
linebreaks, smart_urlquote, strip_spaces_between_tags, strip_tags, urlize,
)
from django.utils.safestring import mark_safe
class TestUtilsHtml(SimpleTestCase):
def check_output(self, function, value, output=None):
"""
function(value) equals output. If output is None, function(value)
equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
items = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
("'", '''),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
with self.subTest(value=value, output=output):
for pattern in patterns:
with self.subTest(value=value, output=output, pattern=pattern):
self.check_output(escape, pattern % value, pattern % output)
self.check_output(escape, lazystr(pattern % value), pattern % output)
# Check repeated values.
self.check_output(escape, value * 2, output * 2)
        # Verify it doesn't double-escape '&'.
self.check_output(escape, '<&', '<&')
def test_format_html(self):
self.assertEqual(
format_html(
"{} {} {third} {fourth}",
"< Dangerous >",
mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=mark_safe("<i>safe again</i>"),
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br>sub1<br>sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br>sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(linebreaks, value, output)
self.check_output(linebreaks, lazystr(value), output)
def test_strip_tags(self):
items = (
('<p>See: 'é is an apostrophe followed by e acute</p>',
'See: 'é is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
# caused infinite loop on Pythons not patched with
# https://bugs.python.org/issue20288
('&gotcha&#;<>', '&gotcha&#;<>'),
('<sc<!-- -->ript>test<<!-- -->/script>', 'ript>test'),
('<script>alert()</script>&h', 'alert()h'),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(strip_tags, value, output)
self.check_output(strip_tags, lazystr(value), output)
def test_strip_tags_files(self):
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
with self.subTest(filename=filename):
path = os.path.join(os.path.dirname(__file__), 'files', filename)
with open(path) as fp:
content = fp.read()
start = datetime.now()
stripped = strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
with self.subTest(value=value):
self.check_output(strip_spaces_between_tags, value)
self.check_output(strip_spaces_between_tags, lazystr(value))
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(strip_spaces_between_tags, value, output)
self.check_output(strip_spaces_between_tags, lazystr(value), output)
def test_escapejs(self):
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
(
'and lots of whitespace: \r\n\t\v\f\b',
'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'
),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
(
'paragraph separator:\u2029and line separator:\u2028',
'paragraph separator:\\u2029and line separator:\\u2028'
),
('`', '\\u0060'),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(escapejs, value, output)
self.check_output(escapejs, lazystr(value), output)
def test_json_script(self):
tests = (
# "<", ">" and "&" are quoted inside JSON strings
(('&<>', '<script id="test_id" type="application/json">"\\u0026\\u003C\\u003E"</script>')),
# "<", ">" and "&" are quoted inside JSON objects
(
{'a': '<script>test&ing</script>'},
'<script id="test_id" type="application/json">'
'{"a": "\\u003Cscript\\u003Etest\\u0026ing\\u003C/script\\u003E"}</script>'
),
# Lazy strings are quoted
(lazystr('&<>'), '<script id="test_id" type="application/json">"\\u0026\\u003C\\u003E"</script>'),
(
{'a': lazystr('<script>test&ing</script>')},
'<script id="test_id" type="application/json">'
'{"a": "\\u003Cscript\\u003Etest\\u0026ing\\u003C/script\\u003E"}</script>'
),
)
for arg, expected in tests:
with self.subTest(arg=arg):
self.assertEqual(json_script(arg, 'test_id'), expected)
def test_smart_urlquote(self):
items = (
('http://öäü.com/', 'http://xn--4ca9at.com/'),
('http://öäü.com/öäü/', 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/'),
# Everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered
# safe as per RFC.
('http://example.com/path/öäü/', 'http://example.com/path/%C3%B6%C3%A4%C3%BC/'),
('http://example.com/%C3%B6/ä/', 'http://example.com/%C3%B6/%C3%A4/'),
('http://example.com/?x=1&y=2+3&z=', 'http://example.com/?x=1&y=2+3&z='),
('http://example.com/?x=<>"\'', 'http://example.com/?x=%3C%3E%22%27'),
('http://example.com/?q=http://example.com/?x=1%26q=django',
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
('http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango',
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
('http://.www.f oo.bar/', 'http://.www.f%20oo.bar/'),
)
# IDNs are properly quoted
for value, output in items:
with self.subTest(value=value, output=output):
self.assertEqual(smart_urlquote(value), output)
def test_conditional_escape(self):
s = '<h1>interop</h1>'
self.assertEqual(conditional_escape(s), '<h1>interop</h1>')
self.assertEqual(conditional_escape(mark_safe(s)), s)
self.assertEqual(conditional_escape(lazystr(mark_safe(s))), s)
def test_html_safe(self):
@html_safe
class HtmlClass:
def __str__(self):
return "<h1>I'm a html class!</h1>"
html_obj = HtmlClass()
self.assertTrue(hasattr(HtmlClass, '__html__'))
self.assertTrue(hasattr(html_obj, '__html__'))
self.assertEqual(str(html_obj), html_obj.__html__())
def test_html_safe_subclass(self):
class BaseClass:
def __html__(self):
# defines __html__ on its own
return 'some html content'
def __str__(self):
return 'some non html content'
@html_safe
class Subclass(BaseClass):
def __str__(self):
# overrides __str__ and is marked as html_safe
return 'some html safe content'
subclass_obj = Subclass()
self.assertEqual(str(subclass_obj), subclass_obj.__html__())
def test_html_safe_defines_html_error(self):
msg = "can't apply @html_safe to HtmlClass because it defines __html__()."
with self.assertRaisesMessage(ValueError, msg):
@html_safe
class HtmlClass:
def __html__(self):
return "<h1>I'm a html class!</h1>"
def test_html_safe_doesnt_define_str(self):
msg = "can't apply @html_safe to HtmlClass because it doesn't define __str__()."
with self.assertRaisesMessage(ValueError, msg):
@html_safe
class HtmlClass:
pass
def test_urlize(self):
tests = (
(
'Search for google.com/?q=! and see.',
'Search for <a href="http://google.com/?q=">google.com/?q=</a>! and see.'
),
(
lazystr('Search for google.com/?q=!'),
'Search for <a href="http://google.com/?q=">google.com/?q=</a>!'
),
('[email protected]', '<a href="mailto:[email protected]">[email protected]</a>'),
)
for value, output in tests:
with self.subTest(value=value):
self.assertEqual(urlize(value), output)
def test_urlize_unchanged_inputs(self):
tests = (
('a' + '@a' * 50000) + 'a', # simple_email_re catastrophic test
('a' + '.' * 1000000) + 'a', # trailing_punctuation catastrophic test
'foo@',
'@foo.com',
'[email protected]',
'foo@localhost',
'foo@localhost.',
)
for value in tests:
with self.subTest(value=value):
self.assertEqual(urlize(value), value)
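# Hedged sketch of the contract the tests above rely on: format_html() runs
# conditional_escape() over every argument, so mark_safe() is the only way to
# opt a fragment out of escaping.
def _format_html_sketch():
    # Returns '&lt;unsafe&gt; <b>safe</b>'.
    return format_html('{} {}', '<unsafe>', mark_safe('<b>safe</b>'))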
from django.test import SimpleTestCase
from django.utils.functional import cached_property, lazy
class FunctionalTests(SimpleTestCase):
def test_lazy(self):
t = lazy(lambda: tuple(range(3)), list, tuple)
for a, b in zip(t(), range(3)):
self.assertEqual(a, b)
def test_lazy_base_class(self):
"""lazy also finds base class methods in the proxy object"""
class Base:
def base_method(self):
pass
class Klazz(Base):
pass
t = lazy(lambda: Klazz(), Klazz)()
self.assertIn('base_method', dir(t))
def test_lazy_base_class_override(self):
"""lazy finds the correct (overridden) method implementation"""
class Base:
def method(self):
return 'Base'
class Klazz(Base):
def method(self):
return 'Klazz'
t = lazy(lambda: Klazz(), Base)()
self.assertEqual(t.method(), 'Klazz')
def test_lazy_object_to_string(self):
class Klazz:
def __str__(self):
return "Î am ā Ǩlâzz."
def __bytes__(self):
return b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz."
t = lazy(lambda: Klazz(), Klazz)()
self.assertEqual(str(t), "Î am ā Ǩlâzz.")
self.assertEqual(bytes(t), b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz.")
def assertCachedPropertyWorks(self, attr, Class):
with self.subTest(attr=attr):
def get(source):
return getattr(source, attr)
obj = Class()
class SubClass(Class):
pass
subobj = SubClass()
# Docstring is preserved.
self.assertEqual(get(Class).__doc__, 'Here is the docstring...')
self.assertEqual(get(SubClass).__doc__, 'Here is the docstring...')
# It's cached.
self.assertEqual(get(obj), get(obj))
self.assertEqual(get(subobj), get(subobj))
# The correct value is returned.
self.assertEqual(get(obj)[0], 1)
self.assertEqual(get(subobj)[0], 1)
# State isn't shared between instances.
obj2 = Class()
subobj2 = SubClass()
self.assertNotEqual(get(obj), get(obj2))
self.assertNotEqual(get(subobj), get(subobj2))
# It behaves like a property when there's no instance.
self.assertIsInstance(get(Class), cached_property)
self.assertIsInstance(get(SubClass), cached_property)
# 'other_value' doesn't become a property.
self.assertTrue(callable(obj.other_value))
self.assertTrue(callable(subobj.other_value))
def test_cached_property(self):
"""cached_property caches its value and behaves like a property."""
class Class:
@cached_property
def value(self):
"""Here is the docstring..."""
return 1, object()
@cached_property
def __foo__(self):
"""Here is the docstring..."""
return 1, object()
def other_value(self):
"""Here is the docstring..."""
return 1, object()
other = cached_property(other_value, name='other')
attrs = ['value', 'other', '__foo__']
for attr in attrs:
self.assertCachedPropertyWorks(attr, Class)
def test_cached_property_auto_name(self):
"""
cached_property caches its value and behaves like a property
on mangled methods or when the name kwarg isn't set.
"""
class Class:
@cached_property
def __value(self):
"""Here is the docstring..."""
return 1, object()
def other_value(self):
"""Here is the docstring..."""
return 1, object()
other = cached_property(other_value)
other2 = cached_property(other_value, name='different_name')
attrs = ['_Class__value', 'other']
for attr in attrs:
self.assertCachedPropertyWorks(attr, Class)
# An explicit name is ignored.
obj = Class()
obj.other2
self.assertFalse(hasattr(obj, 'different_name'))
def test_cached_property_reuse_different_names(self):
"""Disallow this case because the decorated function wouldn't be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError(
"Cannot assign the same cached_property to two different "
"names ('a' and 'b')."
))
)
def test_cached_property_reuse_same_name(self):
"""
Reusing a cached_property on different classes under the same name is
allowed.
"""
counter = 0
@cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_cached_property_set_name_not_called(self):
cp = cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
msg = 'Cannot use cached_property instance without calling __set_name__() on it.'
with self.assertRaisesMessage(TypeError, msg):
Foo().cp
def test_lazy_equality(self):
"""
== and != work correctly for Promises.
"""
lazy_a = lazy(lambda: 4, int)
lazy_b = lazy(lambda: 4, int)
lazy_c = lazy(lambda: 5, int)
self.assertEqual(lazy_a(), lazy_b())
self.assertNotEqual(lazy_b(), lazy_c())
def test_lazy_repr_text(self):
original_object = 'Lazy translation text'
lazy_obj = lazy(lambda: original_object, str)
self.assertEqual(repr(original_object), repr(lazy_obj()))
def test_lazy_repr_int(self):
original_object = 15
lazy_obj = lazy(lambda: original_object, int)
self.assertEqual(repr(original_object), repr(lazy_obj()))
def test_lazy_repr_bytes(self):
original_object = b'J\xc3\xbcst a str\xc3\xadng'
lazy_obj = lazy(lambda: original_object, bytes)
self.assertEqual(repr(original_object), repr(lazy_obj()))
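# Hedged sketch of the laziness the tests above rely on: lazy() defers the
# wrapped callable until a method of one of the declared result classes is
# invoked on the proxy.
def _lazy_sketch():
    calls = []
    def expensive():
        calls.append(1)
        return 'value'
    proxy = lazy(expensive, str)()
    assert not calls       # nothing has been evaluated yet
    result = str(proxy)    # evaluation happens on first use
    return result, len(calls)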
import contextlib
import os
import py_compile
import shutil
import sys
import tempfile
import threading
import time
import weakref
import zipfile
from importlib import import_module
from pathlib import Path
from unittest import mock, skip
from django.apps.registry import Apps
from django.test import SimpleTestCase
from django.test.utils import extend_sys_path
from django.utils import autoreload
from django.utils.autoreload import WatchmanUnavailable
class TestIterModulesAndFiles(SimpleTestCase):
def import_and_cleanup(self, name):
import_module(name)
        self.addCleanup(sys.path_importer_cache.clear)
        self.addCleanup(lambda: sys.modules.pop(name, None))
def clear_autoreload_caches(self):
autoreload.iter_modules_and_files.cache_clear()
def assertFileFound(self, filename):
# Some temp directories are symlinks. Python resolves these fully while
# importing.
resolved_filename = filename.resolve()
self.clear_autoreload_caches()
# Test uncached access
self.assertIn(resolved_filename, list(autoreload.iter_all_python_module_files()))
# Test cached access
self.assertIn(resolved_filename, list(autoreload.iter_all_python_module_files()))
self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1)
def assertFileNotFound(self, filename):
resolved_filename = filename.resolve()
self.clear_autoreload_caches()
# Test uncached access
self.assertNotIn(resolved_filename, list(autoreload.iter_all_python_module_files()))
# Test cached access
self.assertNotIn(resolved_filename, list(autoreload.iter_all_python_module_files()))
self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1)
def temporary_file(self, filename):
dirname = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, dirname)
return Path(dirname) / filename
def test_paths_are_pathlib_instances(self):
for filename in autoreload.iter_all_python_module_files():
self.assertIsInstance(filename, Path)
def test_file_added(self):
"""
When a file is added, it's returned by iter_all_python_module_files().
"""
filename = self.temporary_file('test_deleted_removed_module.py')
filename.touch()
with extend_sys_path(str(filename.parent)):
self.import_and_cleanup('test_deleted_removed_module')
self.assertFileFound(filename.absolute())
def test_check_errors(self):
"""
When a file containing an error is imported in a function wrapped by
check_errors(), gen_filenames() returns it.
"""
filename = self.temporary_file('test_syntax_error.py')
filename.write_text("Ceci n'est pas du Python.")
with extend_sys_path(str(filename.parent)):
with self.assertRaises(SyntaxError):
autoreload.check_errors(import_module)('test_syntax_error')
self.assertFileFound(filename)
def test_check_errors_catches_all_exceptions(self):
"""
Since Python may raise arbitrary exceptions when importing code,
check_errors() must catch Exception, not just some subclasses.
"""
filename = self.temporary_file('test_exception.py')
filename.write_text('raise Exception')
with extend_sys_path(str(filename.parent)):
with self.assertRaises(Exception):
autoreload.check_errors(import_module)('test_exception')
self.assertFileFound(filename)
def test_zip_reload(self):
"""
Modules imported from zipped files have their archive location included
in the result.
"""
zip_file = self.temporary_file('zip_import.zip')
with zipfile.ZipFile(str(zip_file), 'w', zipfile.ZIP_DEFLATED) as zipf:
zipf.writestr('test_zipped_file.py', '')
with extend_sys_path(str(zip_file)):
self.import_and_cleanup('test_zipped_file')
self.assertFileFound(zip_file)
def test_bytecode_conversion_to_source(self):
""".pyc and .pyo files are included in the files list."""
filename = self.temporary_file('test_compiled.py')
filename.touch()
compiled_file = Path(py_compile.compile(str(filename), str(filename.with_suffix('.pyc'))))
filename.unlink()
with extend_sys_path(str(compiled_file.parent)):
self.import_and_cleanup('test_compiled')
self.assertFileFound(compiled_file)
def test_weakref_in_sys_module(self):
"""iter_all_python_module_file() ignores weakref modules."""
time_proxy = weakref.proxy(time)
sys.modules['time_proxy'] = time_proxy
self.addCleanup(lambda: sys.modules.pop('time_proxy', None))
list(autoreload.iter_all_python_module_files()) # No crash.
class TestCommonRoots(SimpleTestCase):
def test_common_roots(self):
paths = (
Path('/first/second'),
Path('/first/second/third'),
Path('/first/'),
Path('/root/first/'),
)
results = autoreload.common_roots(paths)
self.assertCountEqual(results, [Path('/first/'), Path('/root/first/')])
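# Hedged sketch of the collapsing behavior tested above: common_roots() drops
# any path that has another supplied path as an ancestor, keeping only the
# topmost directories that need watching.
def _common_roots_sketch():
    roots = autoreload.common_roots([Path('/a/b'), Path('/a'), Path('/c')])
    return sorted(str(p) for p in roots)  # expected: ['/a', '/c']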
class TestSysPathDirectories(SimpleTestCase):
def setUp(self):
self._directory = tempfile.TemporaryDirectory()
self.directory = Path(self._directory.name).resolve().absolute()
self.file = self.directory / 'test'
self.file.touch()
def tearDown(self):
self._directory.cleanup()
def test_sys_paths_with_directories(self):
with extend_sys_path(str(self.file)):
paths = list(autoreload.sys_path_directories())
self.assertIn(self.file.parent, paths)
def test_sys_paths_non_existing(self):
        nonexistant_file = self.directory / 'does_not_exist'
with extend_sys_path(str(nonexistant_file)):
paths = list(autoreload.sys_path_directories())
self.assertNotIn(nonexistant_file, paths)
self.assertNotIn(nonexistant_file.parent, paths)
def test_sys_paths_absolute(self):
paths = list(autoreload.sys_path_directories())
self.assertTrue(all(p.is_absolute() for p in paths))
def test_sys_paths_directories(self):
with extend_sys_path(str(self.directory)):
paths = list(autoreload.sys_path_directories())
self.assertIn(self.directory, paths)
class GetReloaderTests(SimpleTestCase):
@mock.patch('django.utils.autoreload.WatchmanReloader')
def test_watchman_unavailable(self, mocked_watchman):
mocked_watchman.check_availability.side_effect = WatchmanUnavailable
self.assertIsInstance(autoreload.get_reloader(), autoreload.StatReloader)
@mock.patch.object(autoreload.WatchmanReloader, 'check_availability')
def test_watchman_available(self, mocked_available):
# If WatchmanUnavailable isn't raised, Watchman will be chosen.
mocked_available.return_value = None
result = autoreload.get_reloader()
self.assertIsInstance(result, autoreload.WatchmanReloader)
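# Hedged sketch of the selection logic tested above: get_reloader() prefers
# Watchman and falls back to the stat-based reloader when
# WatchmanReloader.check_availability() raises WatchmanUnavailable.
def _reloader_selection_sketch():
    try:
        autoreload.WatchmanReloader.check_availability()
    except WatchmanUnavailable:
        return autoreload.StatReloader
    return autoreload.WatchmanReloader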
class RunWithReloaderTests(SimpleTestCase):
@mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'true'})
@mock.patch('django.utils.autoreload.get_reloader')
def test_swallows_keyboard_interrupt(self, mocked_get_reloader):
mocked_get_reloader.side_effect = KeyboardInterrupt()
autoreload.run_with_reloader(lambda: None) # No exception
@mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'false'})
@mock.patch('django.utils.autoreload.restart_with_reloader')
def test_calls_sys_exit(self, mocked_restart_reloader):
mocked_restart_reloader.return_value = 1
with self.assertRaises(SystemExit) as exc:
autoreload.run_with_reloader(lambda: None)
self.assertEqual(exc.exception.code, 1)
@mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'true'})
@mock.patch('django.utils.autoreload.start_django')
@mock.patch('django.utils.autoreload.get_reloader')
def test_calls_start_django(self, mocked_reloader, mocked_start_django):
mocked_reloader.return_value = mock.sentinel.RELOADER
autoreload.run_with_reloader(mock.sentinel.METHOD)
self.assertEqual(mocked_start_django.call_count, 1)
self.assertSequenceEqual(
mocked_start_django.call_args[0],
[mock.sentinel.RELOADER, mock.sentinel.METHOD]
)
class StartDjangoTests(SimpleTestCase):
@mock.patch('django.utils.autoreload.StatReloader')
def test_watchman_becomes_unavailable(self, mocked_stat):
mocked_stat.should_stop.return_value = True
fake_reloader = mock.MagicMock()
fake_reloader.should_stop = False
fake_reloader.run.side_effect = autoreload.WatchmanUnavailable()
autoreload.start_django(fake_reloader, lambda: None)
self.assertEqual(mocked_stat.call_count, 1)
@mock.patch('django.utils.autoreload.ensure_echo_on')
def test_echo_on_called(self, mocked_echo):
fake_reloader = mock.MagicMock()
autoreload.start_django(fake_reloader, lambda: None)
self.assertEqual(mocked_echo.call_count, 1)
@mock.patch('django.utils.autoreload.check_errors')
def test_check_errors_called(self, mocked_check_errors):
fake_method = mock.MagicMock(return_value=None)
fake_reloader = mock.MagicMock()
autoreload.start_django(fake_reloader, fake_method)
self.assertCountEqual(mocked_check_errors.call_args[0], [fake_method])
@mock.patch('threading.Thread')
@mock.patch('django.utils.autoreload.check_errors')
def test_starts_thread_with_args(self, mocked_check_errors, mocked_thread):
fake_reloader = mock.MagicMock()
fake_main_func = mock.MagicMock()
fake_thread = mock.MagicMock()
mocked_check_errors.return_value = fake_main_func
mocked_thread.return_value = fake_thread
autoreload.start_django(fake_reloader, fake_main_func, 123, abc=123)
self.assertEqual(mocked_thread.call_count, 1)
self.assertEqual(
mocked_thread.call_args[1],
{'target': fake_main_func, 'args': (123,), 'kwargs': {'abc': 123}}
)
self.assertSequenceEqual(fake_thread.setDaemon.call_args[0], [True])
self.assertTrue(fake_thread.start.called)
class TestCheckErrors(SimpleTestCase):
def test_mutates_error_files(self):
fake_method = mock.MagicMock(side_effect=RuntimeError())
wrapped = autoreload.check_errors(fake_method)
with mock.patch.object(autoreload, '_error_files') as mocked_error_files:
with self.assertRaises(RuntimeError):
wrapped()
self.assertEqual(mocked_error_files.append.call_count, 1)
class TestRaiseLastException(SimpleTestCase):
@mock.patch('django.utils.autoreload._exception', None)
def test_no_exception(self):
# Should raise no exception if _exception is None
autoreload.raise_last_exception()
def test_raises_exception(self):
class MyException(Exception):
pass
# Create an exception
try:
raise MyException('Test Message')
except MyException:
exc_info = sys.exc_info()
with mock.patch('django.utils.autoreload._exception', exc_info):
with self.assertRaises(MyException, msg='Test Message'):
autoreload.raise_last_exception()
class RestartWithReloaderTests(SimpleTestCase):
executable = '/usr/bin/python'
def patch_autoreload(self, argv):
patch_call = mock.patch('django.utils.autoreload.subprocess.call', return_value=0)
patches = [
mock.patch('django.utils.autoreload.sys.argv', argv),
mock.patch('django.utils.autoreload.sys.executable', self.executable),
mock.patch('django.utils.autoreload.sys.warnoptions', ['all']),
]
for p in patches:
p.start()
self.addCleanup(p.stop)
mock_call = patch_call.start()
self.addCleanup(patch_call.stop)
return mock_call
def test_manage_py(self):
argv = ['./manage.py', 'runserver']
mock_call = self.patch_autoreload(argv)
autoreload.restart_with_reloader()
self.assertEqual(mock_call.call_count, 1)
self.assertEqual(mock_call.call_args[0][0], [self.executable, '-Wall'] + argv)
def test_python_m_django(self):
main = '/usr/lib/pythonX.Y/site-packages/django/__main__.py'
argv = [main, 'runserver']
mock_call = self.patch_autoreload(argv)
with mock.patch('django.__main__.__file__', main):
autoreload.restart_with_reloader()
self.assertEqual(mock_call.call_count, 1)
self.assertEqual(mock_call.call_args[0][0], [self.executable, '-Wall', '-m', 'django'] + argv[1:])
class ReloaderTests(SimpleTestCase):
RELOADER_CLS = None
def setUp(self):
self._tempdir = tempfile.TemporaryDirectory()
self.tempdir = Path(self._tempdir.name).resolve().absolute()
self.existing_file = self.ensure_file(self.tempdir / 'test.py')
self.nonexistant_file = (self.tempdir / 'does_not_exist.py').absolute()
self.reloader = self.RELOADER_CLS()
def tearDown(self):
self._tempdir.cleanup()
self.reloader.stop()
def ensure_file(self, path):
path.parent.mkdir(exist_ok=True, parents=True)
path.touch()
        # On Linux and Windows, updating the mtime of a file using touch()
        # will set a timestamp value that is in the past, as the time of the
        # last kernel tick is used rather than the current absolute time. To
        # make testing simpler, set the mtime to the time observed when this
        # function is called.
self.set_mtime(path, time.time())
return path.absolute()
def set_mtime(self, fp, value):
os.utime(str(fp), (value, value))
def increment_mtime(self, fp, by=1):
current_time = time.time()
self.set_mtime(fp, current_time + by)
@contextlib.contextmanager
def tick_twice(self):
ticker = self.reloader.tick()
next(ticker)
yield
next(ticker)
class IntegrationTests:
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_file(self, mocked_modules, notify_mock):
self.reloader.watch_file(self.existing_file)
with self.tick_twice():
self.increment_mtime(self.existing_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.existing_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_nonexistant_file(self, mocked_modules, notify_mock):
self.reloader.watch_file(self.nonexistant_file)
with self.tick_twice():
self.ensure_file(self.nonexistant_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.nonexistant_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_nonexistant_file_in_non_existing_directory(self, mocked_modules, notify_mock):
non_existing_directory = self.tempdir / 'non_existing_dir'
nonexistant_file = non_existing_directory / 'test'
self.reloader.watch_file(nonexistant_file)
with self.tick_twice():
self.ensure_file(nonexistant_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [nonexistant_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_glob(self, mocked_modules, notify_mock):
non_py_file = self.ensure_file(self.tempdir / 'non_py_file')
self.reloader.watch_dir(self.tempdir, '*.py')
with self.tick_twice():
self.increment_mtime(non_py_file)
self.increment_mtime(self.existing_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.existing_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_glob_non_existing_directory(self, mocked_modules, notify_mock):
non_existing_directory = self.tempdir / 'does_not_exist'
nonexistant_file = non_existing_directory / 'test.py'
self.reloader.watch_dir(non_existing_directory, '*.py')
with self.tick_twice():
self.ensure_file(nonexistant_file)
self.set_mtime(nonexistant_file, time.time())
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [nonexistant_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_multiple_globs(self, mocked_modules, notify_mock):
self.ensure_file(self.tempdir / 'x.test')
self.reloader.watch_dir(self.tempdir, '*.py')
self.reloader.watch_dir(self.tempdir, '*.test')
with self.tick_twice():
self.increment_mtime(self.existing_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.existing_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_overlapping_globs(self, mocked_modules, notify_mock):
self.reloader.watch_dir(self.tempdir, '*.py')
self.reloader.watch_dir(self.tempdir, '*.p*')
with self.tick_twice():
self.increment_mtime(self.existing_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.existing_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_glob_recursive(self, mocked_modules, notify_mock):
non_py_file = self.ensure_file(self.tempdir / 'dir' / 'non_py_file')
py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py')
self.reloader.watch_dir(self.tempdir, '**/*.py')
with self.tick_twice():
self.increment_mtime(non_py_file)
self.increment_mtime(py_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [py_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_multiple_recursive_globs(self, mocked_modules, notify_mock):
non_py_file = self.ensure_file(self.tempdir / 'dir' / 'test.txt')
py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py')
self.reloader.watch_dir(self.tempdir, '**/*.txt')
self.reloader.watch_dir(self.tempdir, '**/*.py')
with self.tick_twice():
self.increment_mtime(non_py_file)
self.increment_mtime(py_file)
self.assertEqual(notify_mock.call_count, 2)
self.assertCountEqual(notify_mock.call_args_list, [mock.call(py_file), mock.call(non_py_file)])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_nested_glob_recursive(self, mocked_modules, notify_mock):
inner_py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py')
self.reloader.watch_dir(self.tempdir, '**/*.py')
self.reloader.watch_dir(inner_py_file.parent, '**/*.py')
with self.tick_twice():
self.increment_mtime(inner_py_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [inner_py_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_overlapping_glob_recursive(self, mocked_modules, notify_mock):
py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py')
self.reloader.watch_dir(self.tempdir, '**/*.p*')
self.reloader.watch_dir(self.tempdir, '**/*.py*')
with self.tick_twice():
self.increment_mtime(py_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [py_file])
class BaseReloaderTests(ReloaderTests):
RELOADER_CLS = autoreload.BaseReloader
def test_watch_without_absolute(self):
with self.assertRaisesMessage(ValueError, 'test.py must be absolute.'):
self.reloader.watch_file('test.py')
def test_watch_with_single_file(self):
self.reloader.watch_file(self.existing_file)
watched_files = list(self.reloader.watched_files())
self.assertIn(self.existing_file, watched_files)
def test_watch_with_glob(self):
self.reloader.watch_dir(self.tempdir, '*.py')
watched_files = list(self.reloader.watched_files())
self.assertIn(self.existing_file, watched_files)
def test_watch_files_with_recursive_glob(self):
inner_file = self.ensure_file(self.tempdir / 'test' / 'test.py')
self.reloader.watch_dir(self.tempdir, '**/*.py')
watched_files = list(self.reloader.watched_files())
self.assertIn(self.existing_file, watched_files)
self.assertIn(inner_file, watched_files)
def test_run_loop_catches_stopiteration(self):
def mocked_tick():
yield
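        # Exhausting the one-shot generator raises StopIteration, which
        # run_loop() is expected to absorb.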
with mock.patch.object(self.reloader, 'tick', side_effect=mocked_tick) as tick:
self.reloader.run_loop()
self.assertEqual(tick.call_count, 1)
def test_run_loop_stop_and_return(self):
def mocked_tick(*args):
yield
self.reloader.stop()
return # Raises StopIteration
with mock.patch.object(self.reloader, 'tick', side_effect=mocked_tick) as tick:
self.reloader.run_loop()
self.assertEqual(tick.call_count, 1)
def test_wait_for_apps_ready_checks_for_exception(self):
app_reg = Apps()
app_reg.ready_event.set()
# thread.is_alive() is False if it's not started.
dead_thread = threading.Thread()
self.assertFalse(self.reloader.wait_for_apps_ready(app_reg, dead_thread))
def test_wait_for_apps_ready_without_exception(self):
app_reg = Apps()
app_reg.ready_event.set()
thread = mock.MagicMock()
thread.is_alive.return_value = True
self.assertTrue(self.reloader.wait_for_apps_ready(app_reg, thread))
def skip_unless_watchman_available():
try:
autoreload.WatchmanReloader.check_availability()
except WatchmanUnavailable as e:
return skip('Watchman unavailable: %s' % e)
return lambda func: func
@skip_unless_watchman_available()
class WatchmanReloaderTests(ReloaderTests, IntegrationTests):
RELOADER_CLS = autoreload.WatchmanReloader
def test_watch_glob_ignores_non_existing_directories_two_levels(self):
with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe:
self.reloader._watch_glob(self.tempdir / 'does_not_exist' / 'more', ['*'])
self.assertFalse(mocked_subscribe.called)
def test_watch_glob_uses_existing_parent_directories(self):
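        # For a missing directory, the subscription targets the nearest
        # existing parent, with the missing segment prefixed to the pattern.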
with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe:
self.reloader._watch_glob(self.tempdir / 'does_not_exist', ['*'])
self.assertSequenceEqual(
mocked_subscribe.call_args[0],
[
self.tempdir, 'glob-parent-does_not_exist:%s' % self.tempdir,
['anyof', ['match', 'does_not_exist/*', 'wholename']]
]
)
def test_watch_glob_multiple_patterns(self):
with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe:
self.reloader._watch_glob(self.tempdir, ['*', '*.py'])
self.assertSequenceEqual(
mocked_subscribe.call_args[0],
[
self.tempdir, 'glob:%s' % self.tempdir,
['anyof', ['match', '*', 'wholename'], ['match', '*.py', 'wholename']]
]
)
def test_watched_roots_contains_files(self):
paths = self.reloader.watched_roots([self.existing_file])
self.assertIn(self.existing_file.parent, paths)
def test_watched_roots_contains_directory_globs(self):
self.reloader.watch_dir(self.tempdir, '*.py')
paths = self.reloader.watched_roots([])
self.assertIn(self.tempdir, paths)
def test_watched_roots_contains_sys_path(self):
with extend_sys_path(str(self.tempdir)):
paths = self.reloader.watched_roots([])
self.assertIn(self.tempdir, paths)
def test_check_server_status(self):
self.assertTrue(self.reloader.check_server_status())
def test_check_server_status_raises_error(self):
with mock.patch.object(self.reloader.client, 'query') as mocked_query:
mocked_query.side_effect = Exception()
with self.assertRaises(autoreload.WatchmanUnavailable):
self.reloader.check_server_status()
@mock.patch('pywatchman.client')
def test_check_availability(self, mocked_client):
mocked_client().capabilityCheck.side_effect = Exception()
with self.assertRaisesMessage(WatchmanUnavailable, 'Cannot connect to the watchman service'):
self.RELOADER_CLS.check_availability()
@mock.patch('pywatchman.client')
def test_check_availability_lower_version(self, mocked_client):
mocked_client().capabilityCheck.return_value = {'version': '4.8.10'}
with self.assertRaisesMessage(WatchmanUnavailable, 'Watchman 4.9 or later is required.'):
self.RELOADER_CLS.check_availability()
def test_pywatchman_not_available(self):
with mock.patch.object(autoreload, 'pywatchman') as mocked:
mocked.__bool__.return_value = False
with self.assertRaisesMessage(WatchmanUnavailable, 'pywatchman not installed.'):
self.RELOADER_CLS.check_availability()
def test_update_watches_raises_exceptions(self):
class TestException(Exception):
pass
with mock.patch.object(self.reloader, '_update_watches') as mocked_watches:
with mock.patch.object(self.reloader, 'check_server_status') as mocked_server_status:
mocked_watches.side_effect = TestException()
mocked_server_status.return_value = True
with self.assertRaises(TestException):
self.reloader.update_watches()
self.assertIsInstance(mocked_server_status.call_args[0][0], TestException)
class StatReloaderTests(ReloaderTests, IntegrationTests):
RELOADER_CLS = autoreload.StatReloader
def setUp(self):
super().setUp()
# Shorten the sleep time to speed up tests.
self.reloader.SLEEP_TIME = 0.01
def test_snapshot_files_ignores_missing_files(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.nonexistant_file]):
self.assertEqual(dict(self.reloader.snapshot_files()), {})
def test_snapshot_files_updates(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]):
snapshot1 = dict(self.reloader.snapshot_files())
self.assertIn(self.existing_file, snapshot1)
self.increment_mtime(self.existing_file)
snapshot2 = dict(self.reloader.snapshot_files())
self.assertNotEqual(snapshot1[self.existing_file], snapshot2[self.existing_file])
def test_does_not_fire_without_changes(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]), \
mock.patch.object(self.reloader, 'notify_file_changed') as notifier:
mtime = self.existing_file.stat().st_mtime
initial_snapshot = {self.existing_file: mtime}
second_snapshot = self.reloader.loop_files(initial_snapshot, time.time())
self.assertEqual(second_snapshot, {})
notifier.assert_not_called()
def test_fires_when_created(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.nonexistant_file]), \
mock.patch.object(self.reloader, 'notify_file_changed') as notifier:
self.nonexistant_file.touch()
mtime = self.nonexistant_file.stat().st_mtime
second_snapshot = self.reloader.loop_files({}, mtime - 1)
self.assertCountEqual(second_snapshot.keys(), [self.nonexistant_file])
notifier.assert_called_once_with(self.nonexistant_file)
def test_fires_with_changes(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]), \
mock.patch.object(self.reloader, 'notify_file_changed') as notifier:
initial_snapshot = {self.existing_file: 1}
second_snapshot = self.reloader.loop_files(initial_snapshot, time.time())
notifier.assert_called_once_with(self.existing_file)
self.assertCountEqual(second_snapshot.keys(), [self.existing_file])
import unittest
from datetime import datetime
from django.test import SimpleTestCase, ignore_warnings
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.http import (
base36_to_int, escape_leading_slashes, http_date, int_to_base36,
is_safe_url, is_same_domain, parse_etags, parse_http_date, quote_etag,
urlencode, urlquote, urlquote_plus, urlsafe_base64_decode,
urlsafe_base64_encode, urlunquote, urlunquote_plus,
)
class URLEncodeTests(SimpleTestCase):
cannot_encode_none_msg = (
'Cannot encode None in a query string. Did you mean to pass an '
'empty string or omit the value?'
)
def test_tuples(self):
self.assertEqual(urlencode((('a', 1), ('b', 2), ('c', 3))), 'a=1&b=2&c=3')
def test_dict(self):
result = urlencode({'a': 1, 'b': 2, 'c': 3})
# Dictionaries are treated as unordered.
self.assertIn(result, [
'a=1&b=2&c=3',
'a=1&c=3&b=2',
'b=2&a=1&c=3',
'b=2&c=3&a=1',
'c=3&a=1&b=2',
'c=3&b=2&a=1',
])
def test_dict_containing_sequence_not_doseq(self):
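        # Without doseq, the list is encoded as a single percent-encoded
        # value instead of repeated keys.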
self.assertEqual(urlencode({'a': [1, 2]}, doseq=False), 'a=%5B%271%27%2C+%272%27%5D')
def test_dict_containing_sequence_doseq(self):
self.assertEqual(urlencode({'a': [1, 2]}, doseq=True), 'a=1&a=2')
def test_dict_containing_empty_sequence_doseq(self):
self.assertEqual(urlencode({'a': []}, doseq=True), '')
def test_multivaluedict(self):
result = urlencode(MultiValueDict({
'name': ['Adrian', 'Simon'],
'position': ['Developer'],
}), doseq=True)
# MultiValueDicts are similarly unordered.
self.assertIn(result, [
'name=Adrian&name=Simon&position=Developer',
'position=Developer&name=Adrian&name=Simon',
])
def test_dict_with_bytes_values(self):
self.assertEqual(urlencode({'a': b'abc'}, doseq=True), 'a=abc')
def test_dict_with_sequence_of_bytes(self):
self.assertEqual(urlencode({'a': [b'spam', b'eggs', b'bacon']}, doseq=True), 'a=spam&a=eggs&a=bacon')
def test_dict_with_bytearray(self):
self.assertEqual(urlencode({'a': bytearray(range(2))}, doseq=True), 'a=0&a=1')
self.assertEqual(urlencode({'a': bytearray(range(2))}, doseq=False), 'a=%5B%270%27%2C+%271%27%5D')
def test_generator(self):
def gen():
yield from range(2)
self.assertEqual(urlencode({'a': gen()}, doseq=True), 'a=0&a=1')
self.assertEqual(urlencode({'a': gen()}, doseq=False), 'a=%5B%270%27%2C+%271%27%5D')
def test_none(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({'a': None})
def test_none_in_sequence(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({'a': [None]}, doseq=True)
def test_none_in_generator(self):
def gen():
yield None
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({'a': gen()}, doseq=True)
class Base36IntTests(SimpleTestCase):
def test_roundtrip(self):
for n in [0, 1, 1000, 1000000]:
self.assertEqual(n, base36_to_int(int_to_base36(n)))
def test_negative_input(self):
with self.assertRaisesMessage(ValueError, 'Negative base36 conversion input.'):
int_to_base36(-1)
def test_to_base36_errors(self):
for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
int_to_base36(n)
def test_invalid_literal(self):
for n in ['#', ' ']:
with self.assertRaisesMessage(ValueError, "invalid literal for int() with base 36: '%s'" % n):
base36_to_int(n)
def test_input_too_large(self):
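        # base36_to_int() rejects input longer than 13 characters.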
with self.assertRaisesMessage(ValueError, 'Base36 input too large'):
base36_to_int('1' * 14)
def test_to_int_errors(self):
for n in [123, {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
base36_to_int(n)
def test_values(self):
for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]:
self.assertEqual(int_to_base36(n), b36)
self.assertEqual(base36_to_int(b36), n)
class IsSafeURLTests(unittest.TestCase):
def test_bad_urls(self):
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
r'\\example.com',
r'\\\example.com',
r'/\\/example.com',
r'\\\example.com',
r'\\example.com',
r'\\//example.com',
r'/\/example.com',
r'\/example.com',
r'/\example.com',
'http:///example.com',
r'http:/\//example.com',
r'http:\/example.com',
r'http:/\example.com',
'javascript:alert("XSS")',
'\njavascript:alert(x)',
'\x08//example.com',
r'http://otherserver\@example.com',
r'http:\\testserver\@example.com',
r'http://testserver\me:[email protected]',
r'http://testserver\@example.com',
r'http:\\testserver\confirm\[email protected]',
'http:999999999',
'ftp:9999999999',
'\n',
'http://[2001:cdba:0000:0000:0000:0000:3257:9652/',
'http://2001:cdba:0000:0000:0000:0000:3257:9652]/',
)
for bad_url in bad_urls:
with self.subTest(url=bad_url):
self.assertIs(is_safe_url(bad_url, allowed_hosts={'testserver', 'testserver2'}), False)
def test_good_urls(self):
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'http://testserver/[email protected]',
'/url%20with%20spaces/',
'path/http:2222222222',
)
for good_url in good_urls:
with self.subTest(url=good_url):
self.assertIs(is_safe_url(good_url, allowed_hosts={'otherserver', 'testserver'}), True)
def test_basic_auth(self):
# Valid basic auth credentials are allowed.
self.assertIs(is_safe_url(r'http://user:pass@testserver/', allowed_hosts={'user:pass@testserver'}), True)
def test_no_allowed_hosts(self):
# A path without host is allowed.
self.assertIs(is_safe_url('/confirm/[email protected]', allowed_hosts=None), True)
# Basic auth without host is not allowed.
self.assertIs(is_safe_url(r'http://testserver\@example.com', allowed_hosts=None), False)
def test_allowed_hosts_str(self):
self.assertIs(is_safe_url('http://good.com/good', allowed_hosts='good.com'), True)
self.assertIs(is_safe_url('http://good.co/evil', allowed_hosts='good.com'), False)
def test_secure_param_https_urls(self):
secure_urls = (
'https://example.com/p',
'HTTPS://example.com/p',
'/view/?param=http://example.com',
)
for url in secure_urls:
with self.subTest(url=url):
self.assertIs(is_safe_url(url, allowed_hosts={'example.com'}, require_https=True), True)
def test_secure_param_non_https_urls(self):
insecure_urls = (
'http://example.com/p',
'ftp://example.com/p',
'//example.com/p',
)
for url in insecure_urls:
with self.subTest(url=url):
self.assertIs(is_safe_url(url, allowed_hosts={'example.com'}, require_https=True), False)
class URLSafeBase64Tests(unittest.TestCase):
def test_roundtrip(self):
bytestring = b'foo'
encoded = urlsafe_base64_encode(bytestring)
decoded = urlsafe_base64_decode(encoded)
self.assertEqual(bytestring, decoded)
@ignore_warnings(category=RemovedInDjango40Warning)
class URLQuoteTests(unittest.TestCase):
def test_quote(self):
self.assertEqual(urlquote('Paris & Orl\xe9ans'), 'Paris%20%26%20Orl%C3%A9ans')
self.assertEqual(urlquote('Paris & Orl\xe9ans', safe="&"), 'Paris%20&%20Orl%C3%A9ans')
def test_unquote(self):
self.assertEqual(urlunquote('Paris%20%26%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(urlunquote('Paris%20&%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
def test_quote_plus(self):
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans'), 'Paris+%26+Orl%C3%A9ans')
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans', safe="&"), 'Paris+&+Orl%C3%A9ans')
def test_unquote_plus(self):
self.assertEqual(urlunquote_plus('Paris+%26+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(urlunquote_plus('Paris+&+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
class IsSameDomainTests(unittest.TestCase):
def test_good(self):
for pair in (
('example.com', 'example.com'),
('example.com', '.example.com'),
('foo.example.com', '.example.com'),
('example.com:8888', 'example.com:8888'),
('example.com:8888', '.example.com:8888'),
('foo.example.com:8888', '.example.com:8888'),
):
self.assertIs(is_same_domain(*pair), True)
def test_bad(self):
for pair in (
('example2.com', 'example.com'),
('foo.example.com', 'example.com'),
('example.com:9999', 'example.com:8888'),
('foo.example.com:8888', ''),
):
self.assertIs(is_same_domain(*pair), False)
class ETagProcessingTests(unittest.TestCase):
def test_parsing(self):
self.assertEqual(
parse_etags(r'"" , "etag", "e\\tag", W/"weak"'),
['""', '"etag"', r'"e\\tag"', 'W/"weak"']
)
self.assertEqual(parse_etags('*'), ['*'])
# Ignore RFC 2616 ETags that are invalid according to RFC 7232.
self.assertEqual(parse_etags(r'"etag", "e\"t\"ag"'), ['"etag"'])
def test_quoting(self):
self.assertEqual(quote_etag('etag'), '"etag"') # unquoted
self.assertEqual(quote_etag('"etag"'), '"etag"') # quoted
self.assertEqual(quote_etag('W/"etag"'), 'W/"etag"') # quoted, weak
class HttpDateProcessingTests(unittest.TestCase):
def test_http_date(self):
t = 1167616461.0
self.assertEqual(http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT')
def test_parsing_rfc1123(self):
parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_rfc850(self):
parsed = parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_asctime(self):
parsed = parse_http_date('Sun Nov 6 08:49:37 1994')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_year_less_than_70(self):
parsed = parse_http_date('Sun Nov 6 08:49:37 0050')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(2050, 11, 6, 8, 49, 37))
class EscapeLeadingSlashesTests(unittest.TestCase):
def test(self):
tests = (
('//example.com', '/%2Fexample.com'),
('//', '/%2F'),
)
for url, expected in tests:
with self.subTest(url=url):
self.assertEqual(escape_leading_slashes(url), expected)
import json
import sys
from django.test import SimpleTestCase
from django.utils import text
from django.utils.functional import lazystr
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy, override
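# True when a non-BMP code point has length 1 (always the case on
# Python 3.3+, per PEP 393).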
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)
class TestUtilsText(SimpleTestCase):
def test_get_text_list(self):
self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b')
self.assertEqual(text.get_text_list(['a']), 'a')
self.assertEqual(text.get_text_list([]), '')
with override('ar'):
self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c")
def test_smart_split(self):
testdata = [
('This is "a person" test.',
['This', 'is', '"a person"', 'test.']),
('This is "a person\'s" test.',
['This', 'is', '"a person\'s"', 'test.']),
('This is "a person\\"s" test.',
['This', 'is', '"a person\\"s"', 'test.']),
('"a \'one',
['"a', "'one"]),
('all friends\' tests',
['all', 'friends\'', 'tests']),
('url search_page words="something else"',
['url', 'search_page', 'words="something else"']),
("url search_page words='something else'",
['url', 'search_page', "words='something else'"]),
('url search_page words "something else"',
['url', 'search_page', 'words', '"something else"']),
('url search_page words-"something else"',
['url', 'search_page', 'words-"something else"']),
('url search_page words=hello',
['url', 'search_page', 'words=hello']),
('url search_page words="something else',
['url', 'search_page', 'words="something', 'else']),
("cut:','|cut:' '",
["cut:','|cut:' '"]),
(lazystr("a b c d"), # Test for #20231
['a', 'b', 'c', 'd']),
]
for test, expected in testdata:
self.assertEqual(list(text.smart_split(test)), expected)
def test_truncate_chars(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy dog.')
        self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.chars(100))
        self.assertEqual('The quick brown fox …', truncator.chars(21))
        self.assertEqual('The quick brown fo.....', truncator.chars(23, '.....'))
        self.assertEqual('.....', truncator.chars(4, '.....'))
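        # Precomposed (NFC) and decomposed (NFD) spellings truncate
        # identically.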
nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
self.assertEqual('oüoüoüoü', nfc.chars(8))
self.assertEqual('oüoüoüoü', nfd.chars(8))
self.assertEqual('oü…', nfc.chars(3))
self.assertEqual('oü…', nfd.chars(3))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator('-B\u030AB\u030A----8')
self.assertEqual('-B\u030A…', truncator.chars(3))
self.assertEqual('-B\u030AB\u030A-…', truncator.chars(5))
self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator('-----')
self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
        # Make a best effort to shorten to the desired length, but requesting
        # a length shorter than the ellipsis shouldn't break anything.
self.assertEqual('…', text.Truncator('asdf').chars(0))
# lazy strings are handled correctly
self.assertEqual(text.Truncator(lazystr('The quick brown fox')).chars(10), 'The quick…')
def test_truncate_words(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.words(10))
self.assertEqual('The quick brown fox…', truncator.words(4))
self.assertEqual('The quick brown fox[snip]', truncator.words(4, '[snip]'))
# lazy strings are handled correctly
truncator = text.Truncator(lazystr('The quick brown fox jumped over the lazy dog.'))
self.assertEqual('The quick brown fox…', truncator.words(4))
def test_truncate_html_words(self):
truncator = text.Truncator(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>'
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>',
truncator.words(10, html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox…</em></strong></p>',
truncator.words(4, html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox....</em></strong></p>',
truncator.words(4, '....', html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox</em></strong></p>',
truncator.words(4, '', html=True)
)
        # Test with a newline inside a tag
truncator = text.Truncator(
'<p>The quick <a href="xyz.html"\n id="mylink">brown fox</a> jumped over the lazy dog.</p>'
)
self.assertEqual(
'<p>The quick <a href="xyz.html"\n id="mylink">brown…</a></p>',
truncator.words(3, html=True)
)
# Test self-closing tags
truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over the lazy dog.')
self.assertEqual('<br/>The <hr />quick brown…', truncator.words(3, html=True))
truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> jumped over the lazy dog.')
self.assertEqual('<br>The <hr/>quick <em>brown…</em>', truncator.words(3, html=True))
# Test html entities
truncator = text.Truncator('<i>Buenos días! ¿Cómo está?</i>')
self.assertEqual('<i>Buenos días! ¿Cómo…</i>', truncator.words(3, html=True))
truncator = text.Truncator('<p>I <3 python, what about you?</p>')
self.assertEqual('<p>I <3 python…</p>', truncator.words(3, html=True))
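        # A pathological input that could trigger catastrophic regex
        # backtracking while parsing tags.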
re_tag_catastrophic_test = ('</a' + '\t' * 50000) + '//>'
truncator = text.Truncator(re_tag_catastrophic_test)
self.assertEqual(re_tag_catastrophic_test, truncator.words(500, html=True))
def test_wrap(self):
digits = '1234 67 9'
self.assertEqual(text.wrap(digits, 100), '1234 67 9')
self.assertEqual(text.wrap(digits, 9), '1234 67 9')
self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
self.assertEqual(text.wrap('short\na long line', 7), 'short\na long\nline')
self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8), 'do-not-break-long-words\nplease?\nok')
long_word = 'l%sng' % ('o' * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(text.wrap('a %s word' % long_word, 10), 'a\n%s\nword' % long_word)
self.assertEqual(text.wrap(lazystr(digits), 100), '1234 67 9')
def test_normalize_newlines(self):
self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"), "abc\ndef\nghi\n")
self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
self.assertEqual(text.normalize_newlines(""), "")
self.assertEqual(text.normalize_newlines(lazystr("abc\ndef\rghi\r\n")), "abc\ndef\nghi\n")
def test_phone2numeric(self):
numeric = text.phone2numeric('0800 flowers')
self.assertEqual(numeric, '0800 3569377')
lazy_numeric = lazystr(text.phone2numeric('0800 flowers'))
self.assertEqual(lazy_numeric, '0800 3569377')
def test_slugify(self):
items = (
# given - expected - unicode?
('Hello, World!', 'hello-world', False),
('spam & eggs', 'spam-eggs', False),
('spam & ıçüş', 'spam-ıçüş', True),
('foo ıç bar', 'foo-ıç-bar', True),
(' foo ıç bar', 'foo-ıç-bar', True),
('你好', '你好', True),
)
for value, output, is_unicode in items:
self.assertEqual(text.slugify(value, allow_unicode=is_unicode), output)
# interning the result may be useful, e.g. when fed to Path.
self.assertEqual(sys.intern(text.slugify('a')), 'a')
def test_unescape_entities(self):
items = [
('', ''),
('foo', 'foo'),
('&', '&'),
('&am;', '&am;'),
('&', '&'),
('&#xk;', '&#xk;'),
('&', '&'),
('foo & bar', 'foo & bar'),
('foo & bar', 'foo & bar'),
]
for value, output in items:
self.assertEqual(text.unescape_entities(value), output)
self.assertEqual(text.unescape_entities(lazystr(value)), output)
def test_unescape_string_literal(self):
items = [
('"abc"', 'abc'),
("'abc'", 'abc'),
('"a \"bc\""', 'a "bc"'),
("'\'ab\' c'", "'ab' c"),
]
for value, output in items:
self.assertEqual(text.unescape_string_literal(value), output)
self.assertEqual(text.unescape_string_literal(lazystr(value)), output)
def test_get_valid_filename(self):
filename = "^&'@{}[],$=!-#()%+~_123.txt"
self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
self.assertEqual(text.get_valid_filename(lazystr(filename)), "-_123.txt")
def test_compress_sequence(self):
data = [{'key': i} for i in range(10)]
seq = list(json.JSONEncoder().iterencode(data))
seq = [s.encode() for s in seq]
actual_length = len(b''.join(seq))
out = text.compress_sequence(seq)
compressed_length = len(b''.join(out))
self.assertTrue(compressed_length < actual_length)
def test_format_lazy(self):
self.assertEqual('django/test', format_lazy('{}/{}', 'django', lazystr('test')))
self.assertEqual('django/test', format_lazy('{0}/{1}', *('django', 'test')))
self.assertEqual('django/test', format_lazy('{a}/{b}', **{'a': 'django', 'b': 'test'}))
self.assertEqual('django/test', format_lazy('{a[0]}/{a[1]}', a=('django', 'test')))
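        # Interpolation is deferred: mutating the mapping after building the
        # lazy string changes its rendered value.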
t = {}
s = format_lazy('{0[a]}-{p[a]}', t, p=t)
t['a'] = lazystr('django')
self.assertEqual('django-django', s)
t['a'] = 'update'
self.assertEqual('update-update', s)
# The format string can be lazy. (string comes from contrib.admin)
s = format_lazy(
gettext_lazy("Added {name} \"{object}\"."),
name='article', object='My first try',
)
with override('fr'):
self.assertEqual('Ajout de article «\xa0My first try\xa0».', s)
from io import BytesIO
from itertools import chain
from urllib.parse import urlencode
from django.core.exceptions import DisallowedHost
from django.core.handlers.wsgi import LimitedStream, WSGIRequest
from django.http import HttpRequest, RawPostDataException, UnreadablePostError
from django.http.request import HttpHeaders, split_domain_port
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.client import FakePayload
class RequestsTests(SimpleTestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(list(request.GET), [])
self.assertEqual(list(request.POST), [])
self.assertEqual(list(request.COOKIES), [])
self.assertEqual(list(request.META), [])
# .GET and .POST should be QueryDicts
self.assertEqual(request.GET.urlencode(), '')
self.assertEqual(request.POST.urlencode(), '')
# and FILES should be MultiValueDict
self.assertEqual(request.FILES.getlist('foo'), [])
self.assertIsNone(request.content_type)
self.assertIsNone(request.content_params)
def test_httprequest_full_path(self):
request = HttpRequest()
request.path = '/;some/?awful/=path/foo:bar/'
request.path_info = '/prefix' + request.path
request.META['QUERY_STRING'] = ';some=query&+query=string'
expected = '/%3Bsome/%3Fawful/%3Dpath/foo:bar/?;some=query&+query=string'
self.assertEqual(request.get_full_path(), expected)
self.assertEqual(request.get_full_path_info(), '/prefix' + expected)
def test_httprequest_full_path_with_query_string_and_fragment(self):
request = HttpRequest()
request.path = '/foo#bar'
request.path_info = '/prefix' + request.path
request.META['QUERY_STRING'] = 'baz#quux'
self.assertEqual(request.get_full_path(), '/foo%23bar?baz#quux')
self.assertEqual(request.get_full_path_info(), '/prefix/foo%23bar?baz#quux')
def test_httprequest_repr(self):
request = HttpRequest()
request.path = '/somepath/'
request.method = 'GET'
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), "<HttpRequest: GET '/somepath/'>")
def test_httprequest_repr_invalid_method_and_path(self):
request = HttpRequest()
self.assertEqual(repr(request), "<HttpRequest>")
request = HttpRequest()
request.method = "GET"
self.assertEqual(repr(request), "<HttpRequest>")
request = HttpRequest()
request.path = ""
self.assertEqual(repr(request), "<HttpRequest>")
def test_wsgirequest(self):
request = WSGIRequest({
'PATH_INFO': 'bogus',
'REQUEST_METHOD': 'bogus',
'CONTENT_TYPE': 'text/html; charset=utf8',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(list(request.GET), [])
self.assertEqual(list(request.POST), [])
self.assertEqual(list(request.COOKIES), [])
self.assertEqual(
set(request.META),
{'PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'CONTENT_TYPE', 'wsgi.input'}
)
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
self.assertEqual(request.content_type, 'text/html')
self.assertEqual(request.content_params, {'charset': 'utf8'})
def test_wsgirequest_with_script_name(self):
"""
The request's path is correctly assembled, regardless of whether or
not the SCRIPT_NAME has a trailing slash (#20169).
"""
# With trailing slash
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX/',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/PREFIX/somepath/')
# Without trailing slash
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/PREFIX/somepath/')
def test_wsgirequest_script_url_double_slashes(self):
"""
WSGI squashes multiple successive slashes in PATH_INFO, WSGIRequest
should take that into account when populating request.path and
request.META['SCRIPT_NAME'] (#17133).
"""
request = WSGIRequest({
'SCRIPT_URL': '/mst/milestones//accounts/login//help',
'PATH_INFO': '/milestones/accounts/login/help',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/mst/milestones/accounts/login/help')
self.assertEqual(request.META['SCRIPT_NAME'], '/mst')
def test_wsgirequest_with_force_script_name(self):
"""
The FORCE_SCRIPT_NAME setting takes precedence over the request's
SCRIPT_NAME environment parameter (#20169).
"""
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX/',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_path_with_force_script_name_trailing_slash(self):
"""
The request's path is correctly assembled, regardless of whether or not
the FORCE_SCRIPT_NAME setting has a trailing slash (#20169).
"""
# With trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
# Without trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_repr(self):
request = WSGIRequest({'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(repr(request), "<WSGIRequest: GET '/'>")
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), "<WSGIRequest: GET '/somepath/'>")
def test_wsgirequest_path_info(self):
def wsgi_str(path_info, encoding='utf-8'):
path_info = path_info.encode(encoding) # Actual URL sent by the browser (bytestring)
path_info = path_info.decode('iso-8859-1') # Value in the WSGI environ dict (native string)
return path_info
# Regression for #19468
request = WSGIRequest({'PATH_INFO': wsgi_str("/سلام/"), 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, "/سلام/")
# The URL may be incorrectly encoded in a non-UTF-8 encoding (#26971)
request = WSGIRequest({
'PATH_INFO': wsgi_str("/café/", encoding='iso-8859-1'),
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
        # Since it's impossible to determine the (incorrect) encoding of the
        # URL, it's left percent-encoded in the path.
self.assertEqual(request.path, "/caf%E9/")
def test_limited_stream(self):
# Read all of a limited stream
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(), b'te')
# Reading again returns nothing.
self.assertEqual(stream.read(), b'')
# Read a number of characters greater than the stream has to offer
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(5), b'te')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read sequentially from a stream
stream = LimitedStream(BytesIO(b'12345678'), 8)
self.assertEqual(stream.read(5), b'12345')
self.assertEqual(stream.read(5), b'678')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read lines from a stream
stream = LimitedStream(BytesIO(b'1234\n5678\nabcd\nefgh\nijkl'), 24)
# Read a full line, unconditionally
self.assertEqual(stream.readline(), b'1234\n')
# Read a number of characters less than a line
self.assertEqual(stream.readline(2), b'56')
# Read the rest of the partial line
self.assertEqual(stream.readline(), b'78\n')
# Read a full line, with a character limit greater than the line length
self.assertEqual(stream.readline(6), b'abcd\n')
# Read the next line, deliberately terminated at the line end
self.assertEqual(stream.readline(4), b'efgh')
# Read the next line... just the line end
self.assertEqual(stream.readline(), b'\n')
# Read everything else.
self.assertEqual(stream.readline(), b'ijkl')
# Regression for #15018
# If a stream contains a newline, but the provided length
# is less than the number of provided characters, the newline
# doesn't reset the available character count
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.readline(10), b'1234\n')
self.assertEqual(stream.readline(3), b'abc')
# Now expire the available characters
self.assertEqual(stream.readline(3), b'd')
# Reading again returns nothing.
self.assertEqual(stream.readline(2), b'')
# Same test, but with read, not readline.
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.read(6), b'1234\na')
self.assertEqual(stream.read(2), b'bc')
self.assertEqual(stream.read(2), b'd')
self.assertEqual(stream.read(2), b'')
self.assertEqual(stream.read(), b'')
def test_stream(self):
payload = FakePayload('name=value')
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload},
)
self.assertEqual(request.read(), b'name=value')
def test_read_after_value(self):
"""
Reading from request is allowed after accessing request contents as
POST or body.
"""
payload = FakePayload('name=value')
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'name': ['value']})
self.assertEqual(request.body, b'name=value')
self.assertEqual(request.read(), b'name=value')
def test_value_after_read(self):
"""
Construction of POST or body is not allowed after reading
from request.
"""
payload = FakePayload('name=value')
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(request.read(2), b'na')
with self.assertRaises(RawPostDataException):
request.body
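        # Once the stream has been consumed, POST falls back to an empty
        # QueryDict.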
self.assertEqual(request.POST, {})
def test_non_ascii_POST(self):
payload = FakePayload(urlencode({'key': 'España'}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_alternate_charset_POST(self):
"""
Test a POST with non-utf-8 payload encoding.
"""
payload = FakePayload(urlencode({'key': 'España'.encode('latin-1')}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded; charset=iso-8859-1',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_body_after_POST_multipart_form_data(self):
"""
Reading body after parsing multipart/form-data is not allowed
"""
        # Because multipart is used for large amounts of data, i.e. file uploads,
# we don't want the data held in memory twice, and we don't want to
# silence the error by setting body = '' either.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'name': ['value']})
with self.assertRaises(RawPostDataException):
request.body
def test_body_after_POST_multipart_related(self):
"""
Reading body after parsing multipart that isn't form-data is allowed
"""
# Ticket #9054
# There are cases in which the multipart data is related instead of
# being a binary upload, in which case it should still be accessible
# via body.
payload_data = b"\r\n".join([
b'--boundary',
b'Content-ID: id; name="name"',
b'',
b'value',
b'--boundary--'
b''])
payload = FakePayload(payload_data)
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/related; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(request.POST, {})
self.assertEqual(request.body, payload_data)
def test_POST_multipart_with_content_length_zero(self):
"""
Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
"""
# According to:
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# Every request.POST with Content-Length >= 0 is a valid request,
# this test ensures that we handle Content-Length == 0.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': 0,
'wsgi.input': payload,
})
self.assertEqual(request.POST, {})
def test_POST_binary_only(self):
payload = b'\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@'
environ = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/octet-stream',
'CONTENT_LENGTH': len(payload),
'wsgi.input': BytesIO(payload),
}
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
# Same test without specifying content-type
environ.update({'CONTENT_TYPE': '', 'wsgi.input': BytesIO(payload)})
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
def test_read_by_lines(self):
payload = FakePayload('name=value')
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(list(request), [b'name=value'])
def test_POST_after_body_read(self):
"""
POST should be populated even if body is read first
"""
payload = FakePayload('name=value')
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
request.body # evaluate
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read(self):
"""
POST should be populated even if body is read first, and then
the stream is read second.
"""
payload = FakePayload('name=value')
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
request.body # evaluate
self.assertEqual(request.read(1), b'n')
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read_multipart(self):
"""
POST should be populated even if body is read first, and then
the stream is read second. Using multipart/form-data instead of urlencoded.
"""
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
request.body # evaluate
# Consume enough data to mess up the parsing:
self.assertEqual(request.read(13), b'--boundary\r\nC')
self.assertEqual(request.POST, {'name': ['value']})
    def test_POST_immutable_for_multipart(self):
"""
MultiPartParser.parse() leaves request.POST immutable.
"""
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--',
]))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertFalse(request.POST._mutable)
def test_POST_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
POST, the exception is identifiable (not a generic OSError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise OSError('kaboom!')
payload = b'name=value'
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload),
})
with self.assertRaises(UnreadablePostError):
request.body
def test_set_encoding_clears_POST(self):
payload = FakePayload('name=Hello Günter')
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'name': ['Hello Günter']})
request.encoding = 'iso-8859-16'
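        # POST is reparsed lazily: the UTF-8 bytes for 'ü' now decode as
        # 'ĂŒ' in iso-8859-16.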
self.assertEqual(request.POST, {'name': ['Hello GĂŒnter']})
def test_set_encoding_clears_GET(self):
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'wsgi.input': '',
'QUERY_STRING': 'name=Hello%20G%C3%BCnter',
})
self.assertEqual(request.GET, {'name': ['Hello Günter']})
request.encoding = 'iso-8859-16'
self.assertEqual(request.GET, {'name': ['Hello G\u0102\u0152nter']})
def test_FILES_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
FILES, the exception is identifiable (not a generic OSError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise OSError('kaboom!')
payload = b'x'
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=foo_',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload),
})
with self.assertRaises(UnreadablePostError):
request.FILES
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_raw_uri(self):
factory = RequestFactory(HTTP_HOST='evil.com')
request = factory.get('////absolute-uri')
self.assertEqual(request.get_raw_uri(), 'http://evil.com//absolute-uri')
request = factory.get('/?foo=bar')
self.assertEqual(request.get_raw_uri(), 'http://evil.com/?foo=bar')
request = factory.get('/path/with:colons')
self.assertEqual(request.get_raw_uri(), 'http://evil.com/path/with:colons')
class HostValidationTests(SimpleTestCase):
poisoned_hosts = [
'[email protected]',
'example.com:[email protected]',
'example.com:[email protected]:80',
'example.com:80/badpath',
'example.com: recovermypassword.com',
]
@override_settings(
USE_X_FORWARDED_HOST=False,
ALLOWED_HOSTS=[
'forward.com', 'example.com', 'internal.com', '12.34.56.78',
'[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com',
'.multitenant.com', 'INSENSITIVE.com', '[::ffff:169.254.169.254]',
])
def test_http_get_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_HOST': 'forward.com',
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is ignored.
self.assertEqual(request.get_host(), 'example.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
legit_hosts = [
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
'xn--4ca9at.com', # Punycode for öäü.com
'anything.multitenant.com',
'multitenant.com',
'insensitive.com',
'example.com.',
'example.com.:80',
'[::ffff:169.254.169.254]',
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
# Poisoned host headers are rejected as suspicious
for host in chain(self.poisoned_hosts, ['other.com', 'example.com..']):
with self.assertRaises(DisallowedHost):
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_HOST=True, ALLOWED_HOSTS=['*'])
def test_http_get_host_with_x_forwarded_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_HOST': 'forward.com',
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is obeyed.
self.assertEqual(request.get_host(), 'forward.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
# Poisoned host headers are rejected as suspicious
legit_hosts = [
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
'xn--4ca9at.com', # Punycode for öäü.com
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
for host in self.poisoned_hosts:
with self.assertRaises(DisallowedHost):
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_PORT=False)
def test_get_port(self):
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
'HTTP_X_FORWARDED_PORT': '80',
}
# Shouldn't use the X-Forwarded-Port header
self.assertEqual(request.get_port(), '8080')
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
}
self.assertEqual(request.get_port(), '8080')
@override_settings(USE_X_FORWARDED_PORT=True)
def test_get_port_with_x_forwarded_port(self):
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
'HTTP_X_FORWARDED_PORT': '80',
}
# Should use the X-Forwarded-Port header
self.assertEqual(request.get_port(), '80')
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
}
self.assertEqual(request.get_port(), '8080')
@override_settings(DEBUG=True, ALLOWED_HOSTS=[])
def test_host_validation_in_debug_mode(self):
"""
If ALLOWED_HOSTS is empty and DEBUG is True, variants of localhost are
allowed.
"""
valid_hosts = ['localhost', '127.0.0.1', '[::1]']
for host in valid_hosts:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
self.assertEqual(request.get_host(), host)
# Other hostnames raise a DisallowedHost.
with self.assertRaises(DisallowedHost):
request = HttpRequest()
request.META = {'HTTP_HOST': 'example.com'}
request.get_host()
@override_settings(ALLOWED_HOSTS=[])
def test_get_host_suggestion_of_allowed_host(self):
"""get_host() makes helpful suggestions if a valid-looking host is not in ALLOWED_HOSTS."""
msg_invalid_host = "Invalid HTTP_HOST header: %r."
msg_suggestion = msg_invalid_host + " You may need to add %r to ALLOWED_HOSTS."
msg_suggestion2 = msg_invalid_host + " The domain name provided is not valid according to RFC 1034/1035"
for host in [ # Valid-looking hosts
'example.com',
'12.34.56.78',
'[2001:19f0:feee::dead:beef:cafe]',
'xn--4ca9at.com', # Punycode for öäü.com
]:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
with self.assertRaisesMessage(DisallowedHost, msg_suggestion % (host, host)):
request.get_host()
for domain, port in [ # Valid-looking hosts with a port number
('example.com', 80),
('12.34.56.78', 443),
('[2001:19f0:feee::dead:beef:cafe]', 8080),
]:
host = '%s:%s' % (domain, port)
request = HttpRequest()
request.META = {'HTTP_HOST': host}
with self.assertRaisesMessage(DisallowedHost, msg_suggestion % (host, domain)):
request.get_host()
for host in self.poisoned_hosts:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
with self.assertRaisesMessage(DisallowedHost, msg_invalid_host % host):
request.get_host()
request = HttpRequest()
request.META = {'HTTP_HOST': "invalid_hostname.com"}
with self.assertRaisesMessage(DisallowedHost, msg_suggestion2 % "invalid_hostname.com"):
request.get_host()
def test_split_domain_port_removes_trailing_dot(self):
domain, port = split_domain_port('example.com.:8080')
self.assertEqual(domain, 'example.com')
self.assertEqual(port, '8080')
class BuildAbsoluteURITests(SimpleTestCase):
factory = RequestFactory()
def test_absolute_url(self):
request = HttpRequest()
url = 'https://www.example.com/asdf'
self.assertEqual(request.build_absolute_uri(location=url), url)
def test_host_retrieval(self):
request = HttpRequest()
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(
request.build_absolute_uri(location='/path/with:colons'),
'http://www.example.com/path/with:colons'
)
def test_request_path_begins_with_two_slashes(self):
# //// creates a request with a path beginning with //
request = self.factory.get('////absolute-uri')
tests = (
# location isn't provided
(None, 'http://testserver//absolute-uri'),
# An absolute URL
('http://example.com/?foo=bar', 'http://example.com/?foo=bar'),
# A schema-relative URL
('//example.com/?foo=bar', 'http://example.com/?foo=bar'),
# Relative URLs
('/foo/bar/', 'http://testserver/foo/bar/'),
('/foo/./bar/', 'http://testserver/foo/bar/'),
('/foo/../bar/', 'http://testserver/bar/'),
('///foo/bar/', 'http://testserver/foo/bar/'),
)
for location, expected_url in tests:
with self.subTest(location=location):
self.assertEqual(request.build_absolute_uri(location=location), expected_url)
class RequestHeadersTests(SimpleTestCase):
ENVIRON = {
# Non-headers are ignored.
'PATH_INFO': '/somepath/',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
# These non-HTTP prefixed headers are included.
'CONTENT_TYPE': 'text/html',
'CONTENT_LENGTH': '100',
# All HTTP-prefixed headers are included.
'HTTP_ACCEPT': '*',
'HTTP_HOST': 'example.com',
'HTTP_USER_AGENT': 'python-requests/1.2.0',
}
def test_base_request_headers(self):
request = HttpRequest()
request.META = self.ENVIRON
self.assertEqual(dict(request.headers), {
'Content-Type': 'text/html',
'Content-Length': '100',
'Accept': '*',
'Host': 'example.com',
'User-Agent': 'python-requests/1.2.0',
})
def test_wsgi_request_headers(self):
request = WSGIRequest(self.ENVIRON)
self.assertEqual(dict(request.headers), {
'Content-Type': 'text/html',
'Content-Length': '100',
'Accept': '*',
'Host': 'example.com',
'User-Agent': 'python-requests/1.2.0',
})
def test_wsgi_request_headers_getitem(self):
request = WSGIRequest(self.ENVIRON)
self.assertEqual(request.headers['User-Agent'], 'python-requests/1.2.0')
self.assertEqual(request.headers['user-agent'], 'python-requests/1.2.0')
self.assertEqual(request.headers['Content-Type'], 'text/html')
self.assertEqual(request.headers['Content-Length'], '100')
def test_wsgi_request_headers_get(self):
request = WSGIRequest(self.ENVIRON)
self.assertEqual(request.headers.get('User-Agent'), 'python-requests/1.2.0')
self.assertEqual(request.headers.get('user-agent'), 'python-requests/1.2.0')
self.assertEqual(request.headers.get('Content-Type'), 'text/html')
self.assertEqual(request.headers.get('Content-Length'), '100')
class HttpHeadersTests(SimpleTestCase):
def test_basic(self):
environ = {
'CONTENT_TYPE': 'text/html',
'CONTENT_LENGTH': '100',
'HTTP_HOST': 'example.com',
}
headers = HttpHeaders(environ)
self.assertEqual(sorted(headers), ['Content-Length', 'Content-Type', 'Host'])
self.assertEqual(headers, {
'Content-Type': 'text/html',
'Content-Length': '100',
'Host': 'example.com',
})
def test_parse_header_name(self):
tests = (
('PATH_INFO', None),
('HTTP_ACCEPT', 'Accept'),
('HTTP_USER_AGENT', 'User-Agent'),
('HTTP_X_FORWARDED_PROTO', 'X-Forwarded-Proto'),
('CONTENT_TYPE', 'Content-Type'),
('CONTENT_LENGTH', 'Content-Length'),
)
for header, expected in tests:
with self.subTest(header=header):
self.assertEqual(HttpHeaders.parse_header_name(header), expected)
import datetime
import pickle
import unittest
from operator import attrgetter
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import Count, F, Q
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.where import NothingNode, WhereNode
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import (
FK1, Annotation, Article, Author, BaseA, Book, CategoryItem,
CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA,
Classroom, CommonMixedCaseForeignKeys, Company, Cover, CustomPk,
CustomPkTag, Detail, DumbCategory, Eaten, Employment, ExtraInfo, Fan, Food,
Identifier, Individual, Item, Job, JobResponsibilities, Join, LeafA, LeafB,
LoopX, LoopZ, ManagedModel, Member, MixedCaseDbColumnCategoryItem,
MixedCaseFieldCategoryItem, ModelA, ModelB, ModelC, ModelD, MyObject,
NamedCategory, Node, Note, NullableName, Number, ObjectA, ObjectB, ObjectC,
OneToOneCategory, Order, OrderItem, Page, Paragraph, Person, Plaything,
PointerA, Program, ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking,
Related, RelatedIndividual, RelatedObject, Report, ReportComment,
ReservedName, Responsibility, School, SharedConnection, SimpleCategory,
SingleObject, SpecialCategory, Staff, StaffUser, Student, Tag, Task,
Teacher, Ticket21203Child, Ticket21203Parent, Ticket23605A, Ticket23605B,
Ticket23605C, TvChef, Valid, X,
)
class Queries1Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name='t1', category=generic)
cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
t4 = Tag.objects.create(name='t4', parent=cls.t3)
cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
cls.n3 = Note.objects.create(note='n3', misc='foo', id=3)
ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
ann1.notes.add(cls.n1)
ann2 = Annotation.objects.create(name='a2', tag=t4)
ann2.notes.add(n2, cls.n3)
# Create these out of order so that sorting by 'id' will be different to sorting
# by 'info'. Helps detect some problems later.
cls.e2 = ExtraInfo.objects.create(info='e2', note=n2, value=41)
e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42)
cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)
cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2)
cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3)
cls.i1.tags.set([cls.t1, cls.t2])
cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=n2)
cls.i2.tags.set([cls.t1, cls.t3])
cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3)
i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3)
i4.tags.set([t4])
cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
Report.objects.create(name='r2', creator=a3)
Report.objects.create(name='r3')
        # Only one Ranking is created here; the full "ordering by 'rank' vs.
        # Meta.ordering" scenario is exercised in Queries5Tests.
cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
Cover.objects.create(title="first", item=i4)
Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
qs1 = Tag.objects.filter(pk__lte=0)
qs2 = Tag.objects.filter(parent__in=qs1)
qs3 = Tag.objects.filter(parent__in=qs2)
self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'})
self.assertIn('v0', str(qs3.query).lower())
qs4 = qs3.filter(parent__in=qs1)
self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'})
        # It is possible to reuse U for the second subquery; there is no need to use W.
self.assertNotIn('w0', str(qs4.query).lower())
        # So, 'u0.' appears four times: once in the SELECT clause and once in
        # the WHERE clause of each of the two subqueries that reuse the alias.
self.assertEqual(str(qs4.query).lower().count('u0.'), 4)
def test_ticket1050(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=True),
['<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__id__isnull=True),
['<Item: three>']
)
def test_ticket1801(self):
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i3),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
['<Author: a2>']
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)),
['<Item: one>']
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
[]
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
[]
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
        self.assertEqual(qs.query.count_active_tables(), 2)
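        # Chaining filter() calls against a multi-valued relation adds a new
        # join for each call, so the chained form below touches one more table
        # than the single filter() call above.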
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertQuerysetEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
['<Item: two>']
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
['<Item: one>', '<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
['<Item: one>', '<Item: two>']
)
def test_tickets_2080_3592(self):
self.assertQuerysetEqual(
Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='one') | Q(name='a3')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(name='a3') | Q(item__name='one')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
['<Author: a2>']
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertQuerysetEqual(
Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
[]
)
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name', 'foo')
.distinct()
.count()
),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name')
.distinct()
.count()
),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertQuerysetEqual(
q1,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(q2, ['<Item: one>'])
self.assertQuerysetEqual(
(q1 | q2).order_by('name'),
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>'])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertQuerysetEqual(
((q1 & q2) | q3).order_by('name'),
['<Item: four>', '<Item: one>']
)
def test_order_by_tables(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by('name').query
self.assertEqual(len([
t for t in combined_query.alias_map if combined_query.alias_refcount[t]
]), 1)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by('greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(qs.query))
qs = qs.order_by('id')
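        # Replacing the ordering removes the only reference to the
        # greatest_fan joins, so they must be trimmed from the SQL.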
self.assertNotIn('OUTER JOIN', str(qs.query))
def test_get_clears_ordering(self):
"""
get() should clear ordering for optimization purposes.
"""
with CaptureQueriesContext(connection) as captured_queries:
Author.objects.order_by('name').get(pk=self.a1.pk)
self.assertNotIn('order by', captured_queries[0]['sql'].lower())
def test_tickets_4088_4306(self):
self.assertQuerysetEqual(
Report.objects.filter(creator=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__num=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
self.assertQuerysetEqual(
Report.objects.filter(creator__id=self.a1.id),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__name='a1'),
['<Report: r1>']
)
def test_ticket4510(self):
self.assertQuerysetEqual(
Author.objects.filter(report__name='r1'),
['<Author: a1>']
)
def test_ticket7378(self):
self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>'])
def test_tickets_5324_6704(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__name='t4'),
['<Item: four>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
['<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
['<Item: two>', '<Item: three>', '<Item: one>']
)
self.assertQuerysetEqual(
Author.objects.exclude(item__name='one').distinct().order_by('name'),
['<Author: a2>', '<Author: a3>', '<Author: a4>']
)
        # Excluding across an m2m relation when there is more than one related
        # object associated was problematic.
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
['<Item: three>']
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3))
self.assertEqual(
len([
x for x in qs.query.alias_map.values()
if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
]),
1
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertQuerysetEqual(
Tag.objects.filter(parent__isnull=True).order_by('name'),
['<Tag: t1>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__isnull=True).order_by('name'),
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
def test_ticket2091(self):
t = Tag.objects.get(name='t4')
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[t]),
['<Item: four>']
)
def test_avoid_infinite_loop_on_too_many_subqueries(self):
x = Tag.objects.filter(pk=1)
local_recursion_limit = 127
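        # 127 matches the guard inside Query.bump_prefix(), which raises once
        # it has cycled through that many candidate alias prefixes instead of
        # looping forever.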
msg = 'Maximum recursion depth exceeded: too many subqueries.'
with self.assertRaisesMessage(RuntimeError, msg):
for i in range(local_recursion_limit * 2):
x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
x = Tag.objects.filter(pk=1)
for _ in range(20):
x = Tag.objects.filter(pk__in=x)
self.assertEqual(
x.query.subq_aliases, {
'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD',
'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN',
}
)
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
Author.objects.all() & Tag.objects.all()
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
Author.objects.all() | Tag.objects.all()
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
self.assertEqual(
Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
4
)
def test_ticket2400(self):
self.assertQuerysetEqual(
Author.objects.filter(item__isnull=True),
['<Author: a3>']
)
self.assertQuerysetEqual(
Tag.objects.filter(item__isnull=True),
['<Tag: t5>']
)
def test_ticket2496(self):
self.assertQuerysetEqual(
Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
['<Item: four>']
)
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'):
Note.objects.filter({'note': 'n1', 'misc': 'foo'})
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertQuerysetEqual(
Item.objects.order_by('note__note', 'name'),
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertQuerysetEqual(
Author.objects.order_by('extra', '-name'),
['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>']
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertQuerysetEqual(
Cover.objects.all(),
['<Cover: first>', '<Cover: second>']
)
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertQuerysetEqual(
Item.objects.order_by('creator', 'name'),
['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>']
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>']
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by('name')
self.assertQuerysetEqual(
qs,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertEqual(len(qs.query.alias_map), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by('note__note', 'name')
self.assertQuerysetEqual(
qs,
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
self.assertEqual(repr(qs[0].note), '<Note: n2>')
self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
def test_ticket3037(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')),
['<Item: four>']
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertSequenceEqual(
Note.objects.values('misc').distinct().order_by('note', '-misc'),
[{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}]
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertIn('note_id', ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertSequenceEqual(ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}])
# ...or use the field name.
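        # Either way, the value returned is the related row's pk, not the
        # model instance itself.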
self.assertSequenceEqual(ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}])
def test_ticket6154(self):
        # Multiple filter statements are always joined using "AND".
self.assertQuerysetEqual(
Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)),
['<Author: a1>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id),
['<Author: a1>']
)
def test_ticket6981(self):
self.assertQuerysetEqual(
Tag.objects.select_related('parent').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket9926(self):
self.assertQuerysetEqual(
Tag.objects.select_related("parent", "category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.select_related('parent', "parent__category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1)
self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2)
self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2)
self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(select={'a': 1}),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(select={'a': 1}).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
name = "one"
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7155(self):
# Nullable dates
self.assertQuerysetEqual(
Item.objects.datetimes('modified', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7098(self):
# Make sure semi-deprecated ordering by related models syntax still
# works.
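        # 'queries_note.note' is the raw table.column name rather than a field
        # lookup path.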
self.assertSequenceEqual(
Item.objects.values('note__note').order_by('queries_note.note', 'id'),
[{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}]
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertQuerysetEqual(
Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
['<Tag: t3>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
# More twisted cases, involving nested negations.
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one')),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
['<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
['<Item: four>', '<Item: one>', '<Item: three>']
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(
query2.get_compiler(qs.db).as_sql()[0],
query
)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer('name', 'creator')
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertQuerysetEqual(
self.n1.annotation_set.filter(
Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)
),
['<Annotation: a1>']
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertQuerysetEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
['<Item: one>', '<Item: two>']
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
Eaten.objects.create(meal='m')
q = Eaten.objects.none()
with self.assertNumQueries(0):
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(meal='m'), [])
self.assertQuerysetEqual(q.exclude(meal='m'), [])
self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
self.assertQuerysetEqual(q.select_related('food'), [])
self.assertQuerysetEqual(q.annotate(Count('food')), [])
self.assertQuerysetEqual(q.order_by('meal', 'food'), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(
q.extra(select={'foo': "1"}),
[]
)
self.assertQuerysetEqual(q.reverse(), [])
q.query.low_mark = 1
with self.assertRaisesMessage(AssertionError, 'Cannot change a query once a slice has been taken'):
q.extra(select={'foo': "1"})
self.assertQuerysetEqual(q.defer('meal'), [])
self.assertQuerysetEqual(q.only('meal'), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(
len(Note.objects.order_by('extrainfo__info').distinct()),
3
)
# Pickling of QuerySets using datetimes() should work.
qs = Item.objects.datetimes('created', 'month')
pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
# If a ValuesList or Values queryset is passed as an inner query, we
# make sure it's only requesting a single value and use that as the
# thing to select.
self.assertQuerysetEqual(
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
['<Tag: t2>', '<Tag: t3>']
)
# Multi-valued values() and values_list() querysets should raise errors.
with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertSequenceEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{'id': 1}, {'id': 2}, {'id': 3}]
)
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
['<Annotation: a1>']
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
yield n_obj.pk
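        # The empty generator takes the early-bailout path (nothing can
        # match), while the non-empty one is consumed and its values are used
        # for the __in lookup.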
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
def test_ticket10742(self):
        # A queryset used in an __in clause is compiled into the SQL as a
        # subquery; it isn't evaluated separately.
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.filter(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.exclude(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
self.assertQuerysetEqual(
Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
['<Author: a1>']
)
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
def test_ticket7076(self):
# Excluding shouldn't eliminate NULL entries.
self.assertQuerysetEqual(
Item.objects.exclude(modified=self.time1).order_by('name'),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__name=self.t1.name),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket7181(self):
        # Ordering by related tables should accommodate nullable fields. (This
        # test is a little tricky, since NULL ordering is database-dependent;
        # instead, we just count the number of results.)
self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)
# Empty querysets can be merged with others.
self.assertQuerysetEqual(
Note.objects.none() | Note.objects.all(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(
Note.objects.all() | Note.objects.none(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket9411(self):
# Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's
# sufficient that this query runs without error.
qs = Tag.objects.values_list('id', flat=True).order_by('id')
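        # bump_prefix() advances the query's table-alias prefix (T, U, V, ...)
        # and renames existing aliases accordingly; the queryset must still
        # evaluate to the same rows afterwards.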
qs.query.bump_prefix(qs.query)
first = qs[0]
self.assertEqual(list(qs), list(range(first, first + 5)))
def test_ticket8439(self):
# Complex combinations of conjunctions, disjunctions and nullable
# relations.
self.assertQuerysetEqual(
Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)),
['<Author: a2>']
)
self.assertQuerysetEqual(
Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')),
['<Annotation: a1>']
)
xx = ExtraInfo.objects.create(info='xx', note=self.n3)
self.assertQuerysetEqual(
Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
['<Note: n1>', '<Note: n3>']
)
q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
self.assertEqual(
len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]),
1
)
def test_ticket17429(self):
"""
Meta.ordering=None works the same as Meta.ordering=[]
"""
original_ordering = Tag._meta.ordering
Tag._meta.ordering = None
try:
self.assertQuerysetEqual(
Tag.objects.all(),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
ordered=False
)
finally:
Tag._meta.ordering = original_ordering
def test_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4'),
[repr(i) for i in Item.objects.filter(~Q(tags__name='t4'))])
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name='t4') | Q(tags__name='t3')),
[repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | Q(tags__name='t3')))])
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name='t4') | ~Q(tags__name='t3')),
[repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | ~Q(tags__name='t3')))])
def test_nested_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
def test_double_exclude(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))])
def test_exclude_in(self):
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name__in=['t4', 't3'])),
[repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name__in=['t4', 't3'])),
[repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))])
def test_ticket_10790_1(self):
        # Querying direct fields with isnull should trim the left outer join.
        # It should not create an INNER JOIN either.
q = Tag.objects.filter(parent__isnull=True)
self.assertQuerysetEqual(q, ['<Tag: t1>'])
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.filter(parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.exclude(parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.exclude(parent__isnull=False)
self.assertQuerysetEqual(q, ['<Tag: t1>'])
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.exclude(parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'],
)
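        # The trailing join for parent__parent is trimmed; the remaining join
        # to parent must stay LEFT OUTER so that tags without a parent are
        # kept in the result.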
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
self.assertNotIn('INNER JOIN', str(q.query))
def test_ticket_10790_2(self):
# Querying across several tables should strip only the last outer join,
# while preserving the preceding inner joins.
q = Tag.objects.filter(parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t4>', '<Tag: t5>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
# Querying without isnull should not convert anything to left outer join.
q = Tag.objects.filter(parent__parent=self.t1)
self.assertQuerysetEqual(
q,
['<Tag: t4>', '<Tag: t5>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
def test_ticket_10790_3(self):
# Querying via indirect fields should populate the left outer join
q = NamedCategory.objects.filter(tag__isnull=True)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
# join to dumbcategory ptr_id
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
self.assertQuerysetEqual(q, [])
# Querying across several tables should strip only the last join, while
# preserving the preceding left outer joins.
q = NamedCategory.objects.filter(tag__parent__isnull=True)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
self.assertQuerysetEqual(q, ['<NamedCategory: Generic>'])
def test_ticket_10790_4(self):
        # Querying across an m2m field should not strip the m2m table from the
        # join.
q = Author.objects.filter(item__tags__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a2>', '<Author: a3>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2)
self.assertNotIn('INNER JOIN', str(q.query))
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
self.assertNotIn('INNER JOIN', str(q.query))
def test_ticket_10790_5(self):
        # Querying with isnull=False across an m2m field should not create
        # outer joins.
q = Author.objects.filter(item__tags__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 2)
q = Author.objects.filter(item__tags__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 3)
q = Author.objects.filter(item__tags__parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 4)
def test_ticket_10790_6(self):
        # Querying with isnull=True across an m2m field should not create
        # inner joins, and should strip the last outer join.
q = Author.objects.filter(item__tags__parent__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>',
'<Author: a2>', '<Author: a3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
def test_ticket_10790_7(self):
# Reverse querying with isnull should not strip the join
q = Author.objects.filter(item__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
q = Author.objects.filter(item__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
def test_ticket_10790_8(self):
# Querying with combined q-objects should also strip the left outer join
q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
self.assertQuerysetEqual(
q,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
def test_ticket_10790_combine(self):
# Combining queries should not re-populate the left outer join
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__isnull=False)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q3 = q1 & q2
self.assertQuerysetEqual(q3, [])
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q2 = Tag.objects.filter(parent=self.t1)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q3 = q2 | q1
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__parent__isnull=True)
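        # q2 on its own needs a LEFT OUTER join to parent so that tags whose
        # parent is NULL pass the parent__parent__isnull=True test; the
        # combined queries should keep exactly that one outer join.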
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q3 = q2 | q1
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
def test_ticket19672(self):
self.assertQuerysetEqual(
Report.objects.filter(Q(creator__isnull=False) & ~Q(creator__extra__value=41)),
['<Report: r1>']
)
def test_ticket_20250(self):
# A negated Q along with an annotated queryset failed in Django 1.4
qs = Author.objects.annotate(Count('item'))
qs = qs.filter(~Q(extra__value=0)).order_by('name')
self.assertIn('SELECT', str(qs.query))
self.assertQuerysetEqual(
qs,
['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>']
)
def test_lookup_constraint_fielderror(self):
msg = (
"Cannot resolve keyword 'unknown_field' into field. Choices are: "
"annotation, category, category_id, children, id, item, "
"managedmodel, name, note, parent, parent_id"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(unknown_field__name='generic')
def test_common_mixed_case_foreign_keys(self):
"""
        A valid query should be generated when fields fetched from joined
        tables include FKs whose names only differ by case.
"""
c1 = SimpleCategory.objects.create(name='c1')
c2 = SimpleCategory.objects.create(name='c2')
c3 = SimpleCategory.objects.create(name='c3')
category = CategoryItem.objects.create(category=c1)
mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(CaTeGoRy=c2)
mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(category=c3)
CommonMixedCaseForeignKeys.objects.create(
category=category,
mixed_case_field_category=mixed_case_field_category,
mixed_case_db_column_category=mixed_case_db_column_category,
)
qs = CommonMixedCaseForeignKeys.objects.values(
'category',
'mixed_case_field_category',
'mixed_case_db_column_category',
'category__category',
'mixed_case_field_category__CaTeGoRy',
'mixed_case_db_column_category__category',
)
self.assertTrue(qs.first())
class Queries2Tests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=4)
Number.objects.create(num=8)
Number.objects.create(num=12)
def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
self.assertQuerysetEqual(
Number.objects.filter(num__gt=8, num__lt=13),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
['<Number: 8>']
)
def test_ticket12239(self):
# Custom lookups are registered to round float values correctly on gte
# and lt IntegerField queries.
self.assertQuerysetEqual(
Number.objects.filter(num__gt=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.0),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12.0),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lte=11.9),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.0),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.9),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
def test_ticket7759(self):
# Count should work with a partially read result set.
count = Number.objects.count()
qs = Number.objects.all()
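        # count() must report the full total even though iteration over qs has
        # already started and the result cache may be only partially filled.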
def run():
for obj in qs:
return qs.count() == count
self.assertTrue(run())
class Queries3Tests(TestCase):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertQuerysetEqual(Valid.objects.all(), [])
def test_ticket8683(self):
# An error should be raised when QuerySet.datetimes() is passed the
# wrong type of field.
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
Item.objects.datetimes('name', 'month')
def test_ticket22023(self):
with self.assertRaisesMessage(TypeError, "Cannot call only() after .values() or .values_list()"):
Valid.objects.values().only()
with self.assertRaisesMessage(TypeError, "Cannot call defer() after .values() or .values_list()"):
Valid.objects.values().defer()
class Queries4Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name='t1', category=generic)
n1 = Note.objects.create(note='n1', misc='foo')
n2 = Note.objects.create(note='n2', misc='bar')
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2)
cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
cls.r2 = Report.objects.create(name='r2', creator=cls.a3)
cls.r3 = Report.objects.create(name='r3')
Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1)
Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3)
def test_ticket24525(self):
tag = Tag.objects.create()
anth100 = tag.note_set.create(note='ANTH', misc='100')
math101 = tag.note_set.create(note='MATH', misc='101')
s1 = tag.annotation_set.create(name='1')
s2 = tag.annotation_set.create(name='2')
s1.notes.set([math101, anth100])
s2.notes.set([math101])
result = math101.annotation_set.all() & tag.annotation_set.exclude(notes__in=[anth100])
self.assertEqual(list(result), [s2])
def test_ticket11811(self):
unsaved_category = NamedCategory(name="Other")
msg = 'Unsaved model instance <NamedCategory: Other> cannot be used in an ORM query.'
with self.assertRaisesMessage(ValueError, msg):
Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
def test_ticket14876(self):
        # Note: when combining the queries we need to have information
        # available about the join type of the trimmed "creator__isnull"
        # join. If we don't have that information, then the join is created as
        # an INNER JOIN and results will be incorrect.
q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
self.assertEqual(str(q1.query), str(q2.query))
q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
q2 = (
Item.objects
.filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1'))
.order_by()
)
self.assertQuerysetEqual(q1, ["<Item: i1>"])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
q2 = (
Item.objects.filter(Q(creator__report__name='e1')).order_by() |
Item.objects.filter(Q(creator=self.a1)).order_by()
)
self.assertQuerysetEqual(q1, ["<Item: i1>"])
self.assertEqual(str(q1.query), str(q2.query))
def test_combine_join_reuse(self):
        # Joins having identical connections are correctly recreated in the
        # rhs query when the queries are ORed together (#18748).
Report.objects.create(name='r4', creator=self.a1)
q1 = Author.objects.filter(report__name='r5')
q2 = Author.objects.filter(report__name='r4').filter(report__name='r1')
combined = q1 | q2
self.assertEqual(str(combined.query).count('JOIN'), 2)
self.assertEqual(len(combined), 1)
self.assertEqual(combined[0].name, 'a1')
def test_join_reuse_order(self):
        # Join aliases are reused in order so that change_map never ends up
        # containing a circular reference, which would raise an AssertionError
        # (#26522).
s1 = School.objects.create()
s2 = School.objects.create()
s3 = School.objects.create()
t1 = Teacher.objects.create()
otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1)
qs1 = otherteachers.filter(schools=s1).filter(schools=s2)
qs2 = otherteachers.filter(schools=s1).filter(schools=s3)
self.assertQuerysetEqual(qs1 | qs2, [])
def test_ticket7095(self):
# Updates that are filtered on the model being updated are somewhat
# tricky in MySQL.
ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
self.assertEqual(ManagedModel.objects.update(data='mm'), 1)
# A values() or values_list() query across joined models must use outer
# joins appropriately.
# Note: In Oracle, we expect a null CharField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_charfield_repr = ''
else:
expected_null_charfield_repr = None
self.assertSequenceEqual(
Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
['e1', 'e2', expected_null_charfield_repr],
)
# Similarly for select_related(), joins beyond an initial nullable join
# must use outer joins so that all results are included.
self.assertQuerysetEqual(
Report.objects.select_related("creator", "creator__extra").order_by("name"),
['<Report: r1>', '<Report: r2>', '<Report: r3>']
)
# When there are multiple paths to a table from another table, we have
# to be careful not to accidentally reuse an inappropriate join when
# using select_related(). We used to return the parent's Detail record
# here by mistake.
d1 = Detail.objects.create(data="d1")
d2 = Detail.objects.create(data="d2")
m1 = Member.objects.create(name="m1", details=d1)
m2 = Member.objects.create(name="m2", details=d2)
Child.objects.create(person=m2, parent=m1)
obj = m1.children.select_related("person__details")[0]
self.assertEqual(obj.person.details.data, 'd2')
def test_order_by_resetting(self):
# Calling order_by() with no parameters removes any existing ordering on the
# model. But it should still be possible to add new ordering after that.
qs = Author.objects.order_by().order_by('name')
self.assertIn('ORDER BY', qs.query.get_compiler(qs.db).as_sql()[0])
def test_order_by_reverse_fk(self):
        # It is possible to order by the reverse of a foreign key, although
        # that can lead to duplicate results.
c1 = SimpleCategory.objects.create(name="category1")
c2 = SimpleCategory.objects.create(name="category2")
CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c1)
self.assertSequenceEqual(SimpleCategory.objects.order_by('categoryitem', 'pk'), [c1, c2, c1])
def test_ticket10181(self):
# Avoid raising an EmptyResultSet if an inner query is probably
# empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
[]
)
def test_ticket15316_filter_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_exclude_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_filter_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_exclude_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_filter_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False).order_by('pk')
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_exclude_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_filter_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_exclude_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True).order_by('pk')
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
class Queries5Tests(TestCase):
@classmethod
def setUpTestData(cls):
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
# Meta.ordering will be rank3, rank2, rank1.
n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
a1 = Author.objects.create(name='a1', num=1001, extra=e1)
a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=e2)
cls.rank1 = Ranking.objects.create(rank=2, author=a2)
Ranking.objects.create(rank=1, author=a3)
Ranking.objects.create(rank=3, author=a1)
def test_ordering(self):
# Cross model ordering is possible in Meta, too.
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
self.assertQuerysetEqual(
Ranking.objects.all().order_by('rank'),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
        # Ordering by extra() pieces is possible, too, and you can mix extra
        # fields and model fields in the ordering.
self.assertQuerysetEqual(
Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
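        # quote_name() keeps the raw SQL portable to backends that need the
        # identifier quoted ('rank' is a reserved word on some of them).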
sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank')
qs = Ranking.objects.extra(select={'good': sql})
self.assertEqual(
[o.good for o in qs.extra(order_by=('-good',))],
[True, False, False]
)
self.assertQuerysetEqual(
qs.extra(order_by=('-good', 'id')),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
# Despite having some extra aliases in the query, we can still omit
# them in a values() query.
dicts = qs.values('id', 'rank').order_by('id')
self.assertEqual(
[d['rank'] for d in dicts],
[2, 1, 3]
)
def test_ticket7256(self):
# An empty values() call includes all aliases, including those from an
# extra()
sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank')
qs = Ranking.objects.extra(select={'good': sql})
dicts = qs.values().order_by('id')
for d in dicts:
del d['id']
del d['author_id']
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
)
def test_ticket7045(self):
# Extra tables used to crash SQL construction on the second use.
qs = Ranking.objects.extra(tables=['django_site'])
qs.query.get_compiler(qs.db).as_sql()
# test passes if this doesn't raise an exception.
qs.query.get_compiler(qs.db).as_sql()
def test_ticket9848(self):
# Make sure that updates which only filter on sub-tables don't
# inadvertently update the wrong records (bug #9848).
author_start = Author.objects.get(name='a1')
ranking_start = Ranking.objects.get(author__name='a1')
# Make sure that the IDs from different tables don't happen to match.
self.assertQuerysetEqual(
Ranking.objects.filter(author__name='a1'),
['<Ranking: 3: a1>']
)
self.assertEqual(
Ranking.objects.filter(author__name='a1').update(rank=4636),
1
)
r = Ranking.objects.get(author__name='a1')
self.assertEqual(r.id, ranking_start.id)
self.assertEqual(r.author.id, author_start.id)
self.assertEqual(r.rank, 4636)
r.rank = 3
r.save()
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
def test_ticket5261(self):
# Test different empty excludes.
self.assertQuerysetEqual(
Note.objects.exclude(Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.filter(~Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.filter(~Q() | ~Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.exclude(~Q() & ~Q()),
['<Note: n1>', '<Note: n2>']
)
def test_extra_select_literal_percent_s(self):
        # A literal %s can be included in a select clause by escaping it as
        # %%s.
self.assertEqual(
Note.objects.extra(select={'foo': "'%%s'"})[0].foo,
'%s'
)
self.assertEqual(
Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo,
'%s bar %s'
)
self.assertEqual(
Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo,
'bar %s'
)
class SelectRelatedTests(TestCase):
def test_tickets_3045_3288(self):
# Once upon a time, select_related() with circular relations would loop
# infinitely if you forgot to specify "depth". Now we set an arbitrary
# default upper bound.
self.assertQuerysetEqual(X.objects.all(), [])
self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
def test_ticket7778(self):
# Model subclasses could not be deleted if a nullable foreign key
# relates to a model that relates back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
def test_ticket7371(self):
self.assertQuerysetEqual(Related.objects.order_by('custom'), [])
class NullableRelOrderingTests(TestCase):
def test_ticket10028(self):
# Ordering by model related to nullable relations(!) should use outer
# joins, so that all results are included.
Plaything.objects.create(name="p1")
self.assertQuerysetEqual(
Plaything.objects.all(),
['<Plaything: p1>']
)
def test_join_already_in_query(self):
# Ordering by model related to nullable relations should not change
# the join type of already existing joins.
Plaything.objects.create(name="p1")
s = SingleObject.objects.create(name='s')
r = RelatedObject.objects.create(single=s, f=1)
Plaything.objects.create(name="p2", others=r)
qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
self.assertNotIn('JOIN', str(qs.query))
qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
self.assertIn('INNER', str(qs.query))
qs = qs.order_by('others__single__name')
        # The ordering by others__single__name will add one new join (to
        # single) and that join must be a LEFT join. The already existing
        # join to related objects must be kept INNER. So, we have both an
        # INNER and a LEFT join in the query.
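        # Illustratively (aliasing and quoting vary by backend), the FROM
        # clause now looks roughly like:
        #   ... INNER JOIN "queries_relatedobject" ON (...)
        #       LEFT OUTER JOIN "queries_singleobject" ON (...)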
self.assertEqual(str(qs.query).count('LEFT'), 1)
self.assertEqual(str(qs.query).count('INNER'), 1)
self.assertQuerysetEqual(
qs,
['<Plaything: p2>']
)
class DisjunctiveFilterTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
ExtraInfo.objects.create(info='e1', note=cls.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
# Join object related to the LeafA we create.
LeafA.objects.create(data='first')
self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
self.assertQuerysetEqual(
LeafA.objects.filter(Q(data='first') | Q(join__b__data='second')),
['<LeafA: first>']
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(note=self.n1) | ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
['<ExtraInfo: e1>']
)
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(info='e2') | ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
['<ExtraInfo: e1>']
)
class Queries6Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name='t1', category=generic)
cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
cls.t4 = Tag.objects.create(name='t4', parent=cls.t3)
cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
n1 = Note.objects.create(note='n1', misc='foo', id=1)
ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
ann1.notes.add(n1)
Annotation.objects.create(name='a2', tag=cls.t4)
def test_parallel_iterators(self):
# Parallel iterators work.
qs = Tag.objects.all()
i1, i2 = iter(qs), iter(qs)
self.assertEqual(repr(next(i1)), '<Tag: t1>')
self.assertEqual(repr(next(i1)), '<Tag: t2>')
self.assertEqual(repr(next(i2)), '<Tag: t1>')
self.assertEqual(repr(next(i2)), '<Tag: t2>')
self.assertEqual(repr(next(i2)), '<Tag: t3>')
self.assertEqual(repr(next(i1)), '<Tag: t3>')
qs = X.objects.all()
self.assertFalse(qs)
self.assertFalse(qs)
def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of
        # constructing the SQL (so we should see a nested query here,
        # indicated by two occurrences of "SELECT" in the generated SQL).
qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
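        # The compiled SQL should keep the inner query nested, roughly
        # (illustrative):
        #   SELECT ... FROM "queries_annotation" ... WHERE ... IN
        #       (SELECT ... FROM "queries_note" WHERE "note" = 'xyzzy')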
self.assertEqual(
qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
2
)
def test_tickets_8921_9188(self):
# Incorrect SQL was being generated for certain types of exclude()
# queries that crossed multi-valued relations (#8921, #9188 and some
# preemptively discovered cases).
self.assertQuerysetEqual(
PointerA.objects.filter(connection__pointerb__id=1),
[]
)
self.assertQuerysetEqual(
PointerA.objects.exclude(connection__pointerb__id=1),
[]
)
self.assertQuerysetEqual(
Tag.objects.exclude(children=None),
['<Tag: t1>', '<Tag: t3>']
)
# This example is tricky because the parent could be NULL, so only checking
# parents with annotations omits some results (tag t1, in this case).
self.assertQuerysetEqual(
Tag.objects.exclude(parent__annotation__name="a1"),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
        # The annotation->tag link is single-valued and the tag->children
        # link is multi-valued. So we have to split the exclude filter in the
        # middle and then optimize the inner query without losing results.
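        # Conceptually the ORM splits this into something like (illustrative):
        #   Annotation.objects.exclude(tag__in=Tag.objects.filter(children__name="t2"))
        # pushing the multi-valued half into a subquery.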
self.assertQuerysetEqual(
Annotation.objects.exclude(tag__children__name="t2"),
['<Annotation: a2>']
)
        # Nested queries are possible (although they should be used with
        # care, since they can have performance problems on backends like
        # MySQL).
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
['<Annotation: a1>']
)
def test_ticket3739(self):
# The all() method on querysets returns a copy of the queryset.
q1 = Tag.objects.order_by('name')
self.assertIsNot(q1, q1.all())
def test_ticket_11320(self):
qs = Tag.objects.exclude(category=None).exclude(category__name='foo')
self.assertEqual(str(qs.query).count(' INNER JOIN '), 1)
def test_distinct_ordered_sliced_subquery_aggregation(self):
self.assertEqual(Tag.objects.distinct().order_by('category__name')[:3].count(), 3)
def test_multiple_columns_with_the_same_name_slice(self):
self.assertEqual(
list(Tag.objects.order_by('name').values_list('name', 'category__name')[:2]),
[('t1', 'Generic'), ('t2', 'Generic')],
)
self.assertSequenceEqual(
Tag.objects.order_by('name').select_related('category')[:2],
[self.t1, self.t2],
)
self.assertEqual(
list(Tag.objects.order_by('-name').values_list('name', 'parent__name')[:2]),
[('t5', 't3'), ('t4', 't3')],
)
self.assertSequenceEqual(
Tag.objects.order_by('-name').select_related('parent')[:2],
[self.t5, self.t4],
)
class RawQueriesTests(TestCase):
def setUp(self):
Note.objects.create(note='n1', misc='foo', id=1)
def test_ticket14729(self):
        # Test the representation of a raw query with one or more parameters
        # passed as a list.
query = "SELECT * FROM queries_note WHERE note = %s"
params = ['n1']
qs = Note.objects.raw(query, params=params)
self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>")
query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
params = ['n1', 'foo']
qs = Note.objects.raw(query, params=params)
self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>")
class GeneratorExpressionTests(SimpleTestCase):
def test_ticket10432(self):
# Using an empty generator expression as the rvalue for an "__in"
# lookup is legal.
self.assertCountEqual(Note.objects.filter(pk__in=(x for x in ())), [])
class ComparisonTests(TestCase):
def setUp(self):
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="x%Y"),
['<Item: x%y>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__istartswith="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iendswith="A_b"),
['<Item: a_b>']
)
class ExistsSql(TestCase):
def test_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertFalse(Tag.objects.exists())
        # Ok - so the exists query worked - but did it include too many columns?
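        # (exists() clears the select list and adds a LIMIT, so the captured
        # SQL should look roughly like, illustratively:
        #   SELECT (1) AS "a" FROM "queries_tag" LIMIT 1)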
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql']
id, name = connection.ops.quote_name('id'), connection.ops.quote_name('name')
self.assertNotIn(id, qstr)
self.assertNotIn(name, qstr)
def test_ticket_18414(self):
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
self.assertTrue(Article.objects.exists())
self.assertTrue(Article.objects.distinct().exists())
self.assertTrue(Article.objects.distinct()[1:3].exists())
self.assertFalse(Article.objects.distinct()[1:1].exists())
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_18414_distinct_on(self):
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
self.assertTrue(Article.objects.distinct('name').exists())
self.assertTrue(Article.objects.distinct('name')[1:2].exists())
self.assertFalse(Article.objects.distinct('name')[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
"""
Tests for the Queryset.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertIs(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertIs(Tag.objects.all().ordered, True)
self.assertIs(Tag.objects.all().order_by().ordered, False)
def test_explicit_ordering(self):
self.assertIs(Annotation.objects.all().order_by('id').ordered, True)
def test_empty_queryset(self):
self.assertIs(Annotation.objects.none().ordered, True)
def test_order_by_extra(self):
self.assertIs(Annotation.objects.all().extra(order_by=['id']).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count('notes'))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by('num_notes').ordered, True)
@skipUnlessDBFeature('allow_sliced_subqueries_with_in')
class SubqueryTests(TestCase):
@classmethod
def setUpTestData(cls):
NamedCategory.objects.create(id=1, name='first')
NamedCategory.objects.create(id=2, name='second')
NamedCategory.objects.create(id=3, name='third')
NamedCategory.objects.create(id=4, name='fourth')
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2])
self.assertEqual(set(query.values_list('id', flat=True)), {3})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
self.assertEqual(set(query.values_list('id', flat=True)), {1, 2})
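        # (Each of the filters above compiles to a nested query, roughly:
        #  SELECT ... WHERE "id" IN (SELECT "id" FROM ... ORDER BY "id" DESC
        #  LIMIT n OFFSET m) - illustrative; backend syntax varies.)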
def test_slice_subquery_and_query(self):
"""
Slice a query that has a sliced subquery
"""
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2]
self.assertEqual({x.id for x in query}, {3, 4})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3]
self.assertEqual({x.id for x in query}, {3})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:]
self.assertEqual({x.id for x in query}, {2})
def test_related_sliced_subquery(self):
"""
Related objects constraints can safely contain sliced subqueries.
refs #22434
"""
generic = NamedCategory.objects.create(id=5, name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
t2 = Tag.objects.create(name='t2', category=generic)
ManagedModel.objects.create(data='mm1', tag=t1, public=True)
mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True)
query = ManagedModel.normal_manager.filter(
tag__in=Tag.objects.order_by('-id')[:1]
)
self.assertEqual({x.id for x in query}, {mm2.id})
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3})
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3})
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3})
def test_distinct_ordered_sliced_subquery(self):
# Implicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct().order_by('name')[0:2],
).order_by('name').values_list('name', flat=True), ['first', 'fourth']
)
# Explicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct().order_by('-name').values('id')[0:2],
).order_by('name').values_list('name', flat=True), ['second', 'third']
)
# Annotated value.
self.assertSequenceEqual(
DumbCategory.objects.filter(
id__in=DumbCategory.objects.annotate(
double_id=F('id') * 2
).order_by('id').distinct().values('double_id')[0:2],
).order_by('id').values_list('id', flat=True), [2, 4]
)
@skipUnlessDBFeature('allow_sliced_subqueries_with_in')
class QuerySetBitwiseOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
school = School.objects.create()
cls.room_1 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 1')
cls.room_2 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 2')
cls.room_3 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 3')
cls.room_4 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 4')
def test_or_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3])
def test_or_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4])
def test_or_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2])
def test_or_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by('-pk')[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by('-name')[:1]
self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4])
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
n = Note(note='Test1', misc='misc')
n.save()
e = ExtraInfo(info='good', note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
# Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
def test_no_model_options_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta)
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
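        # If queryset cloning ever deepcopies Model._meta, the patched
        # __deepcopy__ above will run and fail the test.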
try:
Note.objects.filter(pk__lte=F('pk') + 1).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
def test_no_fields_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta.get_field("misc"))
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
try:
Note.objects.filter(note=F('misc')).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(SimpleTestCase):
def test_emptyqueryset_values(self):
# #14366 -- Calling .values() on an empty QuerySet and then cloning
# that should not cause an error
self.assertCountEqual(Number.objects.none().values('num').order_by('num'), [])
def test_values_subquery(self):
self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values('pk')), [])
self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values_list('pk')), [])
def test_ticket_19151(self):
# #19151 -- Calling .values() or .values_list() on an empty QuerySet
# should return an empty QuerySet and not cause an error.
q = Author.objects.none()
self.assertCountEqual(q.values(), [])
self.assertCountEqual(q.values_list(), [])
class ValuesQuerysetTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=72)
def test_flat_values_list(self):
qs = Number.objects.values_list("num")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_extra_values(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(1, 2))
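        # With dict insertion order, the extra select compiles roughly to
        # (illustrative): SELECT (num+1) AS "value_plus_x", (num-2) AS
        # "value_minus_x", "num" FROM "queries_number".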
qs = qs.order_by('value_minus_x')
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_twice(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
qs = qs.order_by('value_minus_one').order_by('value_plus_one')
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_multiple(self):
        # PostgreSQL doesn't allow constants in ORDER BY, so check for that.
qs = Number.objects.extra(select={
'value_plus_one': 'num+1',
'value_minus_one': 'num-1',
'constant_value': '1'
})
qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_in_extra(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
order_by=['value_minus_one'],
)
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_select_params_values_order_in_extra(self):
        # testing for ticket 23259 issues
qs = Number.objects.extra(
select={'value_plus_x': 'num+%s'},
select_params=[1],
order_by=['value_plus_x'],
)
qs = qs.filter(num=72)
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_multiple_select_params_values_order_by(self):
        # testing for ticket 23259 issues
qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(72, 72))
qs = qs.order_by('value_minus_x')
qs = qs.filter(num=1)
qs = qs.values('num')
self.assertSequenceEqual(qs, [])
def test_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num')
self.assertSequenceEqual(qs, [(72,)])
def test_flat_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num', flat=True)
self.assertSequenceEqual(qs, [72])
def test_field_error_values_list(self):
# see #23443
msg = "Cannot resolve keyword %r into field. Join on 'name' not permitted." % 'foo'
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.values_list('name__foo')
def test_named_values_list_flat(self):
msg = "'flat' and 'named' can't be used together."
with self.assertRaisesMessage(TypeError, msg):
Number.objects.values_list('num', flat=True, named=True)
def test_named_values_list_bad_field_name(self):
msg = "Type names and field names must be valid identifiers: '1'"
with self.assertRaisesMessage(ValueError, msg):
Number.objects.extra(select={'1': 'num+1'}).values_list('1', named=True).first()
def test_named_values_list_with_fields(self):
qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
values = qs.values_list('num', 'num2', named=True).first()
self.assertEqual(type(values).__name__, 'Row')
self.assertEqual(values._fields, ('num', 'num2'))
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
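        # (named=True returns rows as namedtuple-like Row objects, so columns
        # are accessible both by position and by attribute name.)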
def test_named_values_list_without_fields(self):
qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
values = qs.values_list(named=True).first()
self.assertEqual(type(values).__name__, 'Row')
self.assertEqual(values._fields, ('num2', 'id', 'num', 'id__count'))
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
self.assertEqual(values.id__count, 1)
def test_named_values_list_expression_with_default_alias(self):
expr = Count('id')
values = Number.objects.annotate(id__count1=expr).values_list(expr, 'id__count1', named=True).first()
self.assertEqual(values._fields, ('id__count2', 'id__count1'))
def test_named_values_list_expression(self):
expr = F('num') + 1
qs = Number.objects.annotate(combinedexpression1=expr).values_list(expr, 'combinedexpression1', named=True)
values = qs.first()
self.assertEqual(values._fields, ('combinedexpression2', 'combinedexpression1'))
class QuerySetSupportsPythonIdioms(TestCase):
@classmethod
def setUpTestData(cls):
some_date = datetime.datetime(2014, 5, 16, 12, 1)
for i in range(1, 8):
Article.objects.create(
name="Article {}".format(i), created=some_date)
def get_ordered_articles(self):
return Article.objects.all().order_by('name')
def test_can_get_items_using_index_and_slice_notation(self):
self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1')
self.assertQuerysetEqual(
self.get_ordered_articles()[1:3],
["<Article: Article 2>", "<Article: Article 3>"]
)
def test_slicing_with_steps_can_be_used(self):
self.assertQuerysetEqual(
self.get_ordered_articles()[::2], [
"<Article: Article 1>",
"<Article: Article 3>",
"<Article: Article 5>",
"<Article: Article 7>"
]
)
def test_slicing_without_step_is_lazy(self):
with self.assertNumQueries(0):
self.get_ordered_articles()[0:5]
def test_slicing_with_tests_is_not_lazy(self):
with self.assertNumQueries(1):
self.get_ordered_articles()[0:5:3]
def test_slicing_can_slice_again_after_slicing(self):
self.assertQuerysetEqual(
self.get_ordered_articles()[0:5][0:2],
["<Article: Article 1>", "<Article: Article 2>"]
)
self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:], ["<Article: Article 5>"])
self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], [])
# Some more tests!
self.assertQuerysetEqual(
self.get_ordered_articles()[2:][0:2],
["<Article: Article 3>", "<Article: Article 4>"]
)
self.assertQuerysetEqual(
self.get_ordered_articles()[2:][:2],
["<Article: Article 3>", "<Article: Article 4>"]
)
self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3], ["<Article: Article 5>"])
# Using an offset without a limit is also possible.
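        # (qs[5:] compiles to an OFFSET with no LIMIT where the backend allows
        # it; e.g. on PostgreSQL roughly: ... ORDER BY "name" ASC OFFSET 5.)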
self.assertQuerysetEqual(
self.get_ordered_articles()[5:],
["<Article: Article 6>", "<Article: Article 7>"]
)
def test_slicing_cannot_filter_queryset_once_sliced(self):
with self.assertRaisesMessage(AssertionError, "Cannot filter a query once a slice has been taken."):
Article.objects.all()[0:5].filter(id=1)
def test_slicing_cannot_reorder_queryset_once_sliced(self):
with self.assertRaisesMessage(AssertionError, "Cannot reorder a query once a slice has been taken."):
Article.objects.all()[0:5].order_by('id')
def test_slicing_cannot_combine_queries_once_sliced(self):
with self.assertRaisesMessage(AssertionError, "Cannot combine queries once a slice has been taken."):
Article.objects.all()[0:1] & Article.objects.all()[4:5]
def test_slicing_negative_indexing_not_supported_for_single_element(self):
"""hint: inverting your ordering might do what you need"""
with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
Article.objects.all()[-1]
def test_slicing_negative_indexing_not_supported_for_range(self):
"""hint: inverting your ordering might do what you need"""
with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
Article.objects.all()[0:-5]
def test_can_get_number_of_items_in_queryset_using_standard_len(self):
self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1)
def test_can_combine_queries_using_and_and_or_operators(self):
s1 = Article.objects.filter(name__exact='Article 1')
s2 = Article.objects.filter(name__exact='Article 2')
self.assertQuerysetEqual(
(s1 | s2).order_by('name'),
["<Article: Article 1>", "<Article: Article 2>"]
)
self.assertQuerysetEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=1)
Number.objects.create(num=2)
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
Article.objects.create(name='three', created=datetime.datetime.now())
Article.objects.create(name='four', created=datetime.datetime.now())
food = Food.objects.create(name='spam')
Eaten.objects.create(meal='spam with eggs', food=food)
def test_tickets_7698_10202(self):
# People like to slice with '0' as the high-water mark.
self.assertQuerysetEqual(Article.objects.all()[0:0], [])
self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
self.assertEqual(Article.objects.all()[:0].count(), 0)
with self.assertRaisesMessage(TypeError, 'Cannot reverse a query once a slice has been taken.'):
Article.objects.all()[:0].latest('created')
def test_empty_resultset_sql(self):
# ticket #12192
self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
def test_empty_sliced_subquery(self):
self.assertEqual(Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0)
def test_empty_sliced_subquery_exclude(self):
self.assertEqual(Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1)
def test_zero_length_values_slicing(self):
n = 42
with self.assertNumQueries(0):
self.assertQuerysetEqual(Article.objects.values()[n:n], [])
self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
class EscapingTests(TestCase):
def test_ticket_7302(self):
# Reserved names are appropriately escaped
ReservedName.objects.create(name='a', order=42)
ReservedName.objects.create(name='b', order=37)
self.assertQuerysetEqual(
ReservedName.objects.all().order_by('order'),
['<ReservedName: b>', '<ReservedName: a>']
)
self.assertQuerysetEqual(
ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')),
['<ReservedName: b>', '<ReservedName: a>']
)
class ToFieldTests(TestCase):
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
{lunch, dinner},
)
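    # As the class name suggests, Eaten.food is assumed to use
    # to_field='name', so __in lookups compare against Food.name rather than
    # Food.pk.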
def test_in_subquery(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple'))),
{lunch}
)
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple').values('eaten__meal'))),
set()
)
self.assertEqual(
set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal='lunch'))),
{apple}
)
def test_nested_in_subquery(self):
extra = ExtraInfo.objects.create()
author = Author.objects.create(num=42, extra=extra)
report = Report.objects.create(creator=author)
comment = ReportComment.objects.create(report=report)
comments = ReportComment.objects.filter(
report__in=Report.objects.filter(
creator__in=extra.author_set.all(),
),
)
self.assertSequenceEqual(comments, [comment])
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
        dinner_pear = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Food.objects.filter(eaten__in=[lunch_apple, dinner_pear])),
{apple, pear}
)
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food=apple)),
{lunch, dinner}
)
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Food.objects.filter(eaten=lunch)),
{apple}
)
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(parent=node1)),
[node2]
)
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(node=node2)),
[node1]
)
class IsNullTests(TestCase):
def test_primary_key(self):
custom = CustomPk.objects.create(name='pk')
null = Related.objects.create()
notnull = Related.objects.create(custom=custom)
self.assertSequenceEqual(Related.objects.filter(custom__isnull=False), [notnull])
self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])
def test_to_field(self):
apple = Food.objects.create(name="apple")
Eaten.objects.create(food=apple, meal="lunch")
Eaten.objects.create(meal="lunch")
self.assertQuerysetEqual(
Eaten.objects.filter(food__isnull=False),
['<Eaten: apple at lunch>']
)
self.assertQuerysetEqual(
Eaten.objects.filter(food__isnull=True),
['<Eaten: None at lunch>']
)
class ConditionalTests(TestCase):
"""Tests whose execution depend on different environment conditions like
Python version or DB backend features"""
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
Tag.objects.create(name='t2', parent=t1, category=generic)
t3 = Tag.objects.create(name='t3', parent=t1)
Tag.objects.create(name='t4', parent=t3)
Tag.objects.create(name='t5', parent=t3)
def test_infinite_loop(self):
# If you're not careful, it's possible to introduce infinite loops via
# default ordering on foreign keys in a cycle. We detect that.
with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
list(LoopX.objects.all()) # Force queryset evaluation with list()
with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
list(LoopZ.objects.all()) # Force queryset evaluation with list()
# Note that this doesn't cause an infinite loop, since the default
# ordering on the Tag model is empty (and thus defaults to using "id"
# for the related field).
self.assertEqual(len(Tag.objects.order_by('parent')), 5)
# ... but you can still order in a non-recursive fashion among linked
# fields (the previous test failed because the default ordering was
# recursive).
self.assertQuerysetEqual(
LoopX.objects.all().order_by('y__x__y__x__id'),
[]
)
    # When grouping without specifying an ordering, we add an explicit
    # "ORDER BY NULL" clause on MySQL to prevent unnecessary sorting.
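    # e.g. roughly (illustrative): SELECT "parent_id" FROM "queries_tag"
    # GROUP BY "parent_id" ORDER BY NULL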
@skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
def test_null_ordering_added(self):
query = Tag.objects.values_list('parent_id', flat=True).order_by().query
query.group_by = ['parent_id']
sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
fragment = "ORDER BY "
pos = sql.find(fragment)
self.assertEqual(sql.find(fragment, pos + 1), -1)
self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
def test_in_list_limit(self):
# The "in" lookup works with lists of 1000 items or more.
        # The number of items is chosen to force three different IN batches
        # on Oracle, while staying under MSSQL's 2100 parameter limit.
numbers = list(range(2050))
max_query_params = connection.features.max_query_params
if max_query_params is None or max_query_params >= len(numbers):
Number.objects.bulk_create(Number(num=num) for num in numbers)
for number in [1000, 1001, 2000, len(numbers)]:
with self.subTest(number=number):
self.assertEqual(Number.objects.filter(num__in=numbers[:number]).count(), number)
class UnionTests(TestCase):
"""
Tests for the union of two querysets. Bug #12252.
"""
@classmethod
def setUpTestData(cls):
objectas = []
objectbs = []
objectcs = []
a_info = ['one', 'two', 'three']
for name in a_info:
o = ObjectA(name=name)
o.save()
objectas.append(o)
b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])]
for name, number, objecta in b_info:
o = ObjectB(name=name, num=number, objecta=objecta)
o.save()
objectbs.append(o)
c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])]
for name, objecta, objectb in c_info:
o = ObjectC(name=name, objecta=objecta, objectb=objectb)
o.save()
objectcs.append(o)
def check_union(self, model, Q1, Q2):
filter = model.objects.filter
self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))
def test_A_AB(self):
Q1 = Q(name='two')
Q2 = Q(objectb__name='deux')
self.check_union(ObjectA, Q1, Q2)
def test_A_AB2(self):
Q1 = Q(name='two')
Q2 = Q(objectb__name='deux', objectb__num=2)
self.check_union(ObjectA, Q1, Q2)
def test_AB_ACB(self):
Q1 = Q(objectb__name='deux')
Q2 = Q(objectc__objectb__name='deux')
self.check_union(ObjectA, Q1, Q2)
def test_BAB_BAC(self):
Q1 = Q(objecta__objectb__name='deux')
Q2 = Q(objecta__objectc__name='ein')
self.check_union(ObjectB, Q1, Q2)
def test_BAB_BACB(self):
Q1 = Q(objecta__objectb__name='deux')
Q2 = Q(objecta__objectc__objectb__name='trois')
self.check_union(ObjectB, Q1, Q2)
def test_BA_BCA__BAB_BAC_BCA(self):
Q1 = Q(objecta__name='one', objectc__objecta__name='two')
Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois')
self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
def test_no_extra_params(self):
"""
        Can create an instance of a model with only the PK field (#17056).
"""
DumbCategory.objects.create()
class ExcludeTests(TestCase):
@classmethod
def setUpTestData(cls):
f1 = Food.objects.create(name='apples')
Food.objects.create(name='oranges')
Eaten.objects.create(food=f1, meal='dinner')
j1 = Job.objects.create(name='Manager')
r1 = Responsibility.objects.create(description='Playing golf')
j2 = Job.objects.create(name='Programmer')
r2 = Responsibility.objects.create(description='Programming')
JobResponsibilities.objects.create(job=j1, responsibility=r1)
JobResponsibilities.objects.create(job=j2, responsibility=r2)
def test_to_field(self):
self.assertQuerysetEqual(
Food.objects.exclude(eaten__meal='dinner'),
['<Food: oranges>'])
self.assertQuerysetEqual(
Job.objects.exclude(responsibilities__description='Playing golf'),
['<Job: Programmer>'])
self.assertQuerysetEqual(
Responsibility.objects.exclude(jobs__name='Manager'),
['<Responsibility: Programming>'])
def test_ticket14511(self):
alex = Person.objects.get_or_create(name='Alex')[0]
jane = Person.objects.get_or_create(name='Jane')[0]
oracle = Company.objects.get_or_create(name='Oracle')[0]
google = Company.objects.get_or_create(name='Google')[0]
microsoft = Company.objects.get_or_create(name='Microsoft')[0]
intel = Company.objects.get_or_create(name='Intel')[0]
def employ(employer, employee, title):
Employment.objects.get_or_create(employee=employee, employer=employer, title=title)
employ(oracle, alex, 'Engineer')
employ(oracle, alex, 'Developer')
employ(google, alex, 'Engineer')
employ(google, alex, 'Manager')
employ(microsoft, alex, 'Manager')
employ(intel, alex, 'Manager')
employ(microsoft, jane, 'Developer')
employ(intel, jane, 'Manager')
alex_tech_employers = alex.employers.filter(
employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
self.assertSequenceEqual(alex_tech_employers, [google, oracle])
alex_nontech_employers = alex.employers.exclude(
employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft])
class ExcludeTest17600(TestCase):
"""
    Some regression tests for ticket #17600. Some of these likely duplicate
other existing tests.
"""
@classmethod
def setUpTestData(cls):
# Create a few Orders.
cls.o1 = Order.objects.create(pk=1)
cls.o2 = Order.objects.create(pk=2)
cls.o3 = Order.objects.create(pk=3)
# Create some OrderItems for the first order with homogeneous
# status_id values
cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
# Create some OrderItems for the second order with heterogeneous
# status_id values
cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values
cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)
def test_exclude_plain(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(items__status=1),
['<Order: 3>'])
def test_exclude_plain_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(items__status=1).distinct(),
['<Order: 3>'])
def test_exclude_with_q_object_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(Q(items__status=1)).distinct(),
['<Order: 3>'])
def test_exclude_with_q_object_no_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(Q(items__status=1)),
['<Order: 3>'])
def test_exclude_with_q_is_equal_to_plain_exclude(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1).distinct()),
list(Order.objects.exclude(Q(items__status=1)).distinct()))
def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1)),
list(Order.objects.exclude(Q(items__status=1)).distinct()))
@unittest.expectedFailure
def test_only_orders_with_all_items_having_status_1(self):
"""
This should only return orders having ALL items set to status 1, or
        those orders not having any items at all. The correct way to write
this query in SQL seems to be using two nested subqueries.
"""
self.assertQuerysetEqual(
Order.objects.exclude(~Q(items__status=1)).distinct(),
['<Order: 1>'])
class Exclude15786(TestCase):
"""Regression test for #15786"""
def test_ticket15786(self):
c1 = SimpleCategory.objects.create(name='c1')
c2 = SimpleCategory.objects.create(name='c2')
OneToOneCategory.objects.create(category=c1)
OneToOneCategory.objects.create(category=c2)
rel = CategoryRelationship.objects.create(first=c1, second=c2)
self.assertEqual(
CategoryRelationship.objects.exclude(
first__onetoonecategory=F('second__onetoonecategory')
).get(), rel
)
class NullInExcludeTest(TestCase):
@classmethod
def setUpTestData(cls):
NullableName.objects.create(name='i1')
NullableName.objects.create()
def test_null_in_exclude_qs(self):
none_val = '' if connection.features.interprets_empty_strings_as_nulls else None
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[]),
['i1', none_val], attrgetter('name'))
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=['i1']),
[none_val], attrgetter('name'))
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=['i3']),
['i1', none_val], attrgetter('name'))
inner_qs = NullableName.objects.filter(name='i1').values_list('name')
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=inner_qs),
[none_val], attrgetter('name'))
        # The inner queryset wasn't executed - it should have been turned
        # into a subquery above.
self.assertIs(inner_qs._result_cache, None)
@unittest.expectedFailure
def test_col_not_in_list_containing_null(self):
"""
The following case is not handled properly because
SQL's COL NOT IN (list containing null) handling is too weird to
abstract away.
"""
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[None]),
['i1'], attrgetter('name'))
def test_double_exclude(self):
self.assertEqual(
list(NullableName.objects.filter(~~Q(name='i1'))),
list(NullableName.objects.filter(Q(name='i1'))))
self.assertNotIn(
'IS NOT NULL',
str(NullableName.objects.filter(~~Q(name='i1')).query))
class EmptyStringsAsNullTest(TestCase):
"""
Filtering on non-null character fields works as expected.
The reason for these tests is that Oracle treats '' as NULL, and this
can cause problems in query construction. Refs #17957.
"""
@classmethod
def setUpTestData(cls):
cls.nc = NamedCategory.objects.create(name='')
def test_direct_exclude(self):
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name__in=['nonexistent']),
[self.nc.pk], attrgetter('pk')
)
def test_joined_exclude(self):
self.assertQuerysetEqual(
DumbCategory.objects.exclude(namedcategory__name__in=['nonexistent']),
[self.nc.pk], attrgetter('pk')
)
def test_21001(self):
foo = NamedCategory.objects.create(name='foo')
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name=''),
[foo.pk], attrgetter('pk')
)
class ProxyQueryCleanupTest(TestCase):
def test_evaluated_proxy_count(self):
"""
Generating the query string doesn't alter the query's state
in irreversible ways. Refs #18248.
"""
ProxyCategory.objects.create()
qs = ProxyCategory.objects.all()
self.assertEqual(qs.count(), 1)
str(qs.query)
self.assertEqual(qs.count(), 1)
class WhereNodeTest(SimpleTestCase):
class DummyNode:
def as_sql(self, compiler, connection):
return 'dummy', []
class MockCompiler:
def compile(self, node):
return node.as_sql(self, connection)
def __call__(self, name):
return connection.ops.quote_name(name)
def test_empty_full_handling_conjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', []))
w = WhereNode(children=[NothingNode(), self.DummyNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
def test_empty_full_handling_disjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()], connector='OR')
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR')
self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', []))
w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR')
self.assertEqual(w.as_sql(compiler, connection), ('dummy', []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', []))
def test_empty_nodes(self):
compiler = WhereNodeTest.MockCompiler()
empty_w = WhereNode()
w = WhereNode(children=[empty_w, empty_w])
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w.negate()
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.connector = 'OR'
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[empty_w, NothingNode()], connector='OR')
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[empty_w, NothingNode()], connector='AND')
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
class QuerySetExceptionTests(SimpleTestCase):
def test_iter_exceptions(self):
qs = ExtraInfo.objects.only('author')
msg = "'ManyToOneRel' object has no attribute 'attname'"
with self.assertRaisesMessage(AttributeError, msg):
list(qs)
def test_invalid_qs_list(self):
# Test for #19895 - second iteration over invalid queryset
# raises errors.
qs = Article.objects.order_by('invalid_column')
msg = "Cannot resolve keyword 'invalid_column' into field."
with self.assertRaisesMessage(FieldError, msg):
list(qs)
with self.assertRaisesMessage(FieldError, msg):
list(qs)
def test_invalid_order_by(self):
msg = "Invalid order_by arguments: ['*']"
with self.assertRaisesMessage(FieldError, msg):
list(Article.objects.order_by('*'))
def test_invalid_queryset_model(self):
msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.filter(extra=Article.objects.all()))
class NullJoinPromotionOrTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.d1 = ModelD.objects.create(name='foo')
d2 = ModelD.objects.create(name='bar')
cls.a1 = ModelA.objects.create(name='a1', d=cls.d1)
c = ModelC.objects.create(name='c')
b = ModelB.objects.create(name='b', c=c)
cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2)
def test_ticket_17886(self):
# The first Q-object is generating the match, the rest of the filters
# should not remove the match even if they do not match anything. The
# problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates a join to c, which the ORM tried to promote but
# failed as that join isn't nullable.
q_obj = (
Q(d__name='foo') |
Q(b__name='foo') |
Q(b__c__name='foo')
)
qset = ModelA.objects.filter(q_obj)
self.assertEqual(list(qset), [self.a1])
# We generate one INNER JOIN to D. The join is direct and not nullable
# so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
# for the b->c join, as a->b is nullable.
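        # Roughly (illustrative):
        #   ... INNER JOIN "queries_modeld" ON (...)
        #       LEFT OUTER JOIN "queries_modelb" ON (...)
        #       LEFT OUTER JOIN "queries_modelc" ON (...)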
self.assertEqual(str(qset.query).count('INNER JOIN'), 1)
def test_isnull_filter_promotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
def test_null_join_demotion(self):
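        # If any ANDed condition requires a non-null related row
        # (isnull=False), the join can be demoted to INNER; purely ORed
        # isnull conditions keep it LEFT OUTER.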
qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
self.assertIn(' INNER JOIN ', str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
self.assertIn(' INNER JOIN ', str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_ticket_21366(self):
n = Note.objects.create(note='n', misc='m')
e = ExtraInfo.objects.create(info='info', note=n)
a = Author.objects.create(name='Author1', num=1, extra=e)
Ranking.objects.create(rank=1, author=a)
r1 = Report.objects.create(name='Foo', creator=a)
r2 = Report.objects.create(name='Bar')
Report.objects.create(name='Bar', creator=a)
qs = Report.objects.filter(
Q(creator__ranking__isnull=True) |
Q(creator__ranking__rank=1, name='Foo')
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count(' JOIN '), 2)
self.assertSequenceEqual(qs.order_by('name'), [r2, r1])
def test_ticket_21748(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
i3 = Identifier.objects.create(name='i3')
Program.objects.create(identifier=i1)
Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
self.assertSequenceEqual(Identifier.objects.filter(program=None, channel=None), [i3])
self.assertSequenceEqual(Identifier.objects.exclude(program=None, channel=None).order_by('name'), [i1, i2])
def test_ticket_21748_double_negated_and(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
# Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for
# join promotion.
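        # i.e. exclude(~Q(cond)) is logically equivalent to filter(cond), so
        # both queries should compile with identical join types.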
qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(str(qs1_filter.query).count('JOIN'),
str(qs1_doubleneg.query).count('JOIN'))
self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
str(qs1_doubleneg.query).count('INNER JOIN'))
def test_ticket_21748_double_negated_or(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Test OR + double negation. The expected result is that the channel
        # join is LEFT OUTER and the program join is INNER.
qs1_filter = Identifier.objects.filter(
Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
).order_by('pk')
qs1_doubleneg = Identifier.objects.exclude(
~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
).order_by('pk')
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(str(qs1_filter.query).count('JOIN'),
str(qs1_doubleneg.query).count('JOIN'))
self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
str(qs1_doubleneg.query).count('INNER JOIN'))
def test_ticket_21748_complex_filter(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case: one query where each NOT is pushed to
        # the lowest level in the boolean tree, and another query where this
        # isn't done.
qs1 = Identifier.objects.filter(
~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
).order_by('pk')
qs2 = Identifier.objects.filter(
Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
).order_by('pk')
self.assertQuerysetEqual(qs1, qs2, lambda x: x)
self.assertEqual(str(qs1.query).count('JOIN'),
str(qs2.query).count('JOIN'))
self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
self.assertEqual(str(qs1.query).count('INNER JOIN'),
str(qs2.query).count('INNER JOIN'))
class ReverseJoinTrimmingTest(TestCase):
def test_reverse_trimming(self):
# We don't accidentally trim reverse joins - we can't know if there is
# anything on the other side of the join, so trimming reverse joins
# can't be done, ever.
t = Tag.objects.create()
qs = Tag.objects.filter(annotation__tag=t.pk)
self.assertIn('INNER JOIN', str(qs.query))
self.assertEqual(list(qs), [])
class JoinReuseTest(TestCase):
"""
The queries reuse joins sensibly (for example, direct joins
are always reused).
"""
def test_fk_reuse(self):
qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_select_related(self):
qs = Annotation.objects.filter(tag__name='foo').select_related('tag')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_annotation(self):
qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_disjunction(self):
qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_order_by(self):
qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_revo2o_reuse(self):
qs = Detail.objects.filter(member__name='foo').filter(member__name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_revfk_noreuse(self):
qs = Author.objects.filter(report__name='r4').filter(report__name='r1')
self.assertEqual(str(qs.query).count('JOIN'), 2)
def test_inverted_q_across_relations(self):
"""
When a trimmable join is specified in the query (here school__), the
        ORM detects it and removes unnecessary joins. The set of reusable joins
        is updated after trimming the query so that other lookups don't
consider that the outer query's filters are in effect for the subquery
(#26551).
"""
springfield_elementary = School.objects.create()
        hogwarts = School.objects.create()
        Student.objects.create(school=springfield_elementary)
        hp = Student.objects.create(school=hogwarts)
        Classroom.objects.create(school=hogwarts, name='Potion')
Classroom.objects.create(school=springfield_elementary, name='Main')
qs = Student.objects.filter(
~(Q(school__classroom__name='Main') & Q(school__classroom__has_blackboard=None))
)
self.assertSequenceEqual(qs, [hp])
class DisjunctionPromotionTests(TestCase):
def test_disjunction_promotion_select_related(self):
fk1 = FK1.objects.create(f1='f1', f2='f2')
basea = BaseA.objects.create(a=fk1)
qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
self.assertEqual(str(qs.query).count(' JOIN '), 0)
qs = qs.select_related('a', 'b')
self.assertEqual(str(qs.query).count(' INNER JOIN '), 0)
self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2)
with self.assertNumQueries(1):
self.assertSequenceEqual(qs, [basea])
self.assertEqual(qs[0].a, fk1)
self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# Now we have two different joins in an ORed condition, these
# must be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # The ANDed a__f2 filter allows us to keep using INNER JOIN even
        # inside the ORed case. If the join to a__ returns nothing, the ANDed
        # filter for a__f2 can't be true.
qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion3_demote(self):
# This one needs demotion logic: the first filter causes a to be
# outer joined, the second filter makes it inner join again.
qs = BaseA.objects.filter(
Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion4_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
        # Demotion is needed for the "a" join. It is marked as an outer join
        # by the above filter (even though it is trimmed away).
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion5_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        # Note that the above filters on "a" force the join to be INNER
        # even though it is trimmed.
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
# So, now the a__f1 join doesn't need promotion.
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# But b__f1 does.
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
# Now the join to a is created as LOUTER
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion6(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
def test_disjunction_promotion7(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | (Q(a__f1='bar')) & (Q(b__f1='bar') | Q(c__f1='foo')))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion_fexpression(self):
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
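# Hedged summary of the join promotion rules the tests above exercise
# (paraphrased from observed behavior, not authoritative):
#   - A join referenced only inside one branch of an ORed condition is
#     promoted to LEFT OUTER, since another branch may still match rows for
#     which the joined table has no match.
#   - An ANDed filter on the same join demotes it back to INNER, because the
#     filter can't be true when the joined row is missing anyway.
#   - F() references across joins (test_disjunction_promotion_fexpression)
#     follow the same rules as filters against literal values.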
class ManyToManyExcludeTest(TestCase):
def test_exclude_many_to_many(self):
Identifier.objects.create(name='extra')
program = Program.objects.create(identifier=Identifier.objects.create(name='program'))
channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel'))
channel.programs.add(program)
        # channel contains 'program', so all Identifiers except that one
        # should be returned.
self.assertQuerysetEqual(
Identifier.objects.exclude(program__channel=channel).order_by('name'),
['<Identifier: channel>', '<Identifier: extra>']
)
self.assertQuerysetEqual(
Identifier.objects.exclude(program__channel=None).order_by('name'),
['<Identifier: program>']
)
def test_ticket_12823(self):
pg3 = Page.objects.create(text='pg3')
pg2 = Page.objects.create(text='pg2')
pg1 = Page.objects.create(text='pg1')
pa1 = Paragraph.objects.create(text='pa1')
pa1.page.set([pg1, pg2])
pa2 = Paragraph.objects.create(text='pa2')
pa2.page.set([pg2, pg3])
pa3 = Paragraph.objects.create(text='pa3')
ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
b1 = Book.objects.create(title='b1', chapter=ch1)
b2 = Book.objects.create(title='b2', chapter=ch2)
b3 = Book.objects.create(title='b3', chapter=ch3)
q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
self.assertNotIn('IS NOT NULL', str(q.query))
self.assertEqual(len(q), 2)
self.assertNotIn(b1, q)
self.assertIn(b2, q)
self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
def test_ticket_19964(self):
my1 = MyObject.objects.create(data='foo')
my1.parent = my1
my1.save()
my2 = MyObject.objects.create(data='bar', parent=my1)
parents = MyObject.objects.filter(parent=F('id'))
children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id'))
self.assertEqual(list(parents), [my1])
# Evaluating the children query (which has parents as part of it) does
# not change results for the parents query.
self.assertEqual(list(children), [my2])
self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
def test_ticket_20101(self):
"""
Tests QuerySet ORed combining in exclude subquery case.
"""
t = Tag.objects.create(name='foo')
a1 = Annotation.objects.create(tag=t, name='a1')
a2 = Annotation.objects.create(tag=t, name='a2')
a3 = Annotation.objects.create(tag=t, name='a3')
n = Note.objects.create(note='foo', misc='bar')
qs1 = Note.objects.exclude(annotation__in=[a1, a2])
qs2 = Note.objects.filter(annotation__in=[a3])
self.assertIn(n, qs1)
self.assertNotIn(n, qs2)
self.assertIn(n, (qs1 | qs2))
class EmptyStringPromotionTests(SimpleTestCase):
def test_empty_string_promotion(self):
qs = RelatedObject.objects.filter(single__name='')
if connection.features.interprets_empty_strings_as_nulls:
self.assertIn('LEFT OUTER JOIN', str(qs.query))
else:
self.assertNotIn('LEFT OUTER JOIN', str(qs.query))
class ValuesSubqueryTests(TestCase):
def test_values_in_subquery(self):
# If a values() queryset is used, then the given values
# will be used instead of forcing use of the relation's field.
o1 = Order.objects.create(id=-2)
o2 = Order.objects.create(id=-1)
oi1 = OrderItem.objects.create(order=o1, status=0)
oi1.status = oi1.pk
oi1.save()
OrderItem.objects.create(order=o2, status=0)
# The query below should match o1 as it has related order_item
# with id == status.
self.assertSequenceEqual(Order.objects.filter(items__in=OrderItem.objects.values_list('status')), [o1])
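# Illustrative SQL shape for the filter above (aliases approximate, not
# captured from a real backend): because values_list('status') is used, the
# subquery selects "status" rather than the default primary key:
#   ... WHERE orderitem."id" IN (SELECT U0."status" FROM orderitem U0)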
class DoubleInSubqueryTests(TestCase):
def test_double_subquery_in(self):
lfa1 = LeafA.objects.create(data='foo')
lfa2 = LeafA.objects.create(data='bar')
lfb1 = LeafB.objects.create(data='lfb1')
lfb2 = LeafB.objects.create(data='lfb2')
Join.objects.create(a=lfa1, b=lfb1)
Join.objects.create(a=lfa2, b=lfb2)
leaf_as = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
joins = Join.objects.filter(a__in=leaf_as).values_list('b__id', flat=True)
qs = LeafB.objects.filter(pk__in=joins)
self.assertSequenceEqual(qs, [lfb1])
class Ticket18785Tests(SimpleTestCase):
def test_ticket_18785(self):
# Test join trimming from ticket18785
qs = Item.objects.exclude(
note__isnull=False
).filter(
name='something', creator__extra__isnull=True
).order_by()
self.assertEqual(1, str(qs.query).count('INNER JOIN'))
self.assertEqual(0, str(qs.query).count('OUTER JOIN'))
class Ticket20788Tests(TestCase):
def test_ticket_20788(self):
Paragraph.objects.create()
paragraph = Paragraph.objects.create()
page = paragraph.page.create()
chapter = Chapter.objects.create(paragraph=paragraph)
Book.objects.create(chapter=chapter)
paragraph2 = Paragraph.objects.create()
Page.objects.create()
chapter2 = Chapter.objects.create(paragraph=paragraph2)
book2 = Book.objects.create(chapter=chapter2)
sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
self.assertSequenceEqual(sentences_not_in_pub, [book2])
class Ticket12807Tests(TestCase):
def test_ticket_12807(self):
p1 = Paragraph.objects.create()
p2 = Paragraph.objects.create()
# The ORed condition below should have no effect on the query - the
# ~Q(pk__in=[]) will always be True.
qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
self.assertSequenceEqual(qs, [p1])
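# Illustrative note: Q(pk__in=[]) can never match, so ~Q(pk__in=[]) is always
# true and the ORM can drop that ORed branch entirely, leaving roughly:
#   ... WHERE paragraph."id" = %s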
class RelatedLookupTypeTests(TestCase):
error = 'Cannot query "%s": Must be "%s" instance.'
@classmethod
def setUpTestData(cls):
cls.oa = ObjectA.objects.create(name="oa")
cls.poa = ProxyObjectA.objects.get(name="oa")
cls.coa = ChildObjectA.objects.create(name="coa")
cls.wrong_type = Order.objects.create(id=cls.oa.pk)
cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
cls.pob = ProxyObjectB.objects.all()
ObjectC.objects.create(childobjecta=cls.coa)
def test_wrong_type_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup.
"""
# Passing incorrect object type
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
ObjectB.objects.get(objecta=self.wrong_type)
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
ObjectB.objects.filter(objecta__in=[self.wrong_type])
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
ObjectB.objects.filter(objecta=self.wrong_type)
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
        # Passing an object of the class on which the query is done.
with self.assertRaisesMessage(ValueError, self.error % (self.ob, ObjectA._meta.object_name)):
ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
with self.assertRaisesMessage(ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)):
ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
def test_wrong_backward_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup for backward relations.
"""
with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
ObjectA.objects.exclude(objectb=self.oa)
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
ObjectA.objects.get(objectb=self.wrong_type)
def test_correct_lookup(self):
"""
When passing proxy model objects, child objects, or parent objects,
lookups work fine.
"""
out_a = ['<ObjectA: oa>']
out_b = ['<ObjectB: ob>', '<ObjectB: pob>']
out_c = ['<ObjectC: >']
# proxy model objects
self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b)
self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2)
# child objects
self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b)
self.assertQuerysetEqual(
ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'),
out_b
)
# parent objects
self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
# QuerySet related object type checking shouldn't issue queries
# (the querysets aren't evaluated here, hence zero queries) (#23266).
with self.assertNumQueries(0):
ObjectB.objects.filter(objecta__in=ObjectA.objects.all())
def test_values_queryset_lookup(self):
"""
#23396 - Ensure ValueQuerySets are not checked for compatibility with the lookup field
"""
# Make sure the num and objecta field values match.
ob = ObjectB.objects.get(name='ob')
ob.num = ob.objecta.pk
ob.save()
pob = ObjectB.objects.get(name='pob')
pob.num = pob.objecta.pk
pob.save()
self.assertQuerysetEqual(ObjectB.objects.filter(
objecta__in=ObjectB.objects.all().values_list('num')
).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>'])
class Ticket14056Tests(TestCase):
def test_ticket_14056(self):
s1 = SharedConnection.objects.create(data='s1')
s2 = SharedConnection.objects.create(data='s2')
s3 = SharedConnection.objects.create(data='s3')
PointerA.objects.create(connection=s2)
expected_ordering = (
[s1, s3, s2] if connection.features.nulls_order_largest
else [s2, s1, s3]
)
self.assertSequenceEqual(SharedConnection.objects.order_by('-pointera__connection', 'pk'), expected_ordering)
class Ticket20955Tests(TestCase):
def test_ticket_20955(self):
jack = Staff.objects.create(name='jackstaff')
jackstaff = StaffUser.objects.create(staff=jack)
jill = Staff.objects.create(name='jillstaff')
jillstaff = StaffUser.objects.create(staff=jill)
task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
task_get = Task.objects.get(pk=task.pk)
# Load data so that assertNumQueries doesn't complain about the get
# version's queries.
task_get.creator.staffuser.staff
task_get.owner.staffuser.staff
qs = Task.objects.select_related(
'creator__staffuser__staff', 'owner__staffuser__staff')
self.assertEqual(str(qs.query).count(' JOIN '), 6)
task_select_related = qs.get(pk=task.pk)
with self.assertNumQueries(0):
self.assertEqual(task_select_related.creator.staffuser.staff,
task_get.creator.staffuser.staff)
self.assertEqual(task_select_related.owner.staffuser.staff,
task_get.owner.staffuser.staff)
class Ticket21203Tests(TestCase):
def test_ticket_21203(self):
p = Ticket21203Parent.objects.create(parent_bool=True)
c = Ticket21203Child.objects.create(parent=p)
qs = Ticket21203Child.objects.select_related('parent').defer('parent__created')
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
def test_values_no_promotion_for_existing(self):
qs = Node.objects.filter(parent__parent__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
qs = qs.values('parent__parent__id')
self.assertIn(' INNER JOIN ', str(qs.query))
# Make sure there is a left outer join without the filter.
qs = Node.objects.values('parent__parent__id')
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = ObjectB.objects.values('objecta__name')
self.assertIn(' INNER JOIN ', str(qs.query))
def test_ticket_21376(self):
a = ObjectA.objects.create()
ObjectC.objects.create(objecta=a)
qs = ObjectC.objects.filter(
Q(objecta=a) | Q(objectb__objecta=a),
)
qs = qs.filter(
Q(objectb=1) | Q(objecta=a),
)
self.assertEqual(qs.count(), 1)
tblname = connection.ops.quote_name(ObjectB._meta.db_table)
self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
def test_ticket_21787(self):
sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1')
sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2')
sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3')
c1 = CategoryItem.objects.create(category=sc1)
CategoryItem.objects.create(category=sc2)
self.assertSequenceEqual(SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by('name'), [sc2, sc3])
self.assertSequenceEqual(SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1])
class ReverseM2MCustomPkTests(TestCase):
def test_ticket_21879(self):
cpt1 = CustomPkTag.objects.create(id='cpt1', tag='cpt1')
cp1 = CustomPk.objects.create(name='cp1', extra='extra')
cp1.custompktag_set.add(cpt1)
self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1])
self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1])
class Ticket22429Tests(TestCase):
def test_ticket_22429(self):
sc1 = School.objects.create()
st1 = Student.objects.create(school=sc1)
sc2 = School.objects.create()
st2 = Student.objects.create(school=sc2)
cr = Classroom.objects.create(school=sc1)
cr.students.add(st1)
queryset = Student.objects.filter(~Q(classroom__school=F('school')))
self.assertSequenceEqual(queryset, [st2])
class Ticket23605Tests(TestCase):
def test_ticket_23605(self):
# Test filtering on a complicated q-object from ticket's report.
# The query structure is such that we have multiple nested subqueries.
# The original problem was that the inner queries weren't relabeled
# correctly.
# See also #24090.
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=10000.0)
Ticket23605B.objects.create(
field_b0=10000.0, field_b1=True,
modelc_fk=c1, modela_fk=a1)
complex_q = Q(pk__in=Ticket23605A.objects.filter(
Q(
# True for a1 as field_b0 = 10000, field_c0=10000
# False for a2 as no ticket23605b found
ticket23605b__field_b0__gte=1000000 /
F("ticket23605b__modelc_fk__field_c0")
) &
# True for a1 (field_b1=True)
Q(ticket23605b__field_b1=True) & ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter(
~(
                    # Same filters as the commented ones above, but
                    # double-negated (once by ~Q() and once by the
                    # parentheses). So again a1 matches, a2 doesn't.
Q(field_b1=True) &
Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
)
))).filter(ticket23605b__field_b1=True))
qs1 = Ticket23605A.objects.filter(complex_q)
self.assertSequenceEqual(qs1, [a1])
qs2 = Ticket23605A.objects.exclude(complex_q)
self.assertSequenceEqual(qs2, [a2])
class TestTicket24279(TestCase):
def test_ticket_24278(self):
School.objects.create()
qs = School.objects.filter(Q(pk__in=()) | Q())
self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(SimpleTestCase):
def test_invalid_values(self):
msg = "invalid literal for int() with base 10: 'abc'"
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag='abc')
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag__in=[123, 'abc'])
class TestTicket24605(TestCase):
def test_ticket_24605(self):
"""
Subquery table names should be quoted.
"""
i1 = Individual.objects.create(alive=True)
RelatedIndividual.objects.create(related=i1)
i2 = Individual.objects.create(alive=False)
RelatedIndividual.objects.create(related=i2)
i3 = Individual.objects.create(alive=True)
i4 = Individual.objects.create(alive=False)
self.assertSequenceEqual(Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)), [i4])
self.assertSequenceEqual(
Individual.objects.exclude(Q(alive=False), Q(related_individual__isnull=True)).order_by('pk'),
[i1, i2, i3]
)
class Ticket23622Tests(TestCase):
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_23622(self):
"""
        Make sure __pk__in and __in work the same for related fields when
        using a DISTINCT ON subquery.
"""
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=0.0)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=123,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=23,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=234,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=12,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=567,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=76,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=7,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=56,
field_b1=True,
modelc_fk=c1,
)
qx = (
Q(ticket23605b__pk__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
Q(ticket23605b__field_b0__gte=300)
)
qy = (
Q(ticket23605b__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
Q(ticket23605b__field_b0__gte=300)
)
self.assertEqual(
set(Ticket23605A.objects.filter(qx).values_list('pk', flat=True)),
set(Ticket23605A.objects.filter(qy).values_list('pk', flat=True))
)
self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
|
617b7badf11ba0ba32b85d0799f4ef3925dc2acecfc1de74f7dc76dd82b2af7b | import base64
import os
import shutil
import string
import tempfile
import unittest
from datetime import timedelta
from http import cookies
from django.conf import settings
from django.contrib.sessions.backends.base import UpdateError
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import (
SessionStore as CacheDBSession,
)
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import (
SessionStore as CookieSession,
)
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import (
JSONSerializer, PickleSerializer,
)
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.http import HttpResponse
from django.test import (
RequestFactory, TestCase, ignore_warnings, override_settings,
)
from django.utils import timezone
from .models import SessionStore as CustomDatabaseSession
class SessionTestsMixin:
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertIs(self.session.modified, False)
self.assertIs(self.session.accessed, False)
def test_get_empty(self):
self.assertIsNone(self.session.get('cat'))
def test_store(self):
self.session['cat'] = "dog"
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
        # Need to reset these to pretend we haven't accessed it:
        self.session.accessed = False
        self.session.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertIsNone(self.session.get('some key'))
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_default_named_argument(self):
self.assertEqual(self.session.pop('some key', default='does not exist'), 'does not exist')
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_no_default_keyerror_raised(self):
with self.assertRaises(KeyError):
self.session.pop('some key')
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_update(self):
self.session.update({'update key': 1})
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertIs(self.session.accessed, True)
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.values()), [1])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_keys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.keys()), ['x'])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_items(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_save(self):
self.session.save()
self.assertIs(self.session.exists(self.session.session_key), True)
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertIs(self.session.exists(self.session.session_key), False)
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertIs(self.session.modified, True)
self.assertIs(self.session.accessed, True)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_cycle_with_no_session_cache(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_data = self.session.items()
self.session = self.backend(self.session.session_key)
self.assertIs(hasattr(self.session, '_session_cache'), False)
self.session.cycle_key()
self.assertCountEqual(self.session.items(), prev_data)
def test_save_doesnt_clear_data(self):
self.session['a'] = 'b'
self.session.save()
self.assertEqual(self.session['a'], 'b')
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
session.save()
self.assertNotEqual(session.session_key, '1')
self.assertIsNone(session.get('cat'))
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ''
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = '1234567'
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = '12345678'
self.assertEqual(self.session.session_key, '12345678')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
with self.assertRaises(AttributeError):
set_session_key(self.session)
# Custom session expiry
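    # Hedged summary of the set_expiry() contract assumed by the tests below
    # (per the documented session API):
    #   set_expiry(None)      -> fall back to settings.SESSION_COOKIE_AGE
    #   set_expiry(0)         -> expire when the browser closes
    #   set_expiry(seconds)   -> expire that many seconds after modification
    #   set_expiry(timedelta) -> same, computed relative to "now"
    #   set_expiry(datetime)  -> expire at that exact moment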
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), False)
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), True)
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b'flaskdj:alkdjf').decode('ascii')
with self.assertLogs('django.security.SuspiciousSession', 'WARNING') as cm:
self.assertEqual({}, self.session.decode(bad_encode))
# The failed decode is logged.
self.assertIn('corrupted', cm.output[0])
def test_actual_expiry(self):
        # This doesn't work with JSONSerializer (serializing timedelta).
with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
        Creating session records on load is a DoS vulnerability.
"""
session = self.backend('someunknownkey')
session.load()
self.assertIsNone(session.session_key)
self.assertIs(session.exists(session.session_key), False)
        # The provided unknown key was cycled, not reused.
self.assertNotEqual(session.session_key, 'someunknownkey')
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
"""
Sessions shouldn't be resurrected by a concurrent request.
"""
# Create new session.
s1 = self.backend()
s1['test_data'] = 'value1'
s1.save(must_create=True)
# Logout in another context.
s2 = self.backend(s1.session_key)
s2.delete()
# Modify session in first context.
s1['test_data'] = 'value2'
with self.assertRaises(UpdateError):
# This should throw an exception as the session is deleted, not
# resurrect the session.
s1.save()
self.assertEqual(s1.load(), {})
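# Hedged sketch of the backend contract exercised above (illustrative, not
# the actual backend code): save() on a session whose record was deleted in
# another context must raise UpdateError instead of re-creating the row:
#   def save(self, must_create=False):
#       if must_create:
#           ...  # INSERT; raise CreateError on a duplicate key
#       else:
#           ...  # UPDATE the existing row; raise UpdateError if it is gone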
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
session_engine = 'django.contrib.sessions.backends.db'
@property
def model(self):
return self.backend.get_model_class()
def test_session_str(self):
"Session repr should be the session key."
self.session['x'] = 1
self.session.save()
session_key = self.session.session_key
s = self.model.objects.get(session_key=session_key)
self.assertEqual(str(s), session_key)
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
in normal way
"""
self.session['x'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
# Change it
self.model.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, self.model.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, self.model.objects.count())
with override_settings(SESSION_ENGINE=self.session_engine):
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, self.model.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CustomDatabaseSessionTests(DatabaseSessionTests):
backend = CustomDatabaseSession
session_engine = 'sessions_tests.models'
def test_extra_session_field(self):
# Set the account ID to be picked up by a custom session storage
# and saved to a custom session model database column.
self.session['_auth_user_id'] = 42
self.session.save()
# Make sure that the customized create_model_instance() was called.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, 42)
# Make the session "anonymous".
self.session.pop('_auth_user_id')
self.session.save()
# Make sure that save() on an existing session did the right job.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertIsNone(s.account_id)
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertIs(self.session.exists(self.session.session_key), True)
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
        # CacheDB backend should respect SESSION_CACHE_ALIAS (#21000).
with self.assertRaises(InvalidCacheBackendError):
self.backend()
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super().setUp()
def tearDown(self):
super().tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH='/if/this/directory/exists/you/have/a/weird/computer',
)
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
with self.assertRaises(ImproperlyConfigured):
self.backend()
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an OSError - by creating
# a new session, making it unclear whether the slashes were detected.
with self.assertRaises(InvalidSessionKey):
self.backend()._key_to_file("a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
with self.assertRaises(InvalidSessionKey):
self.backend()._key_to_file("a/b/c")
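    # Hedged sketch of the validation the two tests above rely on
    # (illustrative, not the backend's actual code): _key_to_file() must
    # reject keys containing path separators before joining them onto the
    # storage path, e.g.:
    #   if not set(session_key).issubset(VALID_KEY_CHARS):
    #       raise InvalidSessionKey("Invalid characters in session key")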
@override_settings(
SESSION_ENGINE="django.contrib.sessions.backends.file",
SESSION_COOKIE_AGE=0,
)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([
session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)
])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# One object in the present without an expiry (should be deleted since
# its modification time + SESSION_COOKIE_AGE will be in the past when
# clearsessions runs).
other_session2 = self.backend()
other_session2['foo'] = 'bar'
other_session2.save()
# Three sessions are in the filesystem before clearsessions...
self.assertEqual(3, count_sessions())
management.call_command('clearsessions')
# ... and two are deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertIsNotNone(caches['default'].get(self.session.cache_key))
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'session',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertIsNone(caches['default'].get(self.session.cache_key))
self.assertIsNotNone(caches['sessions'].get(self.session.cache_key))
def test_create_and_save(self):
self.session = self.backend()
self.session.create()
self.session.save()
self.assertIsNotNone(caches['default'].get(self.session.cache_key))
class SessionMiddlewareTests(TestCase):
request_factory = RequestFactory()
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = self.request_factory.get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]['secure'], True)
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = self.request_factory.get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'], True)
self.assertIn(
cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
@override_settings(SESSION_COOKIE_SAMESITE='Strict')
def test_samesite_session_cookie(self):
request = self.request_factory.get('/')
response = HttpResponse()
middleware = SessionMiddleware()
middleware.process_request(request)
request.session['hello'] = 'world'
response = middleware.process_response(request, response)
self.assertEqual(response.cookies[settings.SESSION_COOKIE_NAME]['samesite'], 'Strict')
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = self.request_factory.get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertEqual(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'], '')
self.assertNotIn(
cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
def test_session_save_on_500(self):
request = self.request_factory.get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# The value wasn't saved above.
self.assertNotIn('hello', request.session.load())
def test_session_update_error_redirect(self):
path = '/foo/'
request = self.request_factory.get(path)
response = HttpResponse()
middleware = SessionMiddleware()
request.session = DatabaseSession()
request.session.save(must_create=True)
request.session.delete()
msg = (
"The request's session was deleted before the request completed. "
"The user may have logged out in a concurrent request, for example."
)
with self.assertRaisesMessage(SuspiciousOperation, msg):
# Handle the response through the middleware. It will try to save
# the deleted session which will cause an UpdateError that's caught
# and raised as a SuspiciousOperation.
middleware.process_response(request, response)
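    # Hedged sketch of the middleware flow exercised above (illustrative,
    # not the middleware's actual source):
    #   try:
    #       request.session.save()
    #   except UpdateError:
    #       raise SuspiciousOperation("The request's session was deleted...")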
def test_session_delete_on_end(self):
request = self.request_factory.get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# The cookie was deleted, not recreated.
# A deleted cookie header looks like:
# Set-Cookie: sessionid=; expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}=""; expires=Thu, 01 Jan 1970 00:00:00 GMT; '
'Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
@override_settings(SESSION_COOKIE_DOMAIN='.example.local', SESSION_COOKIE_PATH='/example/')
def test_session_delete_on_end_with_custom_domain_and_path(self):
request = self.request_factory.get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# The cookie was deleted, not recreated.
# A deleted cookie header with a custom domain and path looks like:
# Set-Cookie: sessionid=; Domain=.example.local;
# expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0;
# Path=/example/
self.assertEqual(
'Set-Cookie: {}=""; Domain=.example.local; expires=Thu, '
'01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/example/'.format(
settings.SESSION_COOKIE_NAME,
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
def test_flush_empty_without_session_cookie_doesnt_set_cookie(self):
request = self.request_factory.get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# A cookie should not be set.
self.assertEqual(response.cookies, {})
# The session is accessed so "Vary: Cookie" should be set.
self.assertEqual(response['Vary'], 'Cookie')
def test_empty_session_saved(self):
"""
If a session is emptied of data but still has a key, it should still
be updated.
"""
request = self.request_factory.get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Set a session key and some data.
middleware.process_request(request)
request.session['foo'] = 'bar'
# Handle the response through the middleware.
response = middleware.process_response(request, response)
self.assertEqual(tuple(request.session.items()), (('foo', 'bar'),))
# A cookie should be set, along with Vary: Cookie.
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Empty the session data.
del request.session['foo']
# Handle the response through the middleware.
response = HttpResponse('Session test')
response = middleware.process_response(request, response)
self.assertEqual(dict(request.session.values()), {})
session = Session.objects.get(session_key=request.session.session_key)
self.assertEqual(session.get_decoded(), {})
# While the session is empty, it hasn't been flushed so a cookie should
# still be set, along with Vary: Cookie.
self.assertGreater(len(request.session.session_key), 8)
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super().test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
self.session.serializer = PickleSerializer
self.session.load()
@unittest.skip("Cookie backend doesn't have an external store to create records in.")
def test_session_load_does_not_create_record(self):
pass
@unittest.skip("CookieSession is stored in the client and there is no way to query it.")
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
pass
|
1552d6b77af7e8169a4069dc332fa0768be4b093dade8e6b898356f039424cca | import datetime
import itertools
import unittest
from copy import copy
from unittest import mock
from django.db import (
DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.deletion import CASCADE, PROTECT
from django.db.models.fields import (
AutoField, BigAutoField, BigIntegerField, BinaryField, BooleanField,
CharField, DateField, DateTimeField, IntegerField, PositiveIntegerField,
SlugField, TextField, TimeField, UUIDField,
)
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
from django.db.models.indexes import Index
from django.db.transaction import TransactionManagementError, atomic
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext, isolate_apps
from django.utils import timezone
from .fields import (
CustomManyToManyField, InheritedManyToManyField, MediumBlobField,
)
from .models import (
Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex,
AuthorWithDefaultHeight, AuthorWithEvenLongerName, AuthorWithIndexedName,
Book, BookForeignObj, BookWeak, BookWithLongName, BookWithO2O,
BookWithoutAuthor, BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag,
TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests for the schema-alteration code.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex,
AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak,
BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Node, Note,
Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
]
# Utility functions
def setUp(self):
        # local_models should contain test-dependent model classes that will
        # be automatically removed from the app cache on test tear down.
self.local_models = []
# isolated_local_models contains models that are in test methods
# decorated with @isolate_apps.
self.isolated_local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if 'schema' in new_apps.all_models:
for model in self.local_models:
for many_to_many in model._meta.many_to_many:
through = many_to_many.remote_field.through
if through and through._meta.auto_created:
del new_apps.all_models['schema'][through._meta.model_name]
del new_apps.all_models['schema'][model._meta.model_name]
if self.isolated_local_models:
with connection.schema_editor() as editor:
for model in self.isolated_local_models:
editor.delete_model(model)
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if tbl in table_names:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
        # SQLite also doesn't raise an error for a missing table; an empty
        # pragma result is the only signal that the table doesn't exist.
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
def get_primary_key(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_primary_key_column(cursor, table)
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return [
c['columns'][0]
for c in connection.introspection.get_constraints(cursor, table).values()
if c['index'] and len(c['columns']) == 1
]
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details['columns'] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
def check_added_field_default(self, schema_editor, model, field, field_name, expected_default,
cast_function=None):
with connection.cursor() as cursor:
schema_editor.add_field(model, field)
cursor.execute("SELECT {} FROM {};".format(field_name, model._meta.db_table))
database_default = cursor.fetchall()[0][0]
            if cast_function and type(database_default) is not type(expected_default):
database_default = cast_function(database_default)
self.assertEqual(database_default, expected_default)
def get_constraints_count(self, table, column, fk_to):
"""
        Return a dict with keys 'fks', 'uniques', and 'indexes' indicating the
number of foreign keys, unique constraints, and indexes on
`table`.`column`. The `fk_to` argument is a 2-tuple specifying the
expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {'fks': 0, 'uniques': 0, 'indexes': 0}
for c in constraints.values():
if c['columns'] == [column]:
if c['foreign_key'] == fk_to:
counts['fks'] += 1
if c['unique']:
counts['uniques'] += 1
elif c['index']:
counts['indexes'] += 1
return counts
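    # Illustrative usage of the helper above (hypothetical call site):
    #   counts = self.get_constraints_count(
    #       Book._meta.db_table,
    #       column=Book._meta.get_field('author').column,
    #       fk_to=(Author._meta.db_table, 'id'),
    #   )
    #   # e.g. -> {'fks': 1, 'uniques': 0, 'indexes': 1}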
def assertIndexOrder(self, table, index, order):
constraints = self.get_constraints(table)
self.assertIn(index, constraints)
index_orders = constraints[index]['orders']
self.assertTrue(all(val == expected for val, expected in zip(index_orders, order)))
def assertForeignKeyExists(self, model, column, expected_fk_table, field='id'):
"""
Fail if the FK constraint on `model.Meta.db_table`.`column` to
`expected_fk_table`.id doesn't exist.
"""
constraints = self.get_constraints(model._meta.db_table)
constraint_fk = None
for details in constraints.values():
if details['columns'] == [column] and details['foreign_key']:
constraint_fk = details['foreign_key']
break
self.assertEqual(constraint_fk, (expected_fk_table, field))
def assertForeignKeyNotExists(self, model, column, expected_fk_table):
with self.assertRaises(AssertionError):
self.assertForeignKeyExists(model, column, expected_fk_table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
with connection.schema_editor() as editor:
# Create the table
editor.create_model(Author)
# The table is there
list(Author.objects.all())
# Clean up that table
editor.delete_model(Author)
# No deferred SQL should be left over.
self.assertEqual(editor.deferred_sql, [])
# The table is gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
@skipUnlessDBFeature('supports_foreign_keys')
def test_fk(self):
"Creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
self.assertForeignKeyExists(Book, 'author_id', 'schema_tag')
@skipUnlessDBFeature('can_create_inline_fk')
def test_inline_fk(self):
# Create some tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.create_model(Note)
self.assertForeignKeyNotExists(Note, 'book_id', 'schema_book')
# Add a foreign key from one to the other.
with connection.schema_editor() as editor:
new_field = ForeignKey(Book, CASCADE)
new_field.set_attributes_from_name('book')
editor.add_field(Note, new_field)
self.assertForeignKeyExists(Note, 'book_id', 'schema_book')
# Creating a FK field with a constraint uses a single statement without
# a deferred ALTER TABLE.
self.assertFalse([
sql for sql in (str(statement) for statement in editor.deferred_sql)
if sql.startswith('ALTER TABLE') and 'ADD CONSTRAINT' in sql
])
@skipUnlessDBFeature('supports_foreign_keys')
def test_char_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorCharFieldWithIndex)
# Change CharField to FK
old_field = AuthorCharFieldWithIndex._meta.get_field('char_field')
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name('char_field')
with connection.schema_editor() as editor:
editor.alter_field(AuthorCharFieldWithIndex, old_field, new_field, strict=True)
self.assertForeignKeyExists(AuthorCharFieldWithIndex, 'char_field_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
@skipUnlessDBFeature('supports_index_on_text_field')
def test_text_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorTextFieldWithIndex)
# Change TextField to FK
old_field = AuthorTextFieldWithIndex._meta.get_field('text_field')
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name('text_field')
with connection.schema_editor() as editor:
editor.alter_field(AuthorTextFieldWithIndex, old_field, new_field, strict=True)
self.assertForeignKeyExists(AuthorTextFieldWithIndex, 'text_field_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
def test_fk_to_proxy(self):
"Creating a FK to a proxy model creates database constraints."
class AuthorProxy(Author):
class Meta:
app_label = 'schema'
apps = new_apps
proxy = True
class AuthorRef(Model):
author = ForeignKey(AuthorProxy, on_delete=CASCADE)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [AuthorProxy, AuthorRef]
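        # Registering the models here lets the test harness drop their tables
        # during teardown.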
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorRef)
self.assertForeignKeyExists(AuthorRef, 'author_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
def test_fk_db_constraint(self):
"The db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author')
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag')
# Alter to one with a constraint
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
self.assertForeignKeyExists(Author, 'tag_id', 'schema_tag')
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag, CASCADE)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag')
@isolate_apps('schema')
def test_no_db_constraint_added_during_primary_key_change(self):
"""
When a primary key that's pointed to by a ForeignKey with
db_constraint=False is altered, a foreign key constraint isn't added.
"""
class Author(Model):
class Meta:
app_label = 'schema'
class BookWeak(Model):
author = ForeignKey(Author, CASCADE, db_constraint=False)
class Meta:
app_label = 'schema'
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWeak)
self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author')
old_field = Author._meta.get_field('id')
new_field = BigAutoField(primary_key=True)
new_field.model = Author
new_field.set_attributes_from_name('id')
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author')
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
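        # contribute_to_class() also builds the implicit through model, so
        # add_field() below has an M2M table to create.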
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
self.assertForeignKeyNotExists(new_field.remote_field.through, 'tag_id', 'schema_tag')
@skipUnlessDBFeature('supports_foreign_keys')
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@skipUnlessDBFeature('supports_foreign_keys')
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@skipUnlessDBFeature('supports_foreign_keys')
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with CaptureQueriesContext(connection) as ctx, connection.schema_editor() as editor:
editor.add_field(Author, new_field)
drop_default_sql = editor.sql_alter_column_no_default % {
'column': editor.quote_name(new_field.name),
}
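            # The new field is nullable, so no temporary default is needed and
            # no DROP DEFAULT statement should appear in the captured queries.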
self.assertFalse(any(drop_default_sql in query['sql'] for query in ctx.captured_queries))
# Ensure the field is right afterwards
columns = self.column_classes(Author)
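        # Index 6 of the introspected column description is the null_ok flag
        # (PEP 249 cursor.description ordering).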
self.assertEqual(columns['age'][0], "IntegerField")
self.assertEqual(columns['age'][1][6], True)
def test_add_field_remove_field(self):
"""
        Adding a field and removing it removes all deferred SQL referring to it.
"""
with connection.schema_editor() as editor:
# Create a table with a unique constraint on the slug field.
editor.create_model(Tag)
# Remove the slug column.
editor.remove_field(Tag, Tag._meta.get_field('slug'))
self.assertEqual(editor.deferred_sql, [])
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
        # Ensure there's no surname field
        columns = self.column_classes(Author)
        self.assertNotIn("surname", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['surname'][0], "CharField")
self.assertEqual(columns['surname'][1][6],
connection.features.interprets_empty_strings_as_nulls)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
        # Ensure there's no awesome field
        columns = self.column_classes(Author)
        self.assertNotIn("awesome", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
        # BooleanFields are stored as TINYINT(1) on MySQL.
field_type = columns['awesome'][0]
self.assertEqual(field_type, connection.features.introspected_boolean_field_type)
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
        # Add the field with a default that get_prep_value() must transform
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns['thing']
self.assertEqual(field_type, 'IntegerField')
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
        # MySQL's introspection treats BLOB columns as TextField, so the
        # column may come back as either of these two types.
self.assertIn(columns['bits'][0], ("BinaryField", "TextField"))
@unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific")
def test_add_binaryfield_mediumblob(self):
"""
Test adding a custom-sized binary field on MySQL (#24846).
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field with default
new_field = MediumBlobField(blank=True, default=b'123')
new_field.set_attributes_from_name('bits')
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
# Introspection treats BLOBs as TextFields
self.assertEqual(columns['bits'][0], "TextField")
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(columns['name'][1][6], True)
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
def test_alter_auto_field_to_integer_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to IntegerField
old_field = Author._meta.get_field('id')
new_field = IntegerField(primary_key=True)
new_field.set_attributes_from_name('id')
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
def test_alter_auto_field_to_char_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to CharField
old_field = Author._meta.get_field('id')
new_field = CharField(primary_key=True, max_length=50)
new_field.set_attributes_from_name('id')
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
def test_alter_not_unique_field_to_primary_key(self):
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change UUIDField to primary key.
old_field = Author._meta.get_field('uuid')
new_field = UUIDField(primary_key=True)
new_field.set_attributes_from_name('uuid')
new_field.model = Author
with connection.schema_editor() as editor:
editor.remove_field(Author, Author._meta.get_field('id'))
editor.alter_field(Author, old_field, new_field, strict=True)
def test_alter_text_field(self):
        # Regression for "BLOB/TEXT column 'info' can't have a default value"
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature('can_defer_constraint_checks', 'can_rollback_ddl')
def test_alter_fk_checks_deferred_constraints(self):
"""
#25492 - Altering a foreign key's structure and data in the same
transaction.
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field('parent')
new_field = ForeignKey(Node, CASCADE)
new_field.set_attributes_from_name('parent')
parent = Node.objects.create()
with connection.schema_editor() as editor:
# Update the parent FK to create a deferred constraint check.
Node.objects.update(parent=parent)
editor.alter_field(Node, old_field, new_field, strict=True)
def test_alter_text_field_to_date_field(self):
"""
#25002 - Test conversion of text field to date field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='1988-05-05')
old_field = Note._meta.get_field('info')
new_field = DateField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
def test_alter_text_field_to_datetime_field(self):
"""
#25002 - Test conversion of text field to datetime field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='1988-05-05 3:16:17.4567')
old_field = Note._meta.get_field('info')
new_field = DateTimeField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
def test_alter_text_field_to_time_field(self):
"""
#25002 - Test conversion of text field to time field.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
Note.objects.create(info='3:16:17.4567')
old_field = Note._meta.get_field('info')
new_field = TimeField(blank=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
# Make sure the field isn't nullable
columns = self.column_classes(Note)
self.assertFalse(columns['info'][1][6])
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='aaa')
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='bbb')
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns['height'][1][6])
# Create some test data
Author.objects.create(name='Not null author', height=12)
Author.objects.create(name='Null author')
# Verify null value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertIsNone(Author.objects.get(name='Null author').height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertFalse(columns['height'][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertEqual(Author.objects.get(name='Null author').height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_nulls when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field('name')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_nulls when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field('info')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipUnlessDBFeature('supports_combined_alters')
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns['height'][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns['height'][1][6])
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBook]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for details in constraints.values():
if details['foreign_key']:
self.fail('Found an unexpected FK constraint to %s' % details['columns'])
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
self.assertForeignKeyExists(LocalBook, 'author_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
BookWithO2O.objects.all().delete()
self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author')
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique anymore
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
@skipUnlessDBFeature('supports_foreign_keys')
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
Book.objects.all().delete()
self.assertForeignKeyExists(Book, 'author_id', 'schema_author')
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique now
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author')
def test_alter_field_fk_to_o2o(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
old_field = Book._meta.get_field('author')
new_field = OneToOneField(Author, CASCADE)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index on ForeignKey is replaced with a unique constraint for OneToOneField.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
def test_alter_field_fk_keeps_index(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the index is right to begin with.
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
old_field = Book._meta.get_field('author')
# on_delete changed from CASCADE.
new_field = ForeignKey(Author, PROTECT)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
counts = self.get_constraints_count(
Book._meta.db_table,
Book._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The index remains.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
def test_alter_field_o2o_to_fk(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
old_field = BookWithO2O._meta.get_field('author')
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint on OneToOneField is replaced with an index for ForeignKey.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})
def test_alter_field_o2o_keeps_unique(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
expected_fks = 1 if connection.features.supports_foreign_keys else 0
# Check the unique constraint is right to begin with.
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
old_field = BookWithO2O._meta.get_field('author')
# on_delete changed from CASCADE.
new_field = OneToOneField(Author, PROTECT)
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
counts = self.get_constraints_count(
BookWithO2O._meta.db_table,
BookWithO2O._meta.get_field('author').column,
(Author._meta.db_table, Author._meta.pk.column),
)
# The unique constraint remains.
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
def test_alter_db_table_case(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Alter the case of the table
old_table_name = Author._meta.db_table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, old_table_name, old_table_name.upper())
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name='Foo')
Author.objects.create(name='Bar')
def test_alter_int_pk_to_autofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field('i')
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
def test_alter_int_pk_to_bigautofield_pk(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
BigAutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field('i')
new_field = BigAutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
def test_alter_int_pk_to_int_unique(self):
"""
        Should be able to alter an IntegerField(primary_key=True) to
IntegerField(unique=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
# Delete the old PK
old_field = IntegerPK._meta.get_field('i')
new_field = IntegerField(unique=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# The primary key constraint is gone. Result depends on database:
# 'id' for SQLite, None for others (must not be 'i').
self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ('id', None))
# Set up a model class as it currently stands. The original IntegerPK
# class is now out of date and some backends make use of the whole
# model class when modifying a field (such as sqlite3 when remaking a
# table) so an outdated model class leads to incorrect results.
class Transitional(Model):
i = IntegerField(unique=True)
j = IntegerField(unique=True)
class Meta:
app_label = 'schema'
apps = new_apps
db_table = 'INTEGERPK'
# model requires a new PK
old_field = Transitional._meta.get_field('j')
new_field = IntegerField(primary_key=True)
new_field.model = Transitional
new_field.set_attributes_from_name('j')
with connection.schema_editor() as editor:
editor.alter_field(Transitional, old_field, new_field, strict=True)
# Create a model class representing the updated model.
class IntegerUnique(Model):
i = IntegerField(unique=True)
j = IntegerField(primary_key=True)
class Meta:
app_label = 'schema'
apps = new_apps
db_table = 'INTEGERPK'
# Ensure unique constraint works.
IntegerUnique.objects.create(i=1, j=1)
with self.assertRaises(IntegrityError):
IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
        Tests simple renaming of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['display_name'][0], "CharField")
self.assertNotIn("name", columns)
@isolate_apps('schema')
def test_rename_referenced_field(self):
class Author(Model):
name = CharField(max_length=255, unique=True)
class Meta:
app_label = 'schema'
class Book(Model):
author = ForeignKey(Author, CASCADE, to_field='name')
class Meta:
app_label = 'schema'
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name('renamed')
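        # Renaming a column that other tables reference isn't atomic on every
        # backend (e.g. older SQLite), hence the feature-dependent atomic flag.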
with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor:
editor.alter_field(Author, Author._meta.get_field('name'), new_field)
# Ensure the foreign key reference was updated.
self.assertForeignKeyExists(Book, 'author_id', 'schema_author', 'renamed')
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_rename_keep_null_status(self):
"""
Renaming a field shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns['detail_info'][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = 'schema'
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(columns['book_id'][0], "IntegerField")
self.assertEqual(columns['tag_id'][0], "IntegerField")
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.remote_field.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
with self.assertRaises(DatabaseError):
self.column_classes(new_field.remote_field.through)
# Make sure the model state is coherent with the table one now that
# we've removed the tags field.
opts = LocalAuthorWithM2M._meta
opts.local_many_to_many.remove(new_field)
del new_apps.all_models['schema'][new_field.remote_field.through._meta.model_name]
opts._expire_cache()
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
tag = ForeignKey("schema.TagM2MTest", CASCADE)
class Meta:
app_label = 'schema'
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field, strict=True)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author, CASCADE)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(
LocalBookWithM2M._meta.get_field("tags").remote_field.through,
'tagm2mtest_id',
'schema_tagm2mtest',
)
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
# Ensure old M2M is gone
with self.assertRaises(DatabaseError):
self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
# This model looks like the new model and is used for teardown.
opts = LocalBookWithM2M._meta
opts.local_many_to_many.remove(old_field)
# Ensure the new M2M exists and points to UniqueTest
if connection.features.supports_foreign_keys:
self.assertForeignKeyExists(new_field.remote_field.through, 'uniquetest_id', 'schema_uniquetest')
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@isolate_apps('schema')
def test_m2m_rename_field_in_target_model(self):
class LocalTagM2MTest(Model):
title = CharField(max_length=255)
class Meta:
app_label = 'schema'
class LocalM2M(Model):
tags = ManyToManyField(LocalTagM2MTest)
class Meta:
app_label = 'schema'
# Create the tables.
with connection.schema_editor() as editor:
editor.create_model(LocalM2M)
editor.create_model(LocalTagM2MTest)
self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
# Ensure the m2m table is there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
# Alter a field in LocalTagM2MTest.
old_field = LocalTagM2MTest._meta.get_field('title')
new_field = CharField(max_length=254)
new_field.contribute_to_class(LocalTagM2MTest, 'title1')
# @isolate_apps() and inner models are needed to have the model
# relations populated, otherwise this doesn't act as a regression test.
self.assertEqual(len(new_field.model._meta.related_objects), 1)
with connection.schema_editor() as editor:
editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
# Ensure the m2m table is still there.
self.assertEqual(len(self.column_classes(LocalM2M)), 1)
@skipUnlessDBFeature('supports_column_check_constraints')
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()):
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for details in constraints.values():
if details['columns'] == ["height"] and details['check']:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()):
self.fail("No check constraint for height found")
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
with self.assertRaises(IntegrityError):
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
with self.assertRaises(IntegrityError):
TagUniqueRename.objects.create(title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_name_quoting(self):
old_table_name = TagUniqueRename._meta.db_table
try:
with connection.schema_editor() as editor:
editor.create_model(TagUniqueRename)
editor.alter_db_table(TagUniqueRename, old_table_name, 'unique-table')
TagUniqueRename._meta.db_table = 'unique-table'
# This fails if the unique index name isn't quoted.
editor.alter_unique_together(TagUniqueRename, [], (('title', 'slug2'),))
finally:
TagUniqueRename._meta.db_table = old_table_name
@isolate_apps('schema')
@unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.')
@skipUnlessDBFeature('supports_foreign_keys')
def test_unique_no_unnecessary_fk_drops(self):
"""
If AlterField isn't selective about dropping foreign key constraints
when modifying a field with a unique constraint, the AlterField
incorrectly drops and recreates the Book.author foreign key even though
it doesn't restrict the field being changed (#29193).
"""
class Author(Model):
name = CharField(max_length=254, unique=True)
class Meta:
app_label = 'schema'
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = 'schema'
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
new_field = CharField(max_length=255, unique=True)
new_field.model = Author
new_field.set_attributes_from_name('name')
with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm:
with connection.schema_editor() as editor:
editor.alter_field(Author, Author._meta.get_field('name'), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
@isolate_apps('schema')
@unittest.skipIf(connection.vendor == 'sqlite', 'SQLite remakes the table on field alteration.')
def test_unique_and_reverse_m2m(self):
"""
AlterField can modify a unique field when there's a reverse M2M
relation on the model.
"""
class Tag(Model):
title = CharField(max_length=255)
slug = SlugField(unique=True)
class Meta:
app_label = 'schema'
class Book(Model):
tags = ManyToManyField(Tag, related_name='books')
class Meta:
app_label = 'schema'
self.isolated_local_models = [Book._meta.get_field('tags').remote_field.through]
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Book)
new_field = SlugField(max_length=75, unique=True)
new_field.model = Tag
new_field.set_attributes_from_name('slug')
with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm:
with connection.schema_editor() as editor:
editor.alter_field(Tag, Tag._meta.get_field('slug'), new_field)
# One SQL statement is executed to alter the field.
self.assertEqual(len(cm.records), 1)
# Ensure that the field is still unique.
Tag.objects.create(title='foo', slug='foo')
with self.assertRaises(IntegrityError):
Tag.objects.create(title='bar', slug='foo')
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there's no unique_together to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
def test_unique_together_with_fk_with_existing_index(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key, where the foreign key is added after the model is
created.
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithoutAuthor)
new_field = ForeignKey(Author, CASCADE)
new_field.set_attributes_from_name('author')
editor.add_field(BookWithoutAuthor, new_field)
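            # Adding the FK here creates its index up front; the
            # unique_together alterations below must cope with that
            # existing index.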
        # Ensure there's no unique_together to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
        # Ensure there's no index on the slug/title columns first
        self.assertFalse(any(
            c["index"]
            for c in self.get_constraints("schema_tag").values()
            if c['columns'] == ["slug", "title"]
        ))
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
        self.assertTrue(any(
            c["index"]
            for c in self.get_constraints("schema_tag").values()
            if c['columns'] == ["slug", "title"]
        ))
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
        self.assertFalse(any(
            c["index"]
            for c in self.get_constraints("schema_tag").values()
            if c['columns'] == ["slug", "title"]
        ))
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there's no index_together to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the index_together constraint
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [['author', 'title']], [])
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
# Ensure there is an index
        self.assertTrue(any(
            c["index"]
            for c in self.get_constraints("schema_tagindexed").values()
            if c['columns'] == ["slug", "title"]
        ))
@isolate_apps('schema')
def test_db_table(self):
"""
Tests renaming of the table
"""
class Author(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = 'schema'
# Create the table and one referring it.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table
with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
# Ensure the table is there afterwards
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Ensure the foreign key reference was updated
self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor")
# Alter the table again
with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
def test_add_remove_index(self):
"""
Tests index addition and removal
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there and has no index
        self.assertNotIn('name', self.get_indexes(Author._meta.db_table))
# Add the index
index = Index(fields=['name'], name='author_title_idx')
with connection.schema_editor() as editor:
editor.add_index(Author, index)
self.assertIn('name', self.get_indexes(Author._meta.db_table))
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn('name', self.get_indexes(Author._meta.db_table))
def test_remove_db_index_doesnt_remove_custom_indexes(self):
"""
Changing db_index to False doesn't remove indexes from Meta.indexes.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithIndexedName)
# Ensure the table has its index
self.assertIn('name', self.get_indexes(AuthorWithIndexedName._meta.db_table))
# Add the custom index
index = Index(fields=['-name'], name='author_name_idx')
author_index_name = index.name
with connection.schema_editor() as editor:
db_index_name = editor._create_index_name(
table_name=AuthorWithIndexedName._meta.db_table,
column_names=('name',),
)
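        # _create_index_name() is the same private helper alter_field() uses,
        # so db_index_name reproduces the auto-generated name (roughly
        # '<table>_<column>_<hash>') that toggling db_index creates or drops.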
try:
AuthorWithIndexedName._meta.indexes = [index]
with connection.schema_editor() as editor:
editor.add_index(AuthorWithIndexedName, index)
old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertIn(author_index_name, old_constraints)
self.assertIn(db_index_name, old_constraints)
# Change name field to db_index=False
old_field = AuthorWithIndexedName._meta.get_field('name')
new_field = CharField(max_length=255)
new_field.set_attributes_from_name('name')
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithIndexedName, old_field, new_field, strict=True)
new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
self.assertNotIn(db_index_name, new_constraints)
# The index from Meta.indexes is still in the database.
self.assertIn(author_index_name, new_constraints)
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(AuthorWithIndexedName, index)
finally:
AuthorWithIndexedName._meta.indexes = []
def test_order_index(self):
"""
        Indexes can be defined with column ordering (ASC/DESC)
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
# The table doesn't have an index
        self.assertNotIn('name', self.get_indexes(Author._meta.db_table))
index_name = 'author_name_idx'
# Add the index
index = Index(fields=['name', '-weight'], name=index_name)
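        # A leading '-' in Index(fields=...) marks that column as DESC in the
        # generated CREATE INDEX statement.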
with connection.schema_editor() as editor:
editor.add_index(Author, index)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Author._meta.db_table, index_name, ['ASC', 'DESC'])
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Add a unique column, verify that creates an implicit index
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
# Remove the unique, check the index goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
def test_text_field_with_db_index(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorTextFieldWithIndex)
# The text_field index is present if the database supports it.
assertion = self.assertIn if connection.features.supports_index_on_text_field else self.assertNotIn
assertion('text_field', self.get_indexes(AuthorTextFieldWithIndex._meta.db_table))
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'id')
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
'id',
self.get_indexes(Tag._meta.db_table),
)
self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'slug')
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
@skipIfDBFeature('can_rollback_ddl')
def test_unsupported_transactional_ddl_disallowed(self):
message = (
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
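        # execute() must raise before any SQL reaches the database: on a
        # backend that can't roll back DDL, running it inside an atomic block
        # would leave the transaction in a broken state.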
with atomic(), connection.schema_editor() as editor:
with self.assertRaisesMessage(TransactionManagementError, message):
editor.execute(editor.sql_create_table % {'table': 'foo', 'definition': ''})
@skipUnlessDBFeature('supports_foreign_keys')
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
        Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@skipUnlessDBFeature('supports_foreign_keys')
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
        Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Add a second FK, this would fail due to long ref name before the fix
new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something")
new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
@isolate_apps('schema')
@skipUnlessDBFeature('supports_foreign_keys')
def test_add_foreign_key_quoted_db_table(self):
class Author(Model):
class Meta:
db_table = '"table_author_double_quoted"'
app_label = 'schema'
class Book(Model):
author = ForeignKey(Author, CASCADE)
class Meta:
app_label = 'schema'
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
if connection.vendor == 'mysql':
self.assertForeignKeyExists(Book, 'author_id', '"table_author_double_quoted"')
else:
self.assertForeignKeyExists(Book, 'author_id', 'table_author_double_quoted')
def test_add_foreign_object(self):
with connection.schema_editor() as editor:
editor.create_model(BookForeignObj)
new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id'])
new_field.set_attributes_from_name('author')
with connection.schema_editor() as editor:
editor.add_field(BookForeignObj, new_field)
def test_creation_deletion_reserved_names(self):
"""
Tries creating a model's table, and then deleting it when it has a
SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail("Errors when applying initial migration for a model "
"with a table named after an SQL reserved word: %s" % e)
# The table is there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# The table is gone
with self.assertRaises(DatabaseError):
list(Thing.objects.all())
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, field_class=IntegerField, **kwargs):
kwargs['db_column'] = "CamelCase"
field = field_class(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
identifier_converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
constraint_name = 'CamelCaseIndex'
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_index % {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"using": "",
"columns": editor.quote_name(column),
"extra": "",
"condition": "",
}
)
self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table))
editor.alter_field(model, get_field(db_index=True), field, strict=True)
self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table))
constraint_name = 'CamelCaseUniqConstraint'
expected_constraint_name = identifier_converter(constraint_name)
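            # _create_unique_sql() is a private helper returning the statement
            # for a named UNIQUE constraint; calling it directly lets the test
            # pick the capitalized name instead of going through alter_field().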
editor.execute(editor._create_unique_sql(model, [field.column], constraint_name))
self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table))
editor.alter_field(model, get_field(unique=True), field, strict=True)
self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table))
if editor.sql_create_fk:
constraint_name = 'CamelCaseFKConstraint'
expected_constraint_name = identifier_converter(constraint_name)
editor.execute(
editor.sql_create_fk % {
"table": editor.quote_name(table),
"name": editor.quote_name(constraint_name),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
"deferrable": connection.ops.deferrable_sql(),
}
)
self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table))
editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True)
self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table))
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Add new CharField to ensure default will be used from effective_default
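        # effective_default() resolves the value existing rows receive: for a
        # blank, non-null CharField that's the empty string, which Oracle
        # (interprets_empty_strings_as_nulls) reports back as NULL.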
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '')
def test_add_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Add new CharField with a default
new_field = CharField(max_length=15, blank=True, default='surname default')
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
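        # The default is applied while the column is added so existing rows
        # get a value, but the database-level DEFAULT clause is then dropped:
        # Django handles defaults in Python, not in the schema.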
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], 'surname default')
# And that the default is no longer set in the database.
field = next(
f for f in connection.introspection.get_table_description(cursor, "schema_author")
if f.name == "surname"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_alter_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name='Anonymous1')
self.assertIsNone(Author.objects.get().height)
old_field = Author._meta.get_field('height')
# The default from the new field is used in updating existing rows.
new_field = IntegerField(blank=True, default=42)
new_field.set_attributes_from_name('height')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(Author.objects.get().height, 42)
# The database default should be removed.
with connection.cursor() as cursor:
field = next(
f for f in connection.introspection.get_table_description(cursor, "schema_author")
if f.name == "height"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
@unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.')
    def test_alter_field_default_doesnt_perform_queries(self):
"""
        No queries are performed if a field's default changes and the field
        isn't changing from null to non-null.
"""
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
old_field = AuthorWithDefaultHeight._meta.get_field('height')
new_default = old_field.default * 2
new_field = PositiveIntegerField(null=True, blank=True, default=new_default)
new_field.set_attributes_from_name('height')
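        # Only `default` differs between the two fields, and defaults live at
        # the ORM level rather than in the schema, so no SQL should be issued.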
with connection.schema_editor() as editor, self.assertNumQueries(0):
editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True)
def test_add_textfield_unhashable_default(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name='Anonymous1')
# Create a field that has an unhashable default
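        # (A dict is unhashable, so any code path that tries to use the
        # default as a dict key or set member would crash; adding the field
        # simply has to succeed.)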
new_field = TextField(default={})
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_add_indexed_charfield(self):
field = CharField(max_length=255, db_index=True)
field.set_attributes_from_name('nom_de_plume')
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
# Should create two indexes; one for like operator.
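        # Under a non-C locale PostgreSQL can't use a plain btree index for
        # LIKE 'prefix%' lookups, so Django adds a second index with
        # varchar_pattern_ops (the '_like' suffix asserted below).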
self.assertEqual(
self.get_constraints_for_column(Author, 'nom_de_plume'),
['schema_author_nom_de_plume_7570a851', 'schema_author_nom_de_plume_7570a851_like'],
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_add_unique_charfield(self):
field = CharField(max_length=255, unique=True)
field.set_attributes_from_name('nom_de_plume')
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.add_field(Author, field)
# Should create two indexes; one for like operator.
self.assertEqual(
self.get_constraints_for_column(Author, 'nom_de_plume'),
['schema_author_nom_de_plume_7570a851_like', 'schema_author_nom_de_plume_key']
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_index_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Author._meta.get_field('name')
new_field = CharField(max_length=255, db_index=True)
new_field.set_attributes_from_name('name')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, 'name'),
['schema_author_name_1fbc5617', 'schema_author_name_1fbc5617_like']
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
# Alter to add unique=True and create 2 indexes.
old_field = Author._meta.get_field('name')
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name('name')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, 'name'),
['schema_author_name_1fbc5617_like', 'schema_author_name_1fbc5617_uniq']
)
# Remove unique=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_index_to_textfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Note)
self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])
# Alter to add db_index=True and create 2 indexes.
old_field = Note._meta.get_field('info')
new_field = TextField(db_index=True)
new_field.set_attributes_from_name('info')
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Note, 'info'),
['schema_note_info_4b0ea695', 'schema_note_info_4b0ea695_like']
)
# Remove db_index=True to drop both indexes.
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_unique_to_charfield_with_db_index(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field('title')
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq']
)
# Alter to remove unique=True (should drop unique index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_remove_unique_and_db_index_from_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
# Alter to add unique=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field('title')
new_field = CharField(max_length=100, db_index=True, unique=True)
new_field.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq']
)
# Alter to remove both unique=True and db_index=True (should drop all indexes)
new_field2 = CharField(max_length=100)
new_field2.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(self.get_constraints_for_column(BookWithoutAuthor, 'title'), [])
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_swap_unique_and_db_index_with_charfield(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(BookWithoutAuthor)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
# Alter to set unique=True and remove db_index=True (should replace the index)
old_field = BookWithoutAuthor._meta.get_field('title')
new_field = CharField(max_length=100, unique=True)
new_field.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq']
)
# Alter to set db_index=True and remove unique=True (should restore index)
new_field2 = CharField(max_length=100, db_index=True)
new_field2.set_attributes_from_name('title')
with connection.schema_editor() as editor:
editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(BookWithoutAuthor, 'title'),
['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like']
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
def test_alter_field_add_db_index_to_charfield_with_unique(self):
# Create the table and verify initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Tag)
self.assertEqual(
self.get_constraints_for_column(Tag, 'slug'),
['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
)
# Alter to add db_index=True
old_field = Tag._meta.get_field('slug')
new_field = SlugField(db_index=True, unique=True)
new_field.set_attributes_from_name('slug')
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, 'slug'),
['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
)
# Alter to remove db_index=True
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name('slug')
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
self.assertEqual(
self.get_constraints_for_column(Tag, 'slug'),
['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key']
)
def test_alter_field_add_index_to_integerfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])
# Alter to add db_index=True and create index.
old_field = Author._meta.get_field('weight')
new_field = IntegerField(null=True, db_index=True)
new_field.set_attributes_from_name('weight')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, 'weight'), ['schema_author_weight_587740f9'])
# Remove db_index=True to drop index.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [])
def test_alter_pk_with_self_referential_field(self):
"""
Changing the primary key field name of a model with a self-referential
foreign key (#26384).
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field('node_id')
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name('id')
with connection.schema_editor() as editor:
editor.alter_field(Node, old_field, new_field, strict=True)
self.assertForeignKeyExists(Node, 'parent_id', Node._meta.db_table)
@mock.patch('django.db.backends.base.schema.datetime')
@mock.patch('django.db.backends.base.schema.timezone')
    def test_add_datefield_and_datetimefield_use_effective_default(self, mocked_tz, mocked_datetime):
"""
effective_default() should be used for DateField, DateTimeField, and
        TimeField if auto_now or auto_now_add is set (#25005).
"""
now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1)
now_tz = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1, tzinfo=timezone.utc)
mocked_datetime.now = mock.MagicMock(return_value=now)
mocked_tz.now = mock.MagicMock(return_value=now_tz)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check auto_now/auto_now_add attributes are not defined
columns = self.column_classes(Author)
self.assertNotIn("dob_auto_now", columns)
self.assertNotIn("dob_auto_now_add", columns)
self.assertNotIn("dtob_auto_now", columns)
self.assertNotIn("dtob_auto_now_add", columns)
self.assertNotIn("tob_auto_now", columns)
self.assertNotIn("tob_auto_now_add", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Ensure fields were added with the correct defaults
dob_auto_now = DateField(auto_now=True)
dob_auto_now.set_attributes_from_name('dob_auto_now')
self.check_added_field_default(
editor, Author, dob_auto_now, 'dob_auto_now', now.date(),
cast_function=lambda x: x.date(),
)
dob_auto_now_add = DateField(auto_now_add=True)
dob_auto_now_add.set_attributes_from_name('dob_auto_now_add')
self.check_added_field_default(
editor, Author, dob_auto_now_add, 'dob_auto_now_add', now.date(),
cast_function=lambda x: x.date(),
)
dtob_auto_now = DateTimeField(auto_now=True)
dtob_auto_now.set_attributes_from_name('dtob_auto_now')
self.check_added_field_default(
editor, Author, dtob_auto_now, 'dtob_auto_now', now,
)
        dtob_auto_now_add = DateTimeField(auto_now_add=True)
        dtob_auto_now_add.set_attributes_from_name('dtob_auto_now_add')
        self.check_added_field_default(
            editor, Author, dtob_auto_now_add, 'dtob_auto_now_add', now,
)
tob_auto_now = TimeField(auto_now=True)
tob_auto_now.set_attributes_from_name('tob_auto_now')
self.check_added_field_default(
editor, Author, tob_auto_now, 'tob_auto_now', now.time(),
cast_function=lambda x: x.time(),
)
tob_auto_now_add = TimeField(auto_now_add=True)
tob_auto_now_add.set_attributes_from_name('tob_auto_now_add')
self.check_added_field_default(
editor, Author, tob_auto_now_add, 'tob_auto_now_add', now.time(),
cast_function=lambda x: x.time(),
)
def test_namespaced_db_table_create_index_name(self):
"""
Table names are stripped of their namespace/schema before being used to
generate index names.
"""
with connection.schema_editor() as editor:
max_name_length = connection.ops.max_name_length() or 200
namespace = 'n' * max_name_length
table_name = 't' * max_name_length
namespaced_table_name = '"%s"."%s"' % (namespace, table_name)
self.assertEqual(
editor._create_index_name(table_name, []),
editor._create_index_name(namespaced_table_name, []),
)
@unittest.skipUnless(connection.vendor == 'oracle', 'Oracle specific db_table syntax')
def test_creation_with_db_table_double_quotes(self):
oracle_user = connection.creation._test_database_user()
class Student(Model):
name = CharField(max_length=30)
class Meta:
app_label = 'schema'
apps = new_apps
db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user
class Document(Model):
name = CharField(max_length=30)
students = ManyToManyField(Student)
class Meta:
app_label = 'schema'
apps = new_apps
db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user
self.local_models = [Student, Document]
with connection.schema_editor() as editor:
editor.create_model(Student)
editor.create_model(Document)
doc = Document.objects.create(name='Test Name')
student = Student.objects.create(name='Some man')
doc.students.add(student)
def test_rename_table_renames_deferred_sql_references(self):
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.alter_db_table(Author, 'schema_author', 'schema_renamed_author')
            editor.alter_db_table(Book, 'schema_book', 'schema_renamed_book')
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_table('schema_author'), False)
self.assertIs(statement.references_table('schema_book'), False)
@unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.')
def test_rename_column_renames_deferred_sql_references(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_title = Book._meta.get_field('title')
new_title = CharField(max_length=100, db_index=True)
new_title.set_attributes_from_name('renamed_title')
editor.alter_field(Book, old_title, new_title)
old_author = Book._meta.get_field('author')
new_author = ForeignKey(Author, CASCADE)
new_author.set_attributes_from_name('renamed_author')
editor.alter_field(Book, old_author, new_author)
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
                self.assertIs(statement.references_column('schema_book', 'title'), False)
                self.assertIs(statement.references_column('schema_book', 'author_id'), False)
@isolate_apps('schema')
def test_referenced_field_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the field
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = 'schema'
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False)
class Meta:
app_label = 'schema'
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name('renamed')
with connection.schema_editor(atomic=True) as editor:
editor.alter_field(Foo, Foo._meta.get_field('field'), new_field)
@isolate_apps('schema')
def test_referenced_table_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the table
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = 'schema'
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False)
class Meta:
app_label = 'schema'
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name('renamed')
with connection.schema_editor(atomic=True) as editor:
editor.alter_db_table(Foo, Foo._meta.db_table, 'renamed_table')
Foo._meta.db_table = 'renamed_table'
|
9897e244eef22cd7ed7f8bb6ac687db2b1a98e918ad54787e96a09a431cf1930 | """
A series of tests to establish that the command-line management tools work as
advertised - especially with regard to the handling of the
DJANGO_SETTINGS_MODULE and default settings.py files.
"""
import os
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import unittest
from io import StringIO
from unittest import mock
import django
from django import conf, get_version
from django.conf import settings
from django.core.management import (
BaseCommand, CommandError, call_command, color,
)
from django.core.management.commands.loaddata import Command as LoaddataCommand
from django.core.management.commands.runserver import (
Command as RunserverCommand,
)
from django.core.management.commands.testserver import (
Command as TestserverCommand,
)
from django.db import ConnectionHandler, connection
from django.db.migrations.recorder import MigrationRecorder
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
custom_templates_dir = os.path.join(os.path.dirname(__file__), 'custom_templates')
SYSTEM_CHECK_MSG = 'System check identified no issues'
class AdminScriptTestCase(SimpleTestCase):
def setUp(self):
tmpdir = tempfile.TemporaryDirectory()
self.addCleanup(tmpdir.cleanup)
self.test_dir = os.path.join(tmpdir.name, 'test_project')
os.mkdir(self.test_dir)
with open(os.path.join(self.test_dir, '__init__.py'), 'w'):
pass
def write_settings(self, filename, apps=None, is_dir=False, sdict=None, extra=None):
if is_dir:
settings_dir = os.path.join(self.test_dir, filename)
os.mkdir(settings_dir)
settings_file_path = os.path.join(settings_dir, '__init__.py')
else:
settings_file_path = os.path.join(self.test_dir, filename)
with open(settings_file_path, 'w') as settings_file:
settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
if extra:
settings_file.write("%s\n" % extra)
exports = [
'DATABASES',
'ROOT_URLCONF',
'SECRET_KEY',
]
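            # Mirror a minimal subset of the live settings into the generated
            # file; plain values are re-quoted so the file stays importable.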
for s in exports:
if hasattr(settings, s):
o = getattr(settings, s)
if not isinstance(o, (dict, tuple, list)):
o = "'%s'" % o
settings_file.write("%s = %s\n" % (s, o))
if apps is None:
apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts']
settings_file.write("INSTALLED_APPS = %s\n" % apps)
if sdict:
for k, v in sdict.items():
settings_file.write("%s = %s\n" % (k, v))
def _ext_backend_paths(self):
"""
Returns the paths for any external backend packages.
"""
paths = []
for backend in settings.DATABASES.values():
package = backend['ENGINE'].split('.')[0]
if package != 'django':
backend_pkg = __import__(package)
backend_dir = os.path.dirname(backend_pkg.__file__)
paths.append(os.path.dirname(backend_dir))
return paths
def run_test(self, script, args, settings_file=None, apps=None):
base_dir = os.path.dirname(self.test_dir)
# The base dir for Django's tests is one level up.
tests_dir = os.path.dirname(os.path.dirname(__file__))
# The base dir for Django is one level above the test dir. We don't use
# `import django` to figure that out, so we don't pick up a Django
# from site-packages or similar.
django_dir = os.path.dirname(tests_dir)
ext_backend_base_dirs = self._ext_backend_paths()
# Define a temporary environment for the subprocess
test_environ = os.environ.copy()
# Set the test environment
if settings_file:
test_environ['DJANGO_SETTINGS_MODULE'] = settings_file
elif 'DJANGO_SETTINGS_MODULE' in test_environ:
del test_environ['DJANGO_SETTINGS_MODULE']
python_path = [base_dir, django_dir, tests_dir]
python_path.extend(ext_backend_base_dirs)
test_environ['PYTHONPATH'] = os.pathsep.join(python_path)
test_environ['PYTHONWARNINGS'] = ''
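        # PYTHONPATH must expose the generated project, the checked-out
        # Django, and the tests package so the subprocess imports the same
        # code under test; PYTHONWARNINGS is cleared so stray warnings don't
        # pollute the stderr assertions.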
return subprocess.Popen(
[sys.executable, script] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=self.test_dir,
env=test_environ, universal_newlines=True,
).communicate()
def run_django_admin(self, args, settings_file=None):
script_dir = os.path.abspath(os.path.join(os.path.dirname(django.__file__), 'bin'))
return self.run_test(os.path.join(script_dir, 'django-admin.py'), args, settings_file)
def run_manage(self, args, settings_file=None, configured_settings=False):
template_manage_py = (
os.path.join(os.path.dirname(__file__), 'configured_settings_manage.py')
if configured_settings else
os.path.join(os.path.dirname(conf.__file__), 'project_template', 'manage.py-tpl')
)
test_manage_py = os.path.join(self.test_dir, 'manage.py')
shutil.copyfile(template_manage_py, test_manage_py)
with open(test_manage_py) as fp:
manage_py_contents = fp.read()
manage_py_contents = manage_py_contents.replace(
"{{ project_name }}", "test_project")
with open(test_manage_py, 'w') as fp:
fp.write(manage_py_contents)
return self.run_test('./manage.py', args, settings_file)
def assertNoOutput(self, stream):
"Utility assertion: assert that the given stream is empty"
self.assertEqual(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream)
def assertOutput(self, stream, msg, regex=False):
"Utility assertion: assert that the given message exists in the output"
if regex:
self.assertIsNotNone(
re.search(msg, stream),
"'%s' does not match actual output text '%s'" % (msg, stream)
)
else:
self.assertIn(msg, stream, "'%s' does not match actual output text '%s'" % (msg, stream))
def assertNotInOutput(self, stream, msg):
"Utility assertion: assert that the given message doesn't exist in the output"
self.assertNotIn(msg, stream, "'%s' matches actual output text '%s'" % (msg, stream))
##########################################################################
# DJANGO ADMIN TESTS
# This first series of test classes checks the environment processing
# of the django-admin.py script
##########################################################################
class DjangoAdminNoSettings(AdminScriptTestCase):
"A series of tests for django-admin.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_bad_settings(self):
"no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_commands_with_invalid_settings(self):
""""
Commands that don't require settings succeed if the settings file
doesn't exist.
"""
args = ['startproject']
out, err = self.run_django_admin(args, settings_file='bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "You must provide a project name", regex=True)
class DjangoAdminDefaultSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py')
def test_builtin_command(self):
"default: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"default: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"default: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"default: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"default: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"default: django-admin can't execute user commands if it isn't provided settings"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"default: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"default: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
contains the test application specified using a full path.
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes',
'admin_scripts', 'admin_scripts.complex_app'])
def test_builtin_command(self):
"fulldefault: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"fulldefault: django-admin builtin commands succeed if a settings file is provided"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"fulldefault: django-admin builtin commands succeed if the environment contains settings"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"fulldefault: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"fulldefault: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"fulldefault: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminMinimalSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
def test_builtin_command(self):
"minimal: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"minimal: django-admin builtin commands fail if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_environment(self):
"minimal: django-admin builtin commands fail if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_bad_settings(self):
"minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"minimal: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"minimal: django-admin can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: django-admin can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class DjangoAdminAlternateSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
super().setUp()
self.write_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"alternate: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"alternate: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"alternate: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminMultipleSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"alternate: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"alternate: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"alternate: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminSettingsDirectory(AdminScriptTestCase):
"""
A series of tests for django-admin.py when the settings file is in a
    directory (see #9751).
"""
def setUp(self):
super().setUp()
self.write_settings('settings', is_dir=True)
def test_setup_environ(self):
"directory: startapp creates the correct directory"
args = ['startapp', 'settings_test']
app_path = os.path.join(self.test_dir, 'settings_test')
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertTrue(os.path.exists(app_path))
with open(os.path.join(app_path, 'apps.py')) as f:
content = f.read()
self.assertIn("class SettingsTestConfig(AppConfig)", content)
self.assertIn("name = 'settings_test'", content)
def test_setup_environ_custom_template(self):
"directory: startapp creates the correct directory with a custom template"
template_path = os.path.join(custom_templates_dir, 'app_template')
args = ['startapp', '--template', template_path, 'custom_settings_test']
app_path = os.path.join(self.test_dir, 'custom_settings_test')
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertTrue(os.path.exists(app_path))
self.assertTrue(os.path.exists(os.path.join(app_path, 'api.py')))
def test_startapp_unicode_name(self):
"directory: startapp creates the correct directory with unicode characters"
args = ['startapp', 'こんにちは']
app_path = os.path.join(self.test_dir, 'こんにちは')
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertTrue(os.path.exists(app_path))
with open(os.path.join(app_path, 'apps.py'), encoding='utf8') as f:
content = f.read()
self.assertIn("class こんにちはConfig(AppConfig)", content)
self.assertIn("name = 'こんにちは'", content)
def test_builtin_command(self):
"directory: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_bad_settings(self):
"directory: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"directory: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"directory: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_builtin_with_settings(self):
"directory: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"directory: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
##########################################################################
# MANAGE.PY TESTS
# This next series of test classes checks the environment processing
# of the generated manage.py script
##########################################################################
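# A minimal sketch of the settings-resolution order these tests exercise
# (hypothetical helper, not used by the tests themselves): an explicit
# --settings argument takes precedence over the DJANGO_SETTINGS_MODULE
# environment variable.
def _resolve_settings_module_sketch(argv_settings=None):
    import os
    # --settings on the command line wins; otherwise fall back to the
    # environment. None means "no settings configured".
    return argv_settings or os.environ.get('DJANGO_SETTINGS_MODULE')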
class ManageNoSettings(AdminScriptTestCase):
"A series of tests for manage.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: manage.py builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, r"No module named '?(test_project\.)?settings'?", regex=True)
def test_builtin_with_bad_settings(self):
"no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
class ManageDefaultSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py')
def test_builtin_command(self):
"default: manage.py builtin commands succeed when default settings are appropriate"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_settings(self):
"default: manage.py builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"default: manage.py builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"default: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"default: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"default: manage.py can execute user commands when default settings are appropriate"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_settings(self):
"default: manage.py can execute user commands when settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"default: manage.py can execute user commands when settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class ManageFullPathDefaultSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application specified using a full path.
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts'])
def test_builtin_command(self):
"fulldefault: manage.py builtin commands succeed when default settings are appropriate"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_settings(self):
"fulldefault: manage.py builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"fulldefault: manage.py builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"fulldefault: manage.py can execute user commands when default settings are appropriate"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_settings(self):
"fulldefault: manage.py can execute user commands when settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"fulldefault: manage.py can execute user commands when settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class ManageMinimalSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
def test_builtin_command(self):
"minimal: manage.py builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_settings(self):
"minimal: manage.py builtin commands fail if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_environment(self):
"minimal: manage.py builtin commands fail if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_bad_settings(self):
"minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"minimal: manage.py can't execute user commands without appropriate settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"minimal: manage.py can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: manage.py can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class ManageAlternateSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
super().setUp()
self.write_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: manage.py builtin commands fail with an error when no default settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, r"No module named '?(test_project\.)?settings'?", regex=True)
def test_builtin_with_settings(self):
"alternate: manage.py builtin commands work with settings provided as argument"
args = ['check', '--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertOutput(out, SYSTEM_CHECK_MSG)
self.assertNoOutput(err)
def test_builtin_with_environment(self):
"alternate: manage.py builtin commands work if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'alternate_settings')
self.assertOutput(out, SYSTEM_CHECK_MSG)
self.assertNoOutput(err)
def test_builtin_with_bad_settings(self):
"alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"alternate: manage.py can't execute user commands without settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, r"No module named '?(test_project\.)?settings'?", regex=True)
def test_custom_command_with_settings(self):
"alternate: manage.py can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertOutput(
out,
"EXECUTE: noargs_command options=[('force_color', False), "
"('no_color', False), ('pythonpath', None), ('settings', "
"'alternate_settings'), ('traceback', False), ('verbosity', 1)]"
)
self.assertNoOutput(err)
def test_custom_command_with_environment(self):
"alternate: manage.py can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'alternate_settings')
self.assertOutput(
out,
"EXECUTE: noargs_command options=[('force_color', False), "
"('no_color', False), ('pythonpath', None), ('settings', None), "
"('traceback', False), ('verbosity', 1)]"
)
self.assertNoOutput(err)
def test_custom_command_output_color(self):
"alternate: manage.py output syntax color can be deactivated with the `--no-color` option"
args = ['noargs_command', '--no-color', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertOutput(
out,
"EXECUTE: noargs_command options=[('force_color', False), "
"('no_color', True), ('pythonpath', None), ('settings', "
"'alternate_settings'), ('traceback', False), ('verbosity', 1)]"
)
self.assertNoOutput(err)
class ManageMultipleSettings(AdminScriptTestCase):
"""A series of tests for manage.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def test_builtin_command(self):
"multiple: manage.py builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_settings(self):
"multiple: manage.py builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"multiple: manage.py can execute builtin commands if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"multiple: manage.py can't execute user commands using default settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"multiple: manage.py can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"multiple: manage.py can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class ManageSettingsWithSettingsErrors(AdminScriptTestCase):
"""
Tests for manage.py when using the default settings.py file containing
runtime errors.
"""
def write_settings_with_import_error(self, filename):
settings_file_path = os.path.join(self.test_dir, filename)
with open(settings_file_path, 'w') as settings_file:
settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
settings_file.write('# The next line will cause an import error:\nimport foo42bar\n')
def test_import_error(self):
"""
        import error: manage.py builtin commands show useful diagnostic info
        when a settings module with import errors is provided (#14130).
"""
self.write_settings_with_import_error('settings.py')
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named")
self.assertOutput(err, "foo42bar")
def test_attribute_error(self):
"""
        manage.py builtin commands do not swallow attribute errors due to bad
settings (#18845).
"""
self.write_settings('settings.py', sdict={'BAD_VAR': 'INSTALLED_APPS.crash'})
args = ['collectstatic', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "AttributeError: 'list' object has no attribute 'crash'")
def test_key_error(self):
self.write_settings('settings.py', sdict={'BAD_VAR': 'DATABASES["blah"]'})
args = ['collectstatic', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "KeyError: 'blah'")
def test_help(self):
"""
        The help output notes that only Django core commands are listed when
        settings are not properly configured.
"""
self.write_settings(
'settings.py',
extra='from django.core.exceptions import ImproperlyConfigured\n'
'raise ImproperlyConfigured()',
)
args = ['help']
out, err = self.run_manage(args)
self.assertOutput(out, 'only Django core commands are listed')
self.assertNoOutput(err)
class ManageCheck(AdminScriptTestCase):
def test_nonexistent_app(self):
"""check reports an error on a nonexistent app in INSTALLED_APPS."""
self.write_settings(
'settings.py',
apps=['admin_scriptz.broken_app'],
sdict={'USE_I18N': False},
)
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'ModuleNotFoundError')
self.assertOutput(err, 'No module named')
self.assertOutput(err, 'admin_scriptz')
def test_broken_app(self):
""" manage.py check reports an ImportError if an app's models.py
raises one on import """
self.write_settings('settings.py', apps=['admin_scripts.broken_app'])
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'ImportError')
def test_complex_app(self):
""" manage.py check does not raise an ImportError validating a
complex app with nested calls to load_app """
self.write_settings(
'settings.py',
apps=[
'admin_scripts.complex_app',
'admin_scripts.simple_app',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
],
sdict={
'DEBUG': True,
'MIDDLEWARE': [
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
],
'TEMPLATES': [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
],
}
)
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertEqual(out, 'System check identified no issues (0 silenced).\n')
def test_app_with_import(self):
""" manage.py check does not raise errors when an app imports a base
class that itself has an abstract base. """
self.write_settings(
'settings.py',
apps=[
'admin_scripts.app_with_import',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites',
],
sdict={'DEBUG': True},
)
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertEqual(out, 'System check identified no issues (0 silenced).\n')
def test_output_format(self):
""" All errors/warnings should be sorted by level and by message. """
self.write_settings(
'settings.py',
apps=[
'admin_scripts.app_raising_messages',
'django.contrib.auth',
'django.contrib.contenttypes',
],
sdict={'DEBUG': True},
)
args = ['check']
out, err = self.run_manage(args)
expected_err = (
"SystemCheckError: System check identified some issues:\n"
"\n"
"ERRORS:\n"
"?: An error\n"
"\tHINT: Error hint\n"
"\n"
"WARNINGS:\n"
"a: Second warning\n"
"obj: First warning\n"
"\tHINT: Hint\n"
"\n"
"System check identified 3 issues (0 silenced).\n"
)
self.assertEqual(err, expected_err)
self.assertNoOutput(out)
def test_warning_does_not_halt(self):
"""
        When there are only warnings or less serious messages, Django should
        not prevent the user from launching their project, so the `check`
        command should not raise a `CommandError` exception.
        This test also verifies the output format.
"""
self.write_settings(
'settings.py',
apps=[
'admin_scripts.app_raising_warning',
'django.contrib.auth',
'django.contrib.contenttypes',
],
sdict={'DEBUG': True},
)
args = ['check']
out, err = self.run_manage(args)
expected_err = (
"System check identified some issues:\n" # No "CommandError: " part
"\n"
"WARNINGS:\n"
"?: A warning\n"
"\n"
"System check identified 1 issue (0 silenced).\n"
)
self.assertEqual(err, expected_err)
self.assertNoOutput(out)
class ManageRunserver(SimpleTestCase):
def setUp(self):
def monkey_run(*args, **options):
return
self.output = StringIO()
self.cmd = RunserverCommand(stdout=self.output)
self.cmd.run = monkey_run
def assertServerSettings(self, addr, port, ipv6=False, raw_ipv6=False):
self.assertEqual(self.cmd.addr, addr)
self.assertEqual(self.cmd.port, port)
self.assertEqual(self.cmd.use_ipv6, ipv6)
self.assertEqual(self.cmd._raw_ipv6, raw_ipv6)
def test_runserver_addrport(self):
call_command(self.cmd)
self.assertServerSettings('127.0.0.1', '8000')
call_command(self.cmd, addrport="1.2.3.4:8000")
self.assertServerSettings('1.2.3.4', '8000')
call_command(self.cmd, addrport="7000")
self.assertServerSettings('127.0.0.1', '7000')
@unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
def test_runner_addrport_ipv6(self):
call_command(self.cmd, addrport="", use_ipv6=True)
self.assertServerSettings('::1', '8000', ipv6=True, raw_ipv6=True)
call_command(self.cmd, addrport="7000", use_ipv6=True)
self.assertServerSettings('::1', '7000', ipv6=True, raw_ipv6=True)
call_command(self.cmd, addrport="[2001:0db8:1234:5678::9]:7000")
self.assertServerSettings('2001:0db8:1234:5678::9', '7000', ipv6=True, raw_ipv6=True)
def test_runner_hostname(self):
call_command(self.cmd, addrport="localhost:8000")
self.assertServerSettings('localhost', '8000')
call_command(self.cmd, addrport="test.domain.local:7000")
self.assertServerSettings('test.domain.local', '7000')
@unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
def test_runner_hostname_ipv6(self):
call_command(self.cmd, addrport="test.domain.local:7000", use_ipv6=True)
self.assertServerSettings('test.domain.local', '7000', ipv6=True)
def test_runner_custom_defaults(self):
self.cmd.default_addr = '0.0.0.0'
self.cmd.default_port = '5000'
call_command(self.cmd)
self.assertServerSettings('0.0.0.0', '5000')
@unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
def test_runner_custom_defaults_ipv6(self):
self.cmd.default_addr_ipv6 = '::'
call_command(self.cmd, use_ipv6=True)
self.assertServerSettings('::', '8000', ipv6=True, raw_ipv6=True)
def test_runner_ambiguous(self):
# Only 4 characters, all of which could be in an ipv6 address
call_command(self.cmd, addrport="beef:7654")
self.assertServerSettings('beef', '7654')
# Uses only characters that could be in an ipv6 address
call_command(self.cmd, addrport="deadbeef:7654")
self.assertServerSettings('deadbeef', '7654')
def test_no_database(self):
"""
Ensure runserver.check_migrations doesn't choke on empty DATABASES.
"""
tested_connections = ConnectionHandler({})
with mock.patch('django.core.management.base.connections', new=tested_connections):
self.cmd.check_migrations()
def test_readonly_database(self):
"""
runserver.check_migrations() doesn't choke when a database is read-only.
"""
with mock.patch.object(MigrationRecorder, 'has_table', return_value=False):
self.cmd.check_migrations()
        # The "You have N unapplied migration(s)" warning is still emitted.
self.assertIn('unapplied migration(s)', self.output.getvalue())
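# A rough, simplified sketch of the addr:port splitting asserted above. The
# real logic in the runserver command uses a regex (naiveip_re); this helper
# is hypothetical and only illustrates the observable behavior.
def _split_addrport_sketch(addrport, default_addr='127.0.0.1', default_port='8000'):
    if not addrport:
        return default_addr, default_port
    if addrport.isdigit():  # bare port, e.g. "7000"
        return default_addr, addrport
    if addrport.startswith('['):  # bracketed IPv6 literal, e.g. "[::1]:7000"
        addr, _, port = addrport.partition(']:')
        return addr.lstrip('['), port
    # "beef:7654" parses as host "beef" on port 7654, as the tests assert.
    addr, _, port = addrport.rpartition(':')
    return addr or default_addr, port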
class ManageRunserverMigrationWarning(TestCase):
def setUp(self):
self.stdout = StringIO()
self.runserver_command = RunserverCommand(stdout=self.stdout)
@override_settings(INSTALLED_APPS=["admin_scripts.app_waiting_migration"])
def test_migration_warning_one_app(self):
self.runserver_command.check_migrations()
output = self.stdout.getvalue()
self.assertIn('You have 1 unapplied migration(s)', output)
self.assertIn('apply the migrations for app(s): app_waiting_migration.', output)
@override_settings(
INSTALLED_APPS=[
"admin_scripts.app_waiting_migration",
"admin_scripts.another_app_waiting_migration",
],
)
def test_migration_warning_multiple_apps(self):
self.runserver_command.check_migrations()
output = self.stdout.getvalue()
self.assertIn('You have 2 unapplied migration(s)', output)
self.assertIn(
'apply the migrations for app(s): another_app_waiting_migration, '
'app_waiting_migration.', output
)
class ManageRunserverEmptyAllowedHosts(AdminScriptTestCase):
def setUp(self):
super().setUp()
self.write_settings('settings.py', sdict={
'ALLOWED_HOSTS': [],
'DEBUG': False,
})
def test_empty_allowed_hosts_error(self):
out, err = self.run_manage(['runserver'])
self.assertNoOutput(out)
self.assertOutput(err, 'CommandError: You must set settings.ALLOWED_HOSTS if DEBUG is False.')
class ManageTestserver(SimpleTestCase):
@mock.patch.object(TestserverCommand, 'handle', return_value='')
def test_testserver_handle_params(self, mock_handle):
out = StringIO()
call_command('testserver', 'blah.json', stdout=out)
mock_handle.assert_called_with(
'blah.json',
stdout=out, settings=None, pythonpath=None, verbosity=1,
traceback=False, addrport='', no_color=False, use_ipv6=False,
skip_checks=True, interactive=True, force_color=False,
)
@mock.patch('django.db.connection.creation.create_test_db', return_value='test_db')
@mock.patch.object(LoaddataCommand, 'handle', return_value='')
@mock.patch.object(RunserverCommand, 'handle', return_value='')
def test_params_to_runserver(self, mock_runserver_handle, mock_loaddata_handle, mock_create_test_db):
out = StringIO()
call_command('testserver', 'blah.json', stdout=out)
mock_runserver_handle.assert_called_with(
addrport='',
force_color=False,
insecure_serving=False,
no_color=False,
pythonpath=None,
settings=None,
shutdown_message=(
"\nServer stopped.\nNote that the test database, 'test_db', "
"has not been deleted. You can explore it on your own."
),
skip_checks=True,
traceback=False,
use_ipv6=False,
use_reloader=False,
use_static_handler=True,
use_threading=connection.features.test_db_allows_multiple_connections,
verbosity=1,
)
##########################################################################
# COMMAND PROCESSING TESTS
# user-space commands are correctly handled - in particular, arguments to
# the commands are correctly parsed and processed.
##########################################################################
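# The user commands exercised below (base_command, noargs_command, ...) are
# defined in the test app's management/commands package. A minimal sketch of
# the shape base_command takes, reconstructed from the expected output
# strings in these tests; the real implementation may differ.
class _BaseCommandSketch(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument('args', metavar='label', nargs='*')
        parser.add_argument('-a', '--option_a', default='1')
        parser.add_argument('-b', '--option_b', default='2')
        parser.add_argument('-c', '--option_c', default='3')
    def handle(self, *labels, **options):
        # Options are echoed sorted by name, matching _test_base_command().
        options = sorted(options.items())
        self.stdout.write('EXECUTE:BaseCommand labels=%s, options=%s' % (labels, options))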
class ColorCommand(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stdout.write('Hello, world!', self.style.ERROR)
self.stderr.write('Hello, world!', self.style.ERROR)
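# With the default palette, style.ERROR wraps text in ANSI escapes:
# '\x1b[31;1m' (bold red) before and '\x1b[0m' (reset) after, which is what
# the force_color tests below assert verbatim.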
class CommandTypes(AdminScriptTestCase):
"Tests for the various types of base command types that can be defined."
def setUp(self):
super().setUp()
self.write_settings('settings.py')
def test_version(self):
"version is handled as a special case"
args = ['version']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, get_version())
def test_version_alternative(self):
"--version is equivalent to version"
args1, args2 = ['version'], ['--version']
# It's possible one outputs on stderr and the other on stdout, hence the set
self.assertEqual(set(self.run_manage(args1)), set(self.run_manage(args2)))
def test_help(self):
"help is handled as a special case"
args = ['help']
out, err = self.run_manage(args)
self.assertOutput(out, "Type 'manage.py help <subcommand>' for help on a specific subcommand.")
self.assertOutput(out, '[django]')
self.assertOutput(out, 'startapp')
self.assertOutput(out, 'startproject')
def test_help_commands(self):
"help --commands shows the list of all available commands"
args = ['help', '--commands']
out, err = self.run_manage(args)
self.assertNotInOutput(out, 'usage:')
self.assertNotInOutput(out, 'Options:')
self.assertNotInOutput(out, '[django]')
self.assertOutput(out, 'startapp')
self.assertOutput(out, 'startproject')
self.assertNotInOutput(out, '\n\n')
def test_help_alternative(self):
"--help is equivalent to help"
args1, args2 = ['help'], ['--help']
self.assertEqual(self.run_manage(args1), self.run_manage(args2))
    def test_help_short_alternative(self):
"-h is handled as a short form of --help"
args1, args2 = ['--help'], ['-h']
self.assertEqual(self.run_manage(args1), self.run_manage(args2))
def test_specific_help(self):
"--help can be used on a specific command"
args = ['check', '--help']
out, err = self.run_manage(args)
self.assertNoOutput(err)
# Command-specific options like --tag appear before options common to
# all commands like --version.
tag_location = out.find('--tag')
version_location = out.find('--version')
self.assertNotEqual(tag_location, -1)
self.assertNotEqual(version_location, -1)
self.assertLess(tag_location, version_location)
self.assertOutput(out, "Checks the entire Django project for potential problems.")
def test_color_style(self):
style = color.no_style()
self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!')
style = color.make_style('nocolor')
self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!')
style = color.make_style('dark')
self.assertIn('Hello, world!', style.ERROR('Hello, world!'))
self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!')
# Default palette has color.
style = color.make_style('')
self.assertIn('Hello, world!', style.ERROR('Hello, world!'))
self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!')
def test_command_color(self):
out = StringIO()
err = StringIO()
command = ColorCommand(stdout=out, stderr=err)
call_command(command)
if color.supports_color():
self.assertIn('Hello, world!\n', out.getvalue())
self.assertIn('Hello, world!\n', err.getvalue())
self.assertNotEqual(out.getvalue(), 'Hello, world!\n')
self.assertNotEqual(err.getvalue(), 'Hello, world!\n')
else:
self.assertEqual(out.getvalue(), 'Hello, world!\n')
self.assertEqual(err.getvalue(), 'Hello, world!\n')
def test_command_no_color(self):
"--no-color prevent colorization of the output"
out = StringIO()
err = StringIO()
command = ColorCommand(stdout=out, stderr=err, no_color=True)
call_command(command)
self.assertEqual(out.getvalue(), 'Hello, world!\n')
self.assertEqual(err.getvalue(), 'Hello, world!\n')
out = StringIO()
err = StringIO()
command = ColorCommand(stdout=out, stderr=err)
call_command(command, no_color=True)
self.assertEqual(out.getvalue(), 'Hello, world!\n')
self.assertEqual(err.getvalue(), 'Hello, world!\n')
def test_force_color_execute(self):
out = StringIO()
err = StringIO()
with mock.patch.object(sys.stdout, 'isatty', lambda: False):
command = ColorCommand(stdout=out, stderr=err)
call_command(command, force_color=True)
self.assertEqual(out.getvalue(), '\x1b[31;1mHello, world!\n\x1b[0m')
self.assertEqual(err.getvalue(), '\x1b[31;1mHello, world!\n\x1b[0m')
def test_force_color_command_init(self):
out = StringIO()
err = StringIO()
with mock.patch.object(sys.stdout, 'isatty', lambda: False):
command = ColorCommand(stdout=out, stderr=err, force_color=True)
call_command(command)
self.assertEqual(out.getvalue(), '\x1b[31;1mHello, world!\n\x1b[0m')
self.assertEqual(err.getvalue(), '\x1b[31;1mHello, world!\n\x1b[0m')
def test_no_color_force_color_mutually_exclusive_execute(self):
msg = "The --no-color and --force-color options can't be used together."
with self.assertRaisesMessage(CommandError, msg):
call_command(BaseCommand(), no_color=True, force_color=True)
def test_no_color_force_color_mutually_exclusive_command_init(self):
msg = "'no_color' and 'force_color' can't be used together."
with self.assertRaisesMessage(CommandError, msg):
call_command(BaseCommand(no_color=True, force_color=True))
def test_custom_stdout(self):
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stdout.write("Hello, World!")
out = StringIO()
command = Command(stdout=out)
call_command(command)
self.assertEqual(out.getvalue(), "Hello, World!\n")
out.truncate(0)
new_out = StringIO()
call_command(command, stdout=new_out)
self.assertEqual(out.getvalue(), "")
self.assertEqual(new_out.getvalue(), "Hello, World!\n")
def test_custom_stderr(self):
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stderr.write("Hello, World!")
err = StringIO()
command = Command(stderr=err)
call_command(command)
self.assertEqual(err.getvalue(), "Hello, World!\n")
err.truncate(0)
new_err = StringIO()
call_command(command, stderr=new_err)
self.assertEqual(err.getvalue(), "")
self.assertEqual(new_err.getvalue(), "Hello, World!\n")
def test_base_command(self):
"User BaseCommands can execute when a label is provided"
args = ['base_command', 'testlabel']
expected_labels = "('testlabel',)"
self._test_base_command(args, expected_labels)
def test_base_command_no_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command']
expected_labels = "()"
self._test_base_command(args, expected_labels)
def test_base_command_multiple_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command', 'testlabel', 'anotherlabel']
expected_labels = "('testlabel', 'anotherlabel')"
self._test_base_command(args, expected_labels)
def test_base_command_with_option(self):
"User BaseCommands can execute with options when a label is provided"
args = ['base_command', 'testlabel', '--option_a=x']
expected_labels = "('testlabel',)"
self._test_base_command(args, expected_labels, option_a="'x'")
def test_base_command_with_options(self):
"User BaseCommands can execute with multiple options when a label is provided"
args = ['base_command', 'testlabel', '-a', 'x', '--option_b=y']
expected_labels = "('testlabel',)"
self._test_base_command(args, expected_labels, option_a="'x'", option_b="'y'")
def test_base_command_with_wrong_option(self):
"User BaseCommands outputs command usage when wrong option is specified"
args = ['base_command', '--invalid']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "usage: manage.py base_command")
self.assertOutput(err, "error: unrecognized arguments: --invalid")
def _test_base_command(self, args, labels, option_a="'1'", option_b="'2'"):
out, err = self.run_manage(args)
expected_out = (
"EXECUTE:BaseCommand labels=%s, "
"options=[('force_color', False), ('no_color', False), "
"('option_a', %s), ('option_b', %s), ('option_c', '3'), "
"('pythonpath', None), ('settings', None), ('traceback', False), "
"('verbosity', 1)]") % (labels, option_a, option_b)
self.assertNoOutput(err)
self.assertOutput(out, expected_out)
def test_base_run_from_argv(self):
"""
Test run_from_argv properly terminates even with custom execute() (#19665)
Also test proper traceback display.
"""
err = StringIO()
command = BaseCommand(stderr=err)
def raise_command_error(*args, **kwargs):
raise CommandError("Custom error")
command.execute = lambda args: args # This will trigger TypeError
# If the Exception is not CommandError it should always
# raise the original exception.
with self.assertRaises(TypeError):
command.run_from_argv(['', ''])
# If the Exception is CommandError and --traceback is not present
        # this command should raise SystemExit and not print any
        # traceback to stderr.
command.execute = raise_command_error
err.truncate(0)
with self.assertRaises(SystemExit):
command.run_from_argv(['', ''])
err_message = err.getvalue()
self.assertNotIn("Traceback", err_message)
self.assertIn("CommandError", err_message)
# If the Exception is CommandError and --traceback is present
# this command should raise the original CommandError as if it
# were not a CommandError.
err.truncate(0)
with self.assertRaises(CommandError):
command.run_from_argv(['', '', '--traceback'])
def test_run_from_argv_non_ascii_error(self):
"""
Non-ASCII message of CommandError does not raise any
UnicodeDecodeError in run_from_argv.
"""
def raise_command_error(*args, **kwargs):
raise CommandError("Erreur personnalisée")
command = BaseCommand(stderr=StringIO())
command.execute = raise_command_error
with self.assertRaises(SystemExit):
command.run_from_argv(['', ''])
def test_run_from_argv_closes_connections(self):
"""
A command called from the command line should close connections after
being executed (#21255).
"""
command = BaseCommand(stderr=StringIO())
command.check = lambda: []
command.handle = lambda *args, **kwargs: args
with mock.patch('django.core.management.base.connections') as mock_connections:
command.run_from_argv(['', ''])
# Test connections have been closed
self.assertTrue(mock_connections.close_all.called)
def test_noargs(self):
"NoArg Commands can be executed"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(
out,
"EXECUTE: noargs_command options=[('force_color', False), "
"('no_color', False), ('pythonpath', None), ('settings', None), "
"('traceback', False), ('verbosity', 1)]"
)
def test_noargs_with_args(self):
"NoArg Commands raise an error if an argument is provided"
args = ['noargs_command', 'argument']
out, err = self.run_manage(args)
self.assertOutput(err, "error: unrecognized arguments: argument")
def test_app_command(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'auth']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=")
self.assertOutput(
out,
", options=[('force_color', False), ('no_color', False), "
"('pythonpath', None), ('settings', None), ('traceback', False), "
"('verbosity', 1)]"
)
def test_app_command_no_apps(self):
"User AppCommands raise an error when no app name is provided"
args = ['app_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'error: Enter at least one application label.')
def test_app_command_multiple_apps(self):
"User AppCommands raise an error when multiple app names are provided"
args = ['app_command', 'auth', 'contenttypes']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=")
self.assertOutput(
out,
", options=[('force_color', False), ('no_color', False), "
"('pythonpath', None), ('settings', None), ('traceback', False), "
"('verbosity', 1)]"
)
self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.contenttypes, options=")
self.assertOutput(
out,
", options=[('force_color', False), ('no_color', False), "
"('pythonpath', None), ('settings', None), ('traceback', False), "
"('verbosity', 1)]"
)
def test_app_command_invalid_app_label(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")
def test_app_command_some_invalid_app_labels(self):
"User AppCommands can execute when some of the provided app names are invalid"
args = ['app_command', 'auth', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")
def test_label_command(self):
"User LabelCommands can execute when a label is provided"
args = ['label_command', 'testlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(
out,
"EXECUTE:LabelCommand label=testlabel, options=[('force_color', "
"False), ('no_color', False), ('pythonpath', None), ('settings', "
"None), ('traceback', False), ('verbosity', 1)]"
)
def test_label_command_no_label(self):
"User LabelCommands raise an error if no label is provided"
args = ['label_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'Enter at least one label')
def test_label_command_multiple_label(self):
"User LabelCommands are executed multiple times if multiple labels are provided"
args = ['label_command', 'testlabel', 'anotherlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(
out,
"EXECUTE:LabelCommand label=testlabel, options=[('force_color', "
"False), ('no_color', False), ('pythonpath', None), "
"('settings', None), ('traceback', False), ('verbosity', 1)]"
)
self.assertOutput(
out,
"EXECUTE:LabelCommand label=anotherlabel, options=[('force_color', "
"False), ('no_color', False), ('pythonpath', None), "
"('settings', None), ('traceback', False), ('verbosity', 1)]"
)
class Discovery(SimpleTestCase):
def test_precedence(self):
"""
Apps listed first in INSTALLED_APPS have precedence.
"""
with self.settings(INSTALLED_APPS=['admin_scripts.complex_app',
'admin_scripts.simple_app',
'django.contrib.auth',
'django.contrib.contenttypes']):
out = StringIO()
call_command('duplicate', stdout=out)
self.assertEqual(out.getvalue().strip(), 'complex_app')
with self.settings(INSTALLED_APPS=['admin_scripts.simple_app',
'admin_scripts.complex_app',
'django.contrib.auth',
'django.contrib.contenttypes']):
out = StringIO()
call_command('duplicate', stdout=out)
self.assertEqual(out.getvalue().strip(), 'simple_app')
class ArgumentOrder(AdminScriptTestCase):
"""Tests for 2-stage argument parsing scheme.
django-admin command arguments are parsed in 2 parts; the core arguments
(--settings, --traceback and --pythonpath) are parsed using a basic parser,
    ignoring any unknown options. Then the full set of arguments is passed to
    the command parser, which extracts the options of interest to the
    individual command. (A simplified sketch of this scheme follows the class.)
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def test_setting_then_option(self):
""" Options passed after settings are correctly handled. """
args = ['base_command', 'testlabel', '--settings=alternate_settings', '--option_a=x']
self._test(args)
def test_setting_then_short_option(self):
""" Short options passed after settings are correctly handled. """
args = ['base_command', 'testlabel', '--settings=alternate_settings', '-a', 'x']
self._test(args)
def test_option_then_setting(self):
""" Options passed before settings are correctly handled. """
args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings']
self._test(args)
def test_short_option_then_setting(self):
""" Short options passed before settings are correctly handled. """
args = ['base_command', 'testlabel', '-a', 'x', '--settings=alternate_settings']
self._test(args)
def test_option_then_setting_then_option(self):
""" Options are correctly handled when they are passed before and after
a setting. """
args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings', '--option_b=y']
self._test(args, option_b="'y'")
def _test(self, args, option_b="'2'"):
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(
out,
"EXECUTE:BaseCommand labels=('testlabel',), options=["
"('force_color', False), ('no_color', False), ('option_a', 'x'), "
"('option_b', %s), ('option_c', '3'), ('pythonpath', None), "
"('settings', 'alternate_settings'), ('traceback', False), "
"('verbosity', 1)]" % option_b
)
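# A minimal sketch of the 2-stage parsing described in ArgumentOrder's
# docstring, using argparse.parse_known_args(); this helper is hypothetical
# and simplified relative to Django's actual option handling.
def _preparse_core_options_sketch(argv):
    import argparse
    preparser = argparse.ArgumentParser(add_help=False)
    preparser.add_argument('--settings')
    preparser.add_argument('--pythonpath')
    # parse_known_args() pulls out the core options wherever they appear in
    # argv and ignores everything else, leaving it for the command's parser.
    options, remaining = preparser.parse_known_args(argv)
    return options.settings, options.pythonpath, remaining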
@override_settings(ROOT_URLCONF='admin_scripts.urls')
class StartProject(LiveServerTestCase, AdminScriptTestCase):
available_apps = [
'admin_scripts',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
]
def test_wrong_args(self):
"Make sure passing the wrong kinds of arguments outputs an error and prints usage"
out, err = self.run_django_admin(['startproject'])
self.assertNoOutput(out)
self.assertOutput(err, "usage:")
self.assertOutput(err, "You must provide a project name.")
def test_simple_project(self):
"Make sure the startproject management command creates a project"
args = ['startproject', 'testproject']
testproject_dir = os.path.join(self.test_dir, 'testproject')
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
        # Running the command a second time fails: the directory already exists.
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "already exists")
def test_invalid_project_name(self):
"Make sure the startproject management command validates a project name"
for bad_name in ('7testproject', '../testproject'):
with self.subTest(project_name=bad_name):
args = ['startproject', bad_name]
testproject_dir = os.path.join(self.test_dir, bad_name)
out, err = self.run_django_admin(args)
self.assertOutput(
err,
"Error: '%s' is not a valid project name. Please make "
"sure the name is a valid identifier." % bad_name
)
self.assertFalse(os.path.exists(testproject_dir))
def test_importable_project_name(self):
"""
startproject validates that project name doesn't clash with existing
Python modules.
"""
bad_name = 'os'
args = ['startproject', bad_name]
testproject_dir = os.path.join(self.test_dir, bad_name)
out, err = self.run_django_admin(args)
self.assertOutput(
err,
"CommandError: 'os' conflicts with the name of an existing "
"Python module and cannot be used as a project name. Please try "
"another name."
)
self.assertFalse(os.path.exists(testproject_dir))
def test_simple_project_different_directory(self):
"Make sure the startproject management command creates a project in a specific directory"
args = ['startproject', 'testproject', 'othertestproject']
testproject_dir = os.path.join(self.test_dir, 'othertestproject')
os.mkdir(testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'manage.py')))
        # Running the command a second time fails: the directory already exists.
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "already exists")
def test_custom_project_template(self):
"Make sure the startproject management command is able to use a different project template"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'customtestproject']
testproject_dir = os.path.join(self.test_dir, 'customtestproject')
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
def test_template_dir_with_trailing_slash(self):
"Ticket 17475: Template dir passed has a trailing path separator"
template_path = os.path.join(custom_templates_dir, 'project_template' + os.sep)
args = ['startproject', '--template', template_path, 'customtestproject']
testproject_dir = os.path.join(self.test_dir, 'customtestproject')
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
def test_custom_project_template_from_tarball_by_path(self):
"Make sure the startproject management command is able to use a different project template from a tarball"
template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
args = ['startproject', '--template', template_path, 'tarballtestproject']
testproject_dir = os.path.join(self.test_dir, 'tarballtestproject')
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_custom_project_template_from_tarball_to_alternative_location(self):
"Startproject can use a project template from a tarball and create it in a specified location"
template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
args = ['startproject', '--template', template_path, 'tarballtestproject', 'altlocation']
testproject_dir = os.path.join(self.test_dir, 'altlocation')
os.mkdir(testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_custom_project_template_from_tarball_by_url(self):
"""
The startproject management command is able to use a different project
template from a tarball via a URL.
"""
template_url = '%s/custom_templates/project_template.tgz' % self.live_server_url
args = ['startproject', '--template', template_url, 'urltestproject']
testproject_dir = os.path.join(self.test_dir, 'urltestproject')
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_project_template_tarball_url(self):
"Startproject management command handles project template tar/zip balls from non-canonical urls"
template_url = '%s/custom_templates/project_template.tgz/' % self.live_server_url
args = ['startproject', '--template', template_url, 'urltestproject']
testproject_dir = os.path.join(self.test_dir, 'urltestproject')
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_file_without_extension(self):
"Make sure the startproject management command is able to render custom files"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'customtestproject', '-e', 'txt', '-n', 'Procfile']
testproject_dir = os.path.join(self.test_dir, 'customtestproject')
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
base_path = os.path.join(testproject_dir, 'additional_dir')
for f in ('Procfile', 'additional_file.py', 'requirements.txt'):
self.assertTrue(os.path.exists(os.path.join(base_path, f)))
with open(os.path.join(base_path, f)) as fh:
self.assertEqual(fh.read().strip(), '# some file for customtestproject test project')
def test_custom_project_template_context_variables(self):
"Make sure template context variables are rendered with proper values"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'another_project', 'project_dir']
testproject_dir = os.path.join(self.test_dir, 'project_dir')
os.mkdir(testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, 'manage.py')
with open(test_manage_py) as fp:
content = fp.read()
self.assertIn("project_name = 'another_project'", content)
self.assertIn("project_directory = '%s'" % testproject_dir, content)
def test_no_escaping_of_project_variables(self):
"Make sure template context variables are not html escaped"
# We're using a custom command so we need the alternate settings
self.write_settings('alternate_settings.py')
template_path = os.path.join(custom_templates_dir, 'project_template')
args = [
'custom_startproject', '--template', template_path,
'another_project', 'project_dir', '--extra', '<&>',
'--settings=alternate_settings',
]
testproject_dir = os.path.join(self.test_dir, 'project_dir')
os.mkdir(testproject_dir)
out, err = self.run_manage(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, 'additional_dir', 'extra.py')
with open(test_manage_py) as fp:
content = fp.read()
self.assertIn("<&>", content)
def test_custom_project_destination_missing(self):
"""
Make sure an exception is raised when the provided
destination directory doesn't exist
"""
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'yet_another_project', 'project_dir2']
testproject_dir = os.path.join(self.test_dir, 'project_dir2')
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Destination directory '%s' does not exist, please create it first." % testproject_dir)
self.assertFalse(os.path.exists(testproject_dir))
def test_custom_project_template_with_non_ascii_templates(self):
"""
The startproject management command is able to render templates with
non-ASCII content.
"""
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, '--extension=txt', 'customtestproject']
testproject_dir = os.path.join(self.test_dir, 'customtestproject')
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
path = os.path.join(testproject_dir, 'ticket-18091-non-ascii-template.txt')
with open(path, encoding='utf-8') as f:
self.assertEqual(f.read().splitlines(False), [
'Some non-ASCII text for testing ticket #18091:',
'üäö €'])
class StartApp(AdminScriptTestCase):
def test_invalid_name(self):
"""startapp validates that app name is a valid Python identifier."""
for bad_name in ('7testproject', '../testproject'):
with self.subTest(app_name=bad_name):
args = ['startapp', bad_name]
testproject_dir = os.path.join(self.test_dir, bad_name)
out, err = self.run_django_admin(args)
self.assertOutput(
err,
"CommandError: '{}' is not a valid app name. Please make "
"sure the name is a valid identifier.".format(bad_name)
)
self.assertFalse(os.path.exists(testproject_dir))
def test_importable_name(self):
"""
startapp validates that app name doesn't clash with existing Python
modules.
"""
bad_name = 'os'
args = ['startapp', bad_name]
testproject_dir = os.path.join(self.test_dir, bad_name)
out, err = self.run_django_admin(args)
self.assertOutput(
err,
"CommandError: 'os' conflicts with the name of an existing "
"Python module and cannot be used as an app name. Please try "
"another name."
)
self.assertFalse(os.path.exists(testproject_dir))
class DiffSettings(AdminScriptTestCase):
"""Tests for diffsettings management command."""
def test_basic(self):
"""Runs without error and emits settings diff."""
self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'})
args = ['diffsettings', '--settings=settings_to_diff']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "FOO = 'bar' ###")
def test_settings_configured(self):
out, err = self.run_manage(['diffsettings'], configured_settings=True)
self.assertNoOutput(err)
self.assertOutput(out, 'CUSTOM = 1 ###\nDEBUG = True')
def test_all(self):
"""The all option also shows settings with the default value."""
self.write_settings('settings_to_diff.py', sdict={'STATIC_URL': 'None'})
args = ['diffsettings', '--settings=settings_to_diff', '--all']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "### STATIC_URL = None")
def test_custom_default(self):
"""
The --default option specifies an alternate settings module for
comparison.
"""
self.write_settings('settings_default.py', sdict={'FOO': '"foo"', 'BAR': '"bar1"'})
self.write_settings('settings_to_diff.py', sdict={'FOO': '"foo"', 'BAR': '"bar2"'})
out, err = self.run_manage(['diffsettings', '--settings=settings_to_diff', '--default=settings_default'])
self.assertNoOutput(err)
self.assertNotInOutput(out, "FOO")
self.assertOutput(out, "BAR = 'bar2'")
def test_unified(self):
"""--output=unified emits settings diff in unified mode."""
self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'})
args = ['diffsettings', '--settings=settings_to_diff', '--output=unified']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "+ FOO = 'bar'")
self.assertOutput(out, "- SECRET_KEY = ''")
self.assertOutput(out, "+ SECRET_KEY = 'django_tests_secret_key'")
self.assertNotInOutput(out, " APPEND_SLASH = True")
def test_unified_all(self):
"""
--output=unified --all emits settings diff in unified mode and includes
settings with the default value.
"""
self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'})
args = ['diffsettings', '--settings=settings_to_diff', '--output=unified', '--all']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, " APPEND_SLASH = True")
self.assertOutput(out, "+ FOO = 'bar'")
self.assertOutput(out, "- SECRET_KEY = ''")
class Dumpdata(AdminScriptTestCase):
"""Tests for dumpdata management command."""
def setUp(self):
super().setUp()
self.write_settings('settings.py')
def test_pks_parsing(self):
"""Regression for #20509
Test would raise an exception rather than printing an error message.
"""
args = ['dumpdata', '--pks=1']
out, err = self.run_manage(args)
self.assertOutput(err, "You can only use --pks option with one model")
self.assertNoOutput(out)
class MainModule(AdminScriptTestCase):
"""python -m django works like django-admin."""
def test_runs_django_admin(self):
cmd_out, _ = self.run_django_admin(['--version'])
mod_out, _ = self.run_test('-m', ['django', '--version'])
self.assertEqual(mod_out, cmd_out)
def test_program_name_in_help(self):
out, err = self.run_test('-m', ['django', 'help'])
self.assertOutput(out, "Type 'python -m django help <subcommand>' for help on a specific subcommand.")
class DjangoAdminSuggestions(AdminScriptTestCase):
def setUp(self):
super().setUp()
self.write_settings('settings.py')
def test_suggestions(self):
args = ['rnserver', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'rnserver'. Did you mean runserver?")
def test_no_suggestions(self):
args = ['abcdef', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertNotInOutput(err, 'Did you mean')
|
c55ebce57e6c1d6f7f84eea19bc2944e29dc19bbf20410cc6f9d418844db10dc | import base64
import hashlib
import os
import shutil
import sys
import tempfile as sys_tempfile
import unittest
from io import BytesIO, StringIO
from urllib.parse import quote
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http.multipartparser import (
MultiPartParser, MultiPartParserError, parse_header,
)
from django.test import SimpleTestCase, TestCase, client, override_settings
from . import uploadhandler
from .models import FileModel
UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg'
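# MEDIA_ROOT is a temporary directory created at import time; each test class
# below recreates it in setUpClass() and removes it again in tearDownClass().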
MEDIA_ROOT = sys_tempfile.mkdtemp()
UPLOAD_TO = os.path.join(MEDIA_ROOT, 'test_upload')
@override_settings(MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF='file_uploads.urls', MIDDLEWARE=[])
class FileUploadTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.makedirs(MEDIA_ROOT, exist_ok=True)
@classmethod
def tearDownClass(cls):
shutil.rmtree(MEDIA_ROOT)
super().tearDownClass()
def test_simple_upload(self):
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
response = self.client.post('/upload/', post_data)
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".file1") as file1, file(suffix=".file2") as file2:
file1.write(b'a' * (2 ** 21))
file1.seek(0)
file2.write(b'a' * (10 * 2 ** 20))
file2.seek(0)
post_data = {
'name': 'Ringo',
'file_field1': file1,
'file_field2': file2,
}
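            # Record a SHA-1 digest of each value, presumably so the /verify/
            # view can check the uploaded content; plain string fields have no
            # read() and are hashed in the except branch instead.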
for key in list(post_data):
try:
post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest()
post_data[key].seek(0)
except AttributeError:
post_data[key + '_hash'] = hashlib.sha1(post_data[key].encode()).hexdigest()
response = self.client.post('/verify/', post_data)
self.assertEqual(response.status_code, 200)
def _test_base64_upload(self, content, encode=base64.b64encode):
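        # Build a multipart payload whose single part declares
        # Content-Transfer-Encoding: base64, then check that the decoded
        # content round-trips through the echo view.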
payload = client.FakePayload("\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="test.txt"',
'Content-Type: application/octet-stream',
'Content-Transfer-Encoding: base64',
'']))
payload.write(b'\r\n' + encode(content.encode()) + b'\r\n')
payload.write('--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo_content/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.json()['file'], content)
def test_base64_upload(self):
self._test_base64_upload("This data will be transmitted base64-encoded.")
def test_big_base64_upload(self):
self._test_base64_upload("Big data" * 68000) # > 512Kb
def test_big_base64_newlines_upload(self):
self._test_base64_upload("Big data" * 68000, encode=base64.encodebytes)
def test_unicode_file_name(self):
with sys_tempfile.TemporaryDirectory() as temp_dir:
# This file contains Chinese symbols and an accented char in the name.
with open(os.path.join(temp_dir, UNICODE_FILENAME), 'w+b') as file1:
file1.write(b'b' * (2 ** 10))
file1.seek(0)
response = self.client.post('/unicode_name/', {'file_unicode': file1})
self.assertEqual(response.status_code, 200)
def test_unicode_file_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'%s' % quote(UNICODE_FILENAME),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
]))
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_unicode_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write(
'\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'%s' % quote(
UNICODE_FILENAME
),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
])
)
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_blank_filenames(self):
"""
Receiving file upload when filename is blank (before and after
sanitization) should be okay.
"""
# The second value is normalized to an empty name by
# MultiPartParser.IE_sanitize()
filenames = ['', 'C:\\Windows\\']
payload = client.FakePayload()
for i, name in enumerate(filenames):
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n'
]))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
# Empty filenames should be ignored
received = response.json()
for i, name in enumerate(filenames):
self.assertIsNone(received.get('file%s' % i))
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
        # This test simulates possible directory traversal attacks by a
        # malicious uploader. We have to do some monkeybusiness here to
        # construct a malicious payload with an invalid file name (containing
        # os.sep or os.pardir). This is similar to what an attacker would need
        # to do when trying such an attack.
scary_file_names = [
"/tmp/hax0rd.txt", # Absolute path, *nix-style.
"C:\\Windows\\hax0rd.txt", # Absolute path, win-style.
"C:/Windows/hax0rd.txt", # Absolute path, broken-style.
"\\tmp\\hax0rd.txt", # Absolute path, broken in a different way.
"/tmp\\hax0rd.txt", # Absolute path, broken by mixing.
"subdir/hax0rd.txt", # Descendant path, *nix-style.
"subdir\\hax0rd.txt", # Descendant path, win-style.
"sub/dir\\hax0rd.txt", # Descendant path, mixed.
"../../hax0rd.txt", # Relative path, *nix-style.
"..\\..\\hax0rd.txt", # Relative path, win-style.
"../..\\hax0rd.txt" # Relative path, mixed.
]
payload = client.FakePayload()
for i, name in enumerate(scary_file_names):
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n'
]))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
        # The filenames should have been sanitized by the time they reach the view.
received = response.json()
for i, name in enumerate(scary_file_names):
got = received["file%s" % i]
self.assertEqual(got, "hax0rd.txt")
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
long_str = 'f' * 300
cases = [
# field name, filename, expected
('long_filename', '%s.txt' % long_str, '%s.txt' % long_str[:251]),
('long_extension', 'foo.%s' % long_str, '.%s' % long_str[:254]),
('no_extension', long_str, long_str[:255]),
('no_filename', '.%s' % long_str, '.%s' % long_str[:254]),
('long_everything', '%s.%s' % (long_str, long_str), '.%s' % long_str[:254]),
]
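        # All cases go into a single multipart payload so one request
        # exercises every truncation rule at once.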
payload = client.FakePayload()
for name, filename, _ in cases:
payload.write("\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="{}"; filename="{}"',
'Content-Type: application/octet-stream',
'',
'Oops.',
''
]).format(name, filename))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
result = response.json()
for name, _, expected in cases:
got = result[name]
self.assertEqual(expected, got, 'Mismatch for {}'.format(name))
self.assertLess(len(got), 256,
"Got a long file name (%s characters)." % len(got))
def test_file_content(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
no_content_type.write(b'no content')
no_content_type.seek(0)
simple_file.write(b'text content')
simple_file.seek(0)
simple_file.content_type = 'text/plain'
string_io = StringIO('string content')
bytes_io = BytesIO(b'binary content')
response = self.client.post('/echo_content/', {
'no_content_type': no_content_type,
'simple_file': simple_file,
'string': string_io,
'binary': bytes_io,
})
received = response.json()
self.assertEqual(received['no_content_type'], 'no content')
self.assertEqual(received['simple_file'], 'text content')
self.assertEqual(received['string'], 'string content')
self.assertEqual(received['binary'], 'binary content')
def test_content_type_extra(self):
"""Uploaded files may have content type parameters available."""
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
no_content_type.write(b'something')
no_content_type.seek(0)
simple_file.write(b'something')
simple_file.seek(0)
simple_file.content_type = 'text/plain; test-key=test_value'
response = self.client.post('/echo_content_type_extra/', {
'no_content_type': no_content_type,
'simple_file': simple_file,
})
received = response.json()
self.assertEqual(received['no_content_type'], {})
self.assertEqual(received['simple_file'], {'test-key': 'test_value'})
def test_truncated_multipart_handled_gracefully(self):
"""
        If passed an incomplete multipart message, MultiPartParser doesn't
        attempt to read beyond the end of the stream and gracefully handles
        the part that can be parsed.
"""
payload_str = "\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="foo.txt"',
'Content-Type: application/octet-stream',
'',
'file contents'
'--' + client.BOUNDARY + '--',
'',
])
payload = client.FakePayload(payload_str[:-10])
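        # Chopping off the last 10 bytes leaves the closing boundary
        # incomplete, simulating a client that disconnected mid-upload.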
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
self.assertEqual(self.client.request(**r).json(), {})
def test_empty_multipart_handled_gracefully(self):
"""
If passed an empty multipart message, MultiPartParser will return
an empty QueryDict.
"""
r = {
'CONTENT_LENGTH': 0,
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(b''),
}
self.assertEqual(self.client.request(**r).json(), {})
def test_custom_upload_handler(self):
file = tempfile.NamedTemporaryFile
with file() as smallfile, file() as bigfile:
# A small file (under the 5M quota)
smallfile.write(b'a' * (2 ** 21))
smallfile.seek(0)
# A big file (over the quota)
bigfile.write(b'a' * (10 * 2 ** 20))
bigfile.seek(0)
# Small file posting should work.
self.assertIn('f', self.client.post('/quota/', {'f': smallfile}).json())
# Large files don't go through.
self.assertNotIn('f', self.client.post("/quota/", {'f': bigfile}).json())
def test_broken_custom_upload_handler(self):
with tempfile.NamedTemporaryFile() as file:
file.write(b'a' * (2 ** 21))
file.seek(0)
# AttributeError: You cannot alter upload handlers after the upload has been processed.
with self.assertRaises(AttributeError):
self.client.post('/quota/broken/', {'f': file})
def test_fileupload_getlist(self):
file = tempfile.NamedTemporaryFile
with file() as file1, file() as file2, file() as file2a:
file1.write(b'a' * (2 ** 23))
file1.seek(0)
file2.write(b'a' * (2 * 2 ** 18))
file2.seek(0)
file2a.write(b'a' * (5 * 2 ** 20))
file2a.seek(0)
response = self.client.post('/getlist_count/', {
'file1': file1,
'field1': 'test',
'field2': 'test3',
'field3': 'test5',
'field4': 'test6',
'field5': 'test7',
'file2': (file2, file2a)
})
got = response.json()
self.assertEqual(got.get('file1'), 1)
self.assertEqual(got.get('file2'), 2)
def test_fileuploads_closed_at_request_end(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post('/fd_closing/t/', {
'file': f1,
'file2': (f2a, f2b),
})
request = response.wsgi_request
# The files were parsed.
self.assertTrue(hasattr(request, '_files'))
file = request._files['file']
self.assertTrue(file.closed)
files = request._files.getlist('file2')
self.assertTrue(files[0].closed)
self.assertTrue(files[1].closed)
def test_no_parsing_triggered_by_fd_closing(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post('/fd_closing/f/', {
'file': f1,
'file2': (f2a, f2b),
})
request = response.wsgi_request
# The fd closing logic doesn't trigger parsing of the stream
self.assertFalse(hasattr(request, '_files'))
def test_file_error_blocking(self):
"""
The server should not block when there are upload errors (bug #8622).
        This can happen if something -- e.g. an exception handler -- tries to
access POST while handling an error in parsing POST. This shouldn't
cause an infinite loop!
"""
class POSTAccessingHandler(client.ClientHandler):
"""A handler that'll access POST during an exception."""
def handle_uncaught_exception(self, request, resolver, exc_info):
ret = super().handle_uncaught_exception(request, resolver, exc_info)
request.POST # evaluate
return ret
        # Maybe this is a little more complicated than it needs to be; but if
# the django.test.client.FakePayload.read() implementation changes then
# this test would fail. So we need to know exactly what kind of error
# it raises when there is an attempt to read more than the available bytes:
try:
client.FakePayload(b'a').read(2)
except Exception as err:
reference_error = err
# install the custom handler that tries to access request.POST
self.client.handler = POSTAccessingHandler()
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
try:
self.client.post('/upload_errors/', post_data)
except reference_error.__class__ as err:
self.assertFalse(
str(err) == str(reference_error),
"Caught a repeated exception that'll cause an infinite loop in file uploads."
)
except Exception as err:
# CustomUploadError is the error that should have been raised
self.assertEqual(err.__class__, uploadhandler.CustomUploadError)
def test_filename_case_preservation(self):
"""
The storage backend shouldn't mess with the case of the filenames
uploaded.
"""
# Synthesize the contents of a file upload with a mixed case filename
# so we don't have to carry such a file in the Django tests source code
# tree.
vars = {'boundary': 'oUrBoUnDaRyStRiNg'}
post_data = [
'--%(boundary)s',
'Content-Disposition: form-data; name="file_field"; filename="MiXeD_cAsE.txt"',
'Content-Type: application/octet-stream',
'',
'file contents\n'
'',
'--%(boundary)s--\r\n',
]
response = self.client.post(
'/filename_case/',
'\r\n'.join(post_data) % vars,
'multipart/form-data; boundary=%(boundary)s' % vars
)
self.assertEqual(response.status_code, 200)
id = int(response.content)
obj = FileModel.objects.get(pk=id)
        # The name of the file uploaded and the name of the file stored
        # server-side shouldn't differ.
self.assertEqual(os.path.basename(obj.testfile.path), 'MiXeD_cAsE.txt')
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class DirectoryCreationTests(SimpleTestCase):
"""
Tests for error handling during directory creation
via _save_FIELD_file (ticket #6450)
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
os.makedirs(MEDIA_ROOT, exist_ok=True)
@classmethod
def tearDownClass(cls):
shutil.rmtree(MEDIA_ROOT)
super().tearDownClass()
def setUp(self):
self.obj = FileModel()
@unittest.skipIf(sys.platform == 'win32', "Python on Windows doesn't have working os.chmod().")
def test_readonly_root(self):
"""Permission errors are not swallowed"""
os.chmod(MEDIA_ROOT, 0o500)
self.addCleanup(os.chmod, MEDIA_ROOT, 0o700)
with self.assertRaises(PermissionError):
self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x'), save=False)
def test_not_a_directory(self):
# Create a file with the upload directory name
open(UPLOAD_TO, 'wb').close()
self.addCleanup(os.remove, UPLOAD_TO)
msg = '%s exists and is not a directory.' % UPLOAD_TO
with self.assertRaisesMessage(FileExistsError, msg):
with SimpleUploadedFile('foo.txt', b'x') as file:
self.obj.testfile.save('foo.txt', file, save=False)
class MultiParserTests(SimpleTestCase):
def test_empty_upload_handlers(self):
# We're not actually parsing here; just checking if the parser properly
# instantiates with empty upload handlers.
MultiPartParser({
'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
'CONTENT_LENGTH': '1'
}, StringIO('x'), [], 'utf-8')
def test_invalid_content_type(self):
with self.assertRaisesMessage(MultiPartParserError, 'Invalid Content-Type: text/plain'):
MultiPartParser({
'CONTENT_TYPE': 'text/plain',
'CONTENT_LENGTH': '1',
}, StringIO('x'), [], 'utf-8')
def test_negative_content_length(self):
with self.assertRaisesMessage(MultiPartParserError, 'Invalid content length: -1'):
MultiPartParser({
'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
'CONTENT_LENGTH': -1,
}, StringIO('x'), [], 'utf-8')
def test_bad_type_content_length(self):
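        # A non-numeric CONTENT_LENGTH is coerced to 0 rather than raising.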
multipart_parser = MultiPartParser({
'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
'CONTENT_LENGTH': 'a',
}, StringIO('x'), [], 'utf-8')
self.assertEqual(multipart_parser._content_length, 0)
def test_rfc2231_parsing(self):
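        # Each tuple pairs a raw Content-Type header with the title expected
        # after RFC 2231 decoding (charset'language'percent-encoded-value).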
test_data = (
(b"Content-Type: application/x-stuff; title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
"This is ***fun***"),
(b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
"foo-ä.html"),
(b"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
"foo-ä.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_header(raw_line)
self.assertEqual(parsed[1]['title'], expected_title)
def test_rfc2231_wrong_title(self):
"""
Test wrongly formatted RFC 2231 headers (missing double single quotes).
Parsing should not crash (#24209).
"""
test_data = (
(b"Content-Type: application/x-stuff; title*='This%20is%20%2A%2A%2Afun%2A%2A%2A",
b"'This%20is%20%2A%2A%2Afun%2A%2A%2A"),
(b"Content-Type: application/x-stuff; title*='foo.html",
b"'foo.html"),
(b"Content-Type: application/x-stuff; title*=bar.html",
b"bar.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_header(raw_line)
self.assertEqual(parsed[1]['title'], expected_title)
|
38056f4a71ad8ce124a289699456caa515a3d3a369dd6dc7f62cc872e375ccb6 | import asyncore
import base64
import mimetypes
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from email import charset, message_from_binary_file, message_from_bytes
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr
from io import StringIO
from smtplib import SMTP, SMTPAuthenticationError, SMTPException
from ssl import SSLError
from django.core import mail
from django.core.mail import (
EmailMessage, EmailMultiAlternatives, mail_admins, mail_managers,
send_mail, send_mass_mail,
)
from django.core.mail.backends import console, dummy, filebased, locmem, smtp
from django.core.mail.message import BadHeaderError, sanitize_address
from django.test import SimpleTestCase, override_settings
from django.test.utils import requires_tz_support
from django.utils.translation import gettext_lazy
class HeadersCheckMixin:
def assertMessageHasHeaders(self, message, headers):
"""
Asserts that the `message` has all `headers`.
message: can be an instance of an email.Message subclass or a string
with the contents of an email message.
headers: should be a set of (header-name, header-value) tuples.
"""
if isinstance(message, bytes):
message = message_from_bytes(message)
msg_headers = set(message.items())
        self.assertTrue(
            headers.issubset(msg_headers),
            msg='Message is missing the following headers: %s' % (headers - msg_headers),
        )
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def get_decoded_attachments(self, django_message):
"""
Encode the specified django.core.mail.message.EmailMessage, then decode
it using Python's email.parser module and, for each attachment of the
message, return a list of tuples with (filename, content, mimetype).
"""
msg_bytes = django_message.message().as_bytes()
email_message = message_from_bytes(msg_bytes)
def iter_attachments():
for i in email_message.walk():
if i.get_content_disposition() == 'attachment':
filename = i.get_filename()
content = i.get_payload(decode=True)
mimetype = i.get_content_type()
yield filename, content, mimetype
return list(iter_attachments())
def test_ascii(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected]')
def test_multiple_recipients(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected], [email protected]')
def test_header_omitted_for_no_to_recipients(self):
message = EmailMessage('Subject', 'Content', '[email protected]', cc=['[email protected]']).message()
self.assertNotIn('To', message)
def test_recipients_with_empty_strings(self):
"""
Empty strings in various recipient arguments are always stripped
off the final recipient list.
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]', ''],
cc=['[email protected]', ''],
bcc=['', '[email protected]'],
reply_to=['', None],
)
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]']
)
def test_cc(self):
"""Regression test for #7722"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
message = email.message()
self.assertEqual(message['Cc'], '[email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]'])
# Test multiple CC with multiple To
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'],
cc=['[email protected]', '[email protected]']
)
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]', '[email protected]']
)
# Testing with Bcc
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'],
cc=['[email protected]', '[email protected]'], bcc=['[email protected]']
)
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]']
)
def test_cc_headers(self):
message = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
cc=['[email protected]'], headers={'Cc': '[email protected]'},
).message()
self.assertEqual(message['Cc'], '[email protected]')
def test_cc_in_headers_only(self):
message = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'Cc': '[email protected]'},
).message()
self.assertEqual(message['Cc'], '[email protected]')
def test_reply_to(self):
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
reply_to=['[email protected]'],
)
message = email.message()
self.assertEqual(message['Reply-To'], '[email protected]')
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
reply_to=['[email protected]', '[email protected]']
)
message = email.message()
self.assertEqual(message['Reply-To'], '[email protected], [email protected]')
def test_recipients_as_tuple(self):
email = EmailMessage(
'Subject', 'Content', '[email protected]', ('[email protected]', '[email protected]'),
cc=('[email protected]', '[email protected]'), bcc=('[email protected]',)
)
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]']
)
def test_recipients_as_string(self):
with self.assertRaisesMessage(TypeError, '"to" argument must be a list or tuple'):
EmailMessage(to='[email protected]')
with self.assertRaisesMessage(TypeError, '"cc" argument must be a list or tuple'):
EmailMessage(cc='[email protected]')
with self.assertRaisesMessage(TypeError, '"bcc" argument must be a list or tuple'):
EmailMessage(bcc='[email protected]')
with self.assertRaisesMessage(TypeError, '"reply_to" argument must be a list or tuple'):
EmailMessage(reply_to='[email protected]')
def test_header_injection(self):
email = EmailMessage('Subject\nInjection Test', 'Content', '[email protected]', ['[email protected]'])
with self.assertRaises(BadHeaderError):
email.message()
email = EmailMessage(
gettext_lazy('Subject\nInjection Test'), 'Content', '[email protected]', ['[email protected]']
)
with self.assertRaises(BadHeaderError):
email.message()
def test_space_continuation(self):
"""
Test for space continuation character in long (ASCII) subject headers (#7747)
"""
email = EmailMessage(
'Long subject lines that get wrapped should contain a space '
'continuation character to get expected behavior in Outlook and Thunderbird',
'Content', '[email protected]', ['[email protected]']
)
message = email.message()
self.assertEqual(
message['Subject'].encode(),
b'Long subject lines that get wrapped should contain a space continuation\n'
b' character to get expected behavior in Outlook and Thunderbird'
)
def test_message_header_overrides(self):
"""
Specifying dates or message-ids in the extra headers overrides the
default values (#9233)
"""
headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
email = EmailMessage('subject', 'content', '[email protected]', ['[email protected]'], headers=headers)
self.assertMessageHasHeaders(email.message(), {
('Content-Transfer-Encoding', '7bit'),
('Content-Type', 'text/plain; charset="utf-8"'),
('From', '[email protected]'),
('MIME-Version', '1.0'),
('Message-ID', 'foo'),
('Subject', 'subject'),
('To', '[email protected]'),
('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
})
def test_from_header(self):
"""
Make sure we can manually set the From header (#9214)
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_to_header(self):
"""
Make sure we can manually set the To header (#17444)
"""
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'],
headers={'To': '[email protected]'})
message = email.message()
self.assertEqual(message['To'], '[email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
# If we don't set the To header manually, it should default to the `to` argument to the constructor
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['To'], '[email protected], [email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
def test_to_in_headers_only(self):
message = EmailMessage(
'Subject', 'Content', '[email protected]',
headers={'To': '[email protected]'},
).message()
self.assertEqual(message['To'], '[email protected]')
def test_reply_to_header(self):
"""
Specifying 'Reply-To' in headers should override reply_to.
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
reply_to=['[email protected]'], headers={'Reply-To': '[email protected]'},
)
message = email.message()
self.assertEqual(message['Reply-To'], '[email protected]')
def test_reply_to_in_headers_only(self):
message = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'Reply-To': '[email protected]'},
).message()
self.assertEqual(message['Reply-To'], '[email protected]')
def test_multiple_message_call(self):
"""
Regression for #13259 - Make sure that headers are not changed when
calling EmailMessage.message()
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
message = email.message()
self.assertEqual(message['From'], '[email protected]')
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_unicode_address_header(self):
"""
Regression for #11144 - When a to/from/cc header contains unicode,
        make sure the email addresses are parsed correctly (especially with
        regard to commas).
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]',
['"Firstname Sürname" <[email protected]>', '[email protected]'],
)
self.assertEqual(
email.message()['To'],
'=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>, [email protected]'
)
email = EmailMessage(
'Subject', 'Content', '[email protected]',
['"Sürname, Firstname" <[email protected]>', '[email protected]'],
)
self.assertEqual(
email.message()['To'],
'=?utf-8?q?S=C3=BCrname=2C_Firstname?= <[email protected]>, [email protected]'
)
def test_unicode_headers(self):
email = EmailMessage(
'Gżegżółka', 'Content', '[email protected]', ['[email protected]'],
headers={
'Sender': '"Firstname Sürname" <[email protected]>',
'Comments': 'My Sürname is non-ASCII',
},
)
message = email.message()
self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>')
self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
def test_safe_mime_multipart(self):
"""
Make sure headers can be set with a different encoding than utf-8 in
SafeMIMEMultipart as well
"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
from_email, to = '[email protected]', '"Sürname, Firstname" <[email protected]>'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.encoding = 'iso-8859-1'
self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <[email protected]>')
self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
def test_safe_mime_multipart_with_attachments(self):
"""
EmailMultiAlternatives includes alternatives if the body is empty and
it has attachments.
"""
msg = EmailMultiAlternatives(body='')
html_content = '<p>This is <strong>html</strong></p>'
msg.attach_alternative(html_content, 'text/html')
msg.attach('example.txt', 'Text file content', 'text/plain')
self.assertIn(html_content, msg.message().as_string())
def test_none_body(self):
msg = EmailMessage('subject', None, '[email protected]', ['[email protected]'])
self.assertEqual(msg.body, '')
self.assertEqual(msg.message().get_payload(), '')
def test_encoding(self):
"""
Regression for #12791 - Encode body correctly with other encodings
than utf-8
"""
email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', '[email protected]', ['[email protected]'])
email.encoding = 'iso-8859-1'
message = email.message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]')})
self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
# Make sure MIME attachments also works correctly with other encodings than utf-8
text_content = 'Firstname Sürname is a great guy.'
html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
msg = EmailMultiAlternatives('Subject', text_content, '[email protected]', ['[email protected]'])
msg.encoding = 'iso-8859-1'
msg.attach_alternative(html_content, "text/html")
payload0 = msg.message().get_payload(0)
self.assertMessageHasHeaders(payload0, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
payload1 = msg.message().get_payload(1)
self.assertMessageHasHeaders(payload1, {
('MIME-Version', '1.0'),
('Content-Type', 'text/html; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(
payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>')
)
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), 'multipart/mixed')
self.assertEqual(message.get_default_type(), 'text/plain')
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
self.assertEqual(payload[1].get_content_type(), 'application/pdf')
def test_attachments_two_tuple(self):
msg = EmailMessage(attachments=[('filename1', 'content1')])
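        # A bare (filename, content) pair gets the default mimetype
        # application/octet-stream, and str content is encoded to bytes.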
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'filename1')
self.assertEqual(content, b'content1')
self.assertEqual(mimetype, 'application/octet-stream')
def test_attachments_MIMEText(self):
txt = MIMEText('content1')
msg = EmailMessage(attachments=[txt])
payload = msg.message().get_payload()
self.assertEqual(payload[0], txt)
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
content = 'This is the message.'
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
def test_attach_file(self):
"""
Test attaching a file against different mimetypes and make sure that
a file will be attached and sent properly even if an invalid mimetype
is specified.
"""
files = (
# filename, actual mimetype
('file.txt', 'text/plain'),
('file.png', 'image/png'),
('file_txt', None),
('file_png', None),
('file_txt.png', 'image/png'),
('file_png.txt', 'text/plain'),
('file.eml', 'message/rfc822'),
)
test_mimetypes = ['text/plain', 'image/png', None]
for basename, real_mimetype in files:
for mimetype in test_mimetypes:
email = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype)
self.assertEqual(email.attachments, [])
file_path = os.path.join(os.path.dirname(__file__), 'attachments', basename)
email.attach_file(file_path, mimetype=mimetype)
self.assertEqual(len(email.attachments), 1)
self.assertIn(basename, email.attachments[0])
msgs_sent_num = email.send()
self.assertEqual(msgs_sent_num, 1)
def test_attach_text_as_bytes(self):
msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
msg.attach('file.txt', b'file content')
sent_num = msg.send()
self.assertEqual(sent_num, 1)
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
self.assertEqual(content, b'file content')
self.assertEqual(mimetype, 'text/plain')
def test_attach_utf8_text_as_bytes(self):
"""
Non-ASCII characters encoded as valid UTF-8 are correctly transported
and decoded.
"""
msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
msg.attach('file.txt', b'\xc3\xa4') # UTF-8 encoded a umlaut.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
self.assertEqual(content, b'\xc3\xa4')
self.assertEqual(mimetype, 'text/plain')
def test_attach_non_utf8_text_as_bytes(self):
"""
Binary data that can't be decoded as UTF-8 overrides the MIME type
instead of decoding the data.
"""
msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
msg.attach('file.txt', b'\xff') # Invalid UTF-8.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
# Content should be passed through unmodified.
self.assertEqual(content, b'\xff')
self.assertEqual(mimetype, 'application/octet-stream')
def test_dummy_backend(self):
"""
        Make sure that the dummy backend returns the correct number of sent
        messages.
"""
connection = dummy.EmailBackend()
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
        Make sure that get_connection() accepts arbitrary keyword arguments
        that might be used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.locmem.EmailBackend'),
locmem.EmailBackend
)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.console.EmailBackend'),
console.EmailBackend
)
with tempfile.TemporaryDirectory() as tmp_dir:
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir),
filebased.EmailBackend
)
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_connection_arg(self):
"""Test connection argument to send_mail(), et. al."""
mail.outbox = []
# Send using non-default connection
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, 'Subject')
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mass_mail([
('Subject1', 'Content1', '[email protected]', ['[email protected]']),
('Subject2', 'Content2', '[email protected]', ['[email protected]']),
], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_admins('Admin message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_managers('Manager message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage(
'Subject', 'From the future', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
self.assertNotIn(b'>From the future', email.message().as_bytes())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage(
'Subject', 'UTF-8 encoded body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
self.assertIn(b'Content-Transfer-Encoding: 7bit', msg.message().as_bytes())
# Ticket #11212
# Shouldn't use quoted printable, should detect it can represent content with 7 bit data
msg = EmailMessage(
'Subject', 'Body with only ASCII characters.', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 7bit', s)
# Shouldn't use quoted printable, should detect it can represent content with 8 bit data
msg = EmailMessage(
'Subject', 'Body with latin characters: àáä.', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
s = msg.message().as_string()
self.assertIn('Content-Transfer-Encoding: 8bit', s)
msg = EmailMessage(
'Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', '[email protected]',
['[email protected]'], headers={'From': '[email protected]'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
s = msg.message().as_string()
self.assertIn('Content-Transfer-Encoding: 8bit', s)
def test_dont_base64_encode_message_rfc822(self):
# Ticket #18967
# Shouldn't use base64 encoding for a child EmailMessage attachment.
# Create a child message first
child_msg = EmailMessage(
'Child Subject', 'Some body of child message', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
child_s = child_msg.message().as_string()
# Now create a parent
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
# Attach to parent as a string
parent_msg.attach(content=child_s, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn('Child Subject', parent_s)
# Feature test: try attaching email.Message object directly to the mail.
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn('Child Subject', parent_s)
# Feature test: try attaching Django's EmailMessage object directly to the mail.
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
parent_msg.attach(content=child_msg, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn('Child Subject', parent_s)
def test_custom_utf8_encoding(self):
"""A UTF-8 charset with a custom body encoding is respected."""
body = 'Body with latin characters: àáä.'
msg = EmailMessage('Subject', body, '[email protected]', ['[email protected]'])
encoding = charset.Charset('utf-8')
encoding.body_encoding = charset.QP
msg.encoding = encoding
message = msg.message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', 'quoted-printable'),
})
self.assertEqual(message.get_payload(), encoding.body_encode(body))
def test_sanitize_address(self):
"""
Email addresses are properly sanitized.
"""
# Simple ASCII address - string form
self.assertEqual(sanitize_address('[email protected]', 'ascii'), '[email protected]')
self.assertEqual(sanitize_address('[email protected]', 'utf-8'), '[email protected]')
# Simple ASCII address - tuple form
self.assertEqual(
sanitize_address(('A name', '[email protected]'), 'ascii'),
'A name <[email protected]>'
)
self.assertEqual(
sanitize_address(('A name', '[email protected]'), 'utf-8'),
'=?utf-8?q?A_name?= <[email protected]>'
)
        # Unicode characters are supported per RFC 6532.
self.assertEqual(
sanitize_address('tó@example.com', 'utf-8'),
'[email protected]'
)
self.assertEqual(
sanitize_address(('Tó Example', 'tó@example.com'), 'utf-8'),
'=?utf-8?q?T=C3=B3_Example?= <[email protected]>'
)
@requires_tz_support
class MailTimeZoneTests(SimpleTestCase):
@override_settings(EMAIL_USE_LOCALTIME=False, USE_TZ=True, TIME_ZONE='Africa/Algiers')
def test_date_header_utc(self):
"""
EMAIL_USE_LOCALTIME=False creates a datetime in UTC.
"""
email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]'])
self.assertTrue(email.message()['Date'].endswith('-0000'))
@override_settings(EMAIL_USE_LOCALTIME=True, USE_TZ=True, TIME_ZONE='Africa/Algiers')
def test_date_header_localtime(self):
"""
EMAIL_USE_LOCALTIME=True creates a datetime in the local time zone.
"""
email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]'])
self.assertTrue(email.message()['Date'].endswith('+0100')) # Africa/Algiers is UTC+1
class PythonGlobalState(SimpleTestCase):
"""
    Tests for #12422 -- Django's charset workarounds (#2472/#11212) for utf-8
    text parts shouldn't pollute the global email package's charset registry
    when django.core.mail.message is imported.
"""
def test_utf8(self):
txt = MIMEText('UTF-8 encoded body', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_7bit(self):
txt = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_8bit_latin(self):
txt = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_8bit_non_latin(self):
txt = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
class BaseEmailBackendTests(HeadersCheckMixin):
email_backend = None
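    # Subclasses set email_backend to the dotted path of the backend under
    # test; setUp() installs it via override_settings.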
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests must provide a get_mailbox_content() method')
def flush_mailbox(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests may require a flush_mailbox() method')
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(
len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [m.as_string() for m in mailbox])
)
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "[email protected]")
self.assertEqual(message.get_all("to"), ["[email protected]"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(message.get_payload(decode=True).decode(), 'Je t\'aime très fort')
def test_send_long_lines(self):
"""
Email line length is limited to 998 chars by the RFC:
https://tools.ietf.org/html/rfc5322#section-2.1.1
        Message bodies containing longer lines are converted to Quoted-Printable
to avoid having to insert newlines, which could be hairy to do properly.
"""
# Unencoded body length is < 998 (840) but > 998 when utf-8 encoded.
email = EmailMessage('Subject', 'В южных морях ' * 60, '[email protected]', ['[email protected]'])
email.send()
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', 'quoted-printable'),
})
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', '[email protected]', ['[email protected]'])
email2 = EmailMessage('Subject', 'Content2', '[email protected]', ['[email protected]'])
# send_messages() may take a list or a generator.
emails_lists = ([email1, email2], (email for email in [email1, email2]))
for emails_list in emails_lists:
num_sent = mail.get_connection().send_messages(emails_list)
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), 'Content1')
self.assertEqual(messages[1].get_payload(), 'Content2')
self.flush_mailbox()
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <[email protected]>',
["[email protected]"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>")
def test_plaintext_send_mail(self):
"""
Test send_mail without the html_message
regression test for adding html_message parameter to send_mail()
"""
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'])
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message.get_content_type(), 'text/plain')
def test_html_send_mail(self):
"""Test html_message argument to send_mail"""
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(MANAGERS=[('nobody', '[email protected]')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', '[email protected]')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(gettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(gettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=[], MANAGERS=[])
def test_empty_admins(self):
"""
mail_admins/mail_managers doesn't connect to the mail server
if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]'),
('Cc', '[email protected]')})
self.assertIn('\nDate: ', message.as_string())
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.flush_mailbox()
m = EmailMessage('Subject', 'Content', 'from@öäü.com', ['to@öäü.com'], cc=['cc@öäü.com'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.assertEqual(message.get('cc'), '[email protected]')
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), "tester")
self.assertEqual(message.get('to'), "django")
def test_lazy_addresses(self):
"""
Email sending should support lazy email addresses (#24416).
"""
_ = gettext_lazy
self.assertTrue(send_mail('Subject', 'Content', _('tester'), [_('django')]))
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'django')
self.flush_mailbox()
m = EmailMessage(
'Subject', 'Content', _('tester'), [_('to1'), _('to2')],
cc=[_('cc1'), _('cc2')],
bcc=[_('bcc')],
reply_to=[_('reply')],
)
self.assertEqual(m.recipients(), ['to1', 'to2', 'cc1', 'cc2', 'bcc'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'to1, to2')
self.assertEqual(message.get('cc'), 'cc1, cc2')
self.assertEqual(message.get('Reply-To'), 'reply')
def test_close_connection(self):
"""
Connection can be closed (even when not explicitly opened)
"""
conn = mail.get_connection(username='', password='')
conn.close()
def test_use_as_contextmanager(self):
"""
The connection can be used as a context manager.
"""
opened = [False]
closed = [False]
conn = mail.get_connection(username='', password='')
def open():
opened[0] = True
conn.open = open
def close():
closed[0] = True
conn.close = close
with conn as same_conn:
self.assertTrue(opened[0])
self.assertIs(same_conn, conn)
self.assertFalse(closed[0])
self.assertTrue(closed[0])
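# Illustrative sketch (not part of the original suite): the context-manager
# protocol tested above lets callers reuse one backend connection for a
# whole batch of messages. The helper name is hypothetical.
def _example_send_batch_over_one_connection(messages):
    """Send a list of EmailMessage objects over a single shared connection."""
    with mail.get_connection() as connection:
        # __enter__() opens the connection; __exit__() closes it even if
        # send_messages() raises.
        return connection.send_messages(messages)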
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.locmem.EmailBackend'
def get_mailbox_content(self):
return [m.message() for m in mail.outbox]
def flush_mailbox(self):
mail.outbox = []
def tearDown(self):
super().tearDown()
mail.outbox = []
def test_locmem_shared_messages(self):
"""
Make sure that the locmem backend populates the outbox.
"""
connection = locmem.EmailBackend()
connection2 = locmem.EmailBackend()
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
connection.send_messages([email])
connection2.send_messages([email])
self.assertEqual(len(mail.outbox), 2)
def test_validate_multiline_headers(self):
# Ticket #18861 - Validate emails when using the locmem backend
with self.assertRaises(BadHeaderError):
send_mail('Subject\nMultiline', 'Content', '[email protected]', ['[email protected]'])
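# Illustrative sketch (not part of the original suite): with the locmem
# backend active (Django's default during tests), every sent message is
# appended to django.core.mail.outbox, which the assertions above rely on.
def _example_outbox_assertions(test_case):
    """Assert that exactly one message reached the shared locmem outbox."""
    send_mail('Subject', 'Content', '[email protected]', ['[email protected]'])
    test_case.assertEqual(len(mail.outbox), 1)
    test_case.assertEqual(mail.outbox[0].subject, 'Subject')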
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.filebased.EmailBackend'
def setUp(self):
super().setUp()
self.tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp_dir)
self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
self._settings_override.enable()
def tearDown(self):
self._settings_override.disable()
super().tearDown()
def flush_mailbox(self):
for filename in os.listdir(self.tmp_dir):
os.unlink(os.path.join(self.tmp_dir, filename))
def get_mailbox_content(self):
messages = []
for filename in os.listdir(self.tmp_dir):
with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
session = fp.read().split(b'\n' + (b'-' * 79) + b'\n')
messages.extend(message_from_bytes(m) for m in session if m)
return messages
def test_file_sessions(self):
"""Make sure opening a connection creates a new file"""
msg = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
connection = mail.get_connection()
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
message = message_from_binary_file(fp)
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
connection2 = mail.get_connection()
connection2.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
msg.connection = mail.get_connection()
self.assertTrue(connection.open())
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
connection.close()
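# Illustrative sketch (not part of the original suite): outside of tests the
# file backend is usually selected through settings rather than instantiated
# directly. The temporary directory here is a placeholder and is not cleaned
# up automatically.
def _example_file_backend_override():
    """Return an override_settings() that routes mail to files on disk."""
    return override_settings(
        EMAIL_BACKEND='django.core.mail.backends.filebased.EmailBackend',
        EMAIL_FILE_PATH=tempfile.mkdtemp(),
    )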
class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.console.EmailBackend'
def setUp(self):
super().setUp()
self.__stdout = sys.stdout
self.stream = sys.stdout = StringIO()
def tearDown(self):
del self.stream
sys.stdout = self.__stdout
del self.__stdout
super().tearDown()
def flush_mailbox(self):
self.stream = sys.stdout = StringIO()
def get_mailbox_content(self):
messages = self.stream.getvalue().split('\n' + ('-' * 79) + '\n')
return [message_from_bytes(m.encode()) for m in messages if m]
def test_console_stream_kwarg(self):
"""
The console backend can be pointed at an arbitrary stream.
"""
s = StringIO()
connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
message = s.getvalue().split('\n' + ('-' * 79) + '\n')[0].encode()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]')})
self.assertIn(b'\nDate: ', message)
class FakeSMTPChannel(smtpd.SMTPChannel):
def collect_incoming_data(self, data):
try:
smtpd.SMTPChannel.collect_incoming_data(self, data)
except UnicodeDecodeError:
# Ignore decode error in SSL/TLS connection tests as the test only
# cares whether the connection attempt was made.
pass
def smtp_AUTH(self, arg):
if arg == 'CRAM-MD5':
# This is only the first part of the login process. But it's enough
# for our tests.
challenge = base64.b64encode(b'somerandomstring13579')
self.push('334 %s' % challenge.decode())
else:
self.push('502 Error: login "%s" not implemented' % arg)
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
"""
Asyncore SMTP server wrapped in a thread. Based on DummyFTPServer from:
http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
"""
channel_class = FakeSMTPChannel
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
smtpd.SMTPServer.__init__(self, *args, decode_data=True, **kwargs)
self._sink = []
self.active = False
self.active_lock = threading.Lock()
self.sink_lock = threading.Lock()
def process_message(self, peer, mailfrom, rcpttos, data):
data = data.encode()
m = message_from_bytes(data)
maddr = parseaddr(m.get('from'))[1]
if mailfrom != maddr:
# According to the spec, mailfrom does not necessarily match the
# From header - this is the case where the local part isn't
# encoded, so try to correct that.
lp, domain = mailfrom.split('@', 1)
lp = Header(lp, 'utf-8').encode()
mailfrom = '@'.join([lp, domain])
if mailfrom != maddr:
return "553 '%s' != '%s'" % (mailfrom, maddr)
with self.sink_lock:
self._sink.append(m)
def get_sink(self):
with self.sink_lock:
return self._sink[:]
def flush_sink(self):
with self.sink_lock:
self._sink[:] = []
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
asyncore.close_all()
def stop(self):
if self.active:
self.active = False
self.join()
class FakeAUTHSMTPConnection(SMTP):
"""
An SMTP connection that pretends to support the AUTH command. It doesn't,
but at least this allows testing the first part of the AUTH process.
"""
def ehlo(self, name=''):
response = SMTP.ehlo(self, name=name)
self.esmtp_features.update({
'auth': 'CRAM-MD5 PLAIN LOGIN',
})
return response
class SMTPBackendTestsBase(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
cls._settings_override = override_settings(
EMAIL_HOST="127.0.0.1",
EMAIL_PORT=cls.server.socket.getsockname()[1])
cls._settings_override.enable()
cls.server.start()
@classmethod
def tearDownClass(cls):
cls._settings_override.disable()
cls.server.stop()
super().tearDownClass()
class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase):
email_backend = 'django.core.mail.backends.smtp.EmailBackend'
def setUp(self):
super().setUp()
self.server.flush_sink()
def tearDown(self):
self.server.flush_sink()
super().tearDown()
def flush_mailbox(self):
self.server.flush_sink()
def get_mailbox_content(self):
return self.server.get_sink()
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD='not empty password',
)
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, 'not empty username')
self.assertEqual(backend.password, 'not empty password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD='not empty password',
)
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username='username', password='password')
self.assertEqual(backend.username, 'username')
self.assertEqual(backend.password, 'password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD='not empty password',
)
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username='', password='')
self.assertEqual(backend.username, '')
self.assertEqual(backend.password, '')
def test_auth_attempted(self):
"""
Opening the backend with a non-empty username/password tries
to authenticate against the SMTP server.
"""
backend = smtp.EmailBackend(
username='not empty username', password='not empty password')
with self.assertRaisesMessage(SMTPException, 'SMTP AUTH extension not supported by server.'):
with backend:
pass
def test_server_open(self):
"""
open() returns whether it opened a connection.
"""
backend = smtp.EmailBackend(username='', password='')
self.assertIsNone(backend.connection)
opened = backend.open()
backend.close()
self.assertIs(opened, True)
def test_reopen_connection(self):
backend = smtp.EmailBackend()
# Simulate an already open connection.
backend.connection = True
self.assertIs(backend.open(), False)
def test_server_login(self):
"""
Even if the Python SMTP server doesn't support authentication, the
login process starts and the appropriate exception is raised.
"""
class CustomEmailBackend(smtp.EmailBackend):
connection_class = FakeAUTHSMTPConnection
backend = CustomEmailBackend(username='username', password='password')
with self.assertRaises(SMTPAuthenticationError):
with backend:
pass
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_override_settings(self):
backend = smtp.EmailBackend(use_tls=False)
self.assertFalse(backend.use_tls)
def test_email_tls_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_tls)
def test_ssl_tls_mutually_exclusive(self):
msg = (
'EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set '
'one of those settings to True.'
)
with self.assertRaisesMessage(ValueError, msg):
smtp.EmailBackend(use_ssl=True, use_tls=True)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_override_settings(self):
backend = smtp.EmailBackend(use_ssl=False)
self.assertFalse(backend.use_ssl)
def test_email_ssl_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_ssl)
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_certfile, 'foo')
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_override_settings(self):
backend = smtp.EmailBackend(ssl_certfile='bar')
self.assertEqual(backend.ssl_certfile, 'bar')
def test_email_ssl_certfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_certfile)
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_keyfile, 'foo')
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_override_settings(self):
backend = smtp.EmailBackend(ssl_keyfile='bar')
self.assertEqual(backend.ssl_keyfile, 'bar')
def test_email_ssl_keyfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_keyfile)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_attempts_starttls(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
with self.assertRaisesMessage(SMTPException, 'STARTTLS extension not supported by server.'):
with backend:
pass
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_attempts_ssl_connection(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
with self.assertRaises(SSLError):
with backend:
pass
def test_connection_timeout_default(self):
"""The connection's timeout value is None by default."""
connection = mail.get_connection('django.core.mail.backends.smtp.EmailBackend')
self.assertIsNone(connection.timeout)
def test_connection_timeout_custom(self):
"""The timeout parameter can be customized."""
class MyEmailBackend(smtp.EmailBackend):
def __init__(self, *args, **kwargs):
kwargs.setdefault('timeout', 42)
super().__init__(*args, **kwargs)
myemailbackend = MyEmailBackend()
myemailbackend.open()
self.assertEqual(myemailbackend.timeout, 42)
self.assertEqual(myemailbackend.connection.timeout, 42)
myemailbackend.close()
@override_settings(EMAIL_TIMEOUT=10)
def test_email_timeout_override_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.timeout, 10)
def test_email_msg_uses_crlf(self):
"""#23063 -- RFC-compliant messages are sent over SMTP."""
send = SMTP.send
try:
smtp_messages = []
def mock_send(self, s):
smtp_messages.append(s)
return send(self, s)
SMTP.send = mock_send
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
mail.get_connection().send_messages([email])
# Find the actual message
msg = None
for i, m in enumerate(smtp_messages):
if m[:4] == 'data':
msg = smtp_messages[i + 1]
break
self.assertTrue(msg)
msg = msg.decode()
# The message only contains CRLF and not combinations of CRLF, LF, and CR.
msg = msg.replace('\r\n', '')
self.assertNotIn('\r', msg)
self.assertNotIn('\n', msg)
finally:
SMTP.send = send
def test_send_messages_after_open_failed(self):
"""
send_messages() shouldn't try to send messages if open() raises an
exception after initializing the connection.
"""
backend = smtp.EmailBackend()
# Simulate connection initialization success and a subsequent
# connection exception.
backend.connection = True
backend.open = lambda: None
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
self.assertEqual(backend.send_messages([email]), 0)
def test_send_messages_empty_list(self):
backend = smtp.EmailBackend()
backend.connection = True
self.assertEqual(backend.send_messages([]), 0)
def test_send_messages_zero_sent(self):
"""A message isn't sent if it doesn't have any recipients."""
backend = smtp.EmailBackend()
backend.connection = True
email = EmailMessage('Subject', 'Content', '[email protected]', to=[])
sent = backend.send_messages([email])
self.assertEqual(sent, 0)
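# Illustrative sketch (not part of the original suite): every constructor
# argument of the SMTP backend falls back to the corresponding EMAIL_*
# setting when omitted, as the *_use_settings/*_override_settings tests
# above demonstrate. All values below are placeholders.
def _example_smtp_backend():
    """Build an SMTP backend with explicit (placeholder) configuration."""
    return smtp.EmailBackend(
        host='smtp.example.com',
        port=587,
        username='user',
        password='secret',
        use_tls=True,  # mutually exclusive with use_ssl=True
        timeout=10,
    )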
class SMTPBackendStoppedServerTests(SMTPBackendTestsBase):
"""
These tests require a separate class, because the FakeSMTPServer is shut
down in setUpClass(), and it cannot be restarted ("RuntimeError: threads
can only be started once").
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.backend = smtp.EmailBackend(username='', password='')
cls.server.stop()
def test_server_stopped(self):
"""
Closing the backend while the SMTP server is stopped doesn't raise an
exception.
"""
self.backend.close()
def test_fail_silently_on_connection_error(self):
"""
A socket connection error is silenced with fail_silently=True.
"""
with self.assertRaises(ConnectionError):
self.backend.open()
self.backend.fail_silently = True
self.backend.open()
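# Illustrative sketch (not part of the original suite): fail_silently is also
# accepted by the top-level helpers, so callers can swallow connection errors
# without touching the backend directly; send_mail() then returns 0 instead
# of raising.
def _example_best_effort_send():
    """Send a message, silencing connection errors."""
    return send_mail(
        'Subject', 'Content', '[email protected]', ['[email protected]'],
        fail_silently=True,
    )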
|
de6c37109e2d20b296f4213c4d6dc8eeea0d3a34eeacdd9401e6ee4c511b6557 | import pickle
from django import forms
from django.core.exceptions import ValidationError
from django.db import models
from django.test import SimpleTestCase, TestCase
from django.utils.functional import lazy
from .models import (
Bar, Choiceful, Foo, RenamedField, VerboseNameField, Whiz, WhizDelayed,
WhizIter, WhizIterEmpty,
)
class Nested:
class Field(models.Field):
pass
class BasicFieldTests(SimpleTestCase):
def test_show_hidden_initial(self):
"""
Fields with choices respect show_hidden_initial as a kwarg to
formfield().
"""
choices = [(0, 0), (1, 1)]
model_field = models.Field(choices=choices)
form_field = model_field.formfield(show_hidden_initial=True)
self.assertTrue(form_field.show_hidden_initial)
form_field = model_field.formfield(show_hidden_initial=False)
self.assertFalse(form_field.show_hidden_initial)
def test_field_repr(self):
"""
__repr__() of a field displays its name.
"""
f = Foo._meta.get_field('a')
self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
f = models.fields.CharField()
self.assertEqual(repr(f), '<django.db.models.fields.CharField>')
def test_field_repr_nested(self):
"""__repr__() uses __qualname__ for nested class support."""
self.assertEqual(repr(Nested.Field()), '<model_fields.tests.Nested.Field>')
def test_field_name(self):
"""
A defined field name (name="fieldname") is used instead of the
model's attribute name (modelname).
"""
instance = RenamedField()
self.assertTrue(hasattr(instance, 'get_fieldname_display'))
self.assertFalse(hasattr(instance, 'get_modelname_display'))
def test_field_verbose_name(self):
m = VerboseNameField
for i in range(1, 23):
self.assertEqual(m._meta.get_field('field%d' % i).verbose_name, 'verbose field%d' % i)
self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')
def test_choices_form_class(self):
"""Can supply a custom choices form class to Field.formfield()"""
choices = [('a', 'a')]
field = models.CharField(choices=choices)
klass = forms.TypedMultipleChoiceField
self.assertIsInstance(field.formfield(choices_form_class=klass), klass)
def test_formfield_disabled(self):
"""Field.formfield() sets disabled for fields with choices."""
field = models.CharField(choices=[('a', 'b')])
form_field = field.formfield(disabled=True)
self.assertIs(form_field.disabled, True)
def test_field_str(self):
f = models.Field()
self.assertEqual(str(f), '<django.db.models.fields.Field>')
f = Foo._meta.get_field('a')
self.assertEqual(str(f), 'model_fields.Foo.a')
def test_field_ordering(self):
"""Fields are ordered based on their creation."""
f1 = models.Field()
f2 = models.Field(auto_created=True)
f3 = models.Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_field_instance_is_picklable(self):
"""Field instances can be pickled."""
field = models.Field(max_length=100, default='a string')
# Must be picklable with this cached property populated (#28188).
field._get_default
pickle.dumps(field)
def test_deconstruct_nested_field(self):
"""deconstruct() uses __qualname__ for nested class support."""
name, path, args, kwargs = Nested.Field().deconstruct()
self.assertEqual(path, 'model_fields.tests.Nested.Field')
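# Illustrative sketch (not part of the original suite): deconstruct() returns
# the 4-tuple migrations use to recreate a field from scratch.
def _example_deconstruct():
    """Show what deconstruct() returns for a simple unbound field."""
    name, path, args, kwargs = models.CharField(max_length=100).deconstruct()
    # name is None until the field is bound to a model; path is the dotted
    # import path ('django.db.models.CharField'); kwargs is {'max_length': 100}.
    return name, path, args, kwargs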
class ChoicesTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.no_choices = Choiceful._meta.get_field('no_choices')
cls.empty_choices = Choiceful._meta.get_field('empty_choices')
cls.empty_choices_bool = Choiceful._meta.get_field('empty_choices_bool')
cls.empty_choices_text = Choiceful._meta.get_field('empty_choices_text')
cls.with_choices = Choiceful._meta.get_field('with_choices')
def test_choices(self):
self.assertIsNone(self.no_choices.choices)
self.assertEqual(self.empty_choices.choices, ())
self.assertEqual(self.with_choices.choices, [(1, 'A')])
def test_flatchoices(self):
self.assertEqual(self.no_choices.flatchoices, [])
self.assertEqual(self.empty_choices.flatchoices, [])
self.assertEqual(self.with_choices.flatchoices, [(1, 'A')])
def test_check(self):
self.assertEqual(Choiceful.check(), [])
def test_invalid_choice(self):
model_instance = None # Actual model instance not needed.
self.no_choices.validate(0, model_instance)
msg = "['Value 99 is not a valid choice.']"
with self.assertRaisesMessage(ValidationError, msg):
self.empty_choices.validate(99, model_instance)
with self.assertRaisesMessage(ValidationError, msg):
self.with_choices.validate(99, model_instance)
def test_formfield(self):
no_choices_formfield = self.no_choices.formfield()
self.assertIsInstance(no_choices_formfield, forms.IntegerField)
fields = (
self.empty_choices, self.with_choices, self.empty_choices_bool,
self.empty_choices_text,
)
for field in fields:
with self.subTest(field=field):
self.assertIsInstance(field.formfield(), forms.ChoiceField)
class GetFieldDisplayTests(SimpleTestCase):
def test_choices_and_field_display(self):
"""
get_choices() interacts with get_FIELD_display() to return the expected
values.
"""
self.assertEqual(Whiz(c=1).get_c_display(), 'First') # A nested value
self.assertEqual(Whiz(c=0).get_c_display(), 'Other') # A top level value
self.assertEqual(Whiz(c=9).get_c_display(), 9) # Invalid value
self.assertIsNone(Whiz(c=None).get_c_display()) # Blank value
self.assertEqual(Whiz(c='').get_c_display(), '') # Empty value
self.assertEqual(WhizDelayed(c=0).get_c_display(), 'Other') # Delayed choices
def test_iterator_choices(self):
"""
get_choices() works with Iterators.
"""
self.assertEqual(WhizIter(c=1).c, 1) # A nested value
self.assertEqual(WhizIter(c=9).c, 9) # Invalid value
self.assertIsNone(WhizIter(c=None).c) # Blank value
self.assertEqual(WhizIter(c='').c, '') # Empty value
def test_empty_iterator_choices(self):
"""
get_choices() works with empty iterators.
"""
self.assertEqual(WhizIterEmpty(c="a").c, "a") # A nested value
self.assertEqual(WhizIterEmpty(c="b").c, "b") # Invalid value
self.assertIsNone(WhizIterEmpty(c=None).c) # Blank value
self.assertEqual(WhizIterEmpty(c='').c, '') # Empty value
class GetChoicesTests(SimpleTestCase):
def test_empty_choices(self):
choices = []
f = models.CharField(choices=choices)
self.assertEqual(f.get_choices(include_blank=False), choices)
def test_blank_in_choices(self):
choices = [('', '<><>'), ('a', 'A')]
f = models.CharField(choices=choices)
self.assertEqual(f.get_choices(include_blank=True), choices)
def test_blank_in_grouped_choices(self):
choices = [
('f', 'Foo'),
('b', 'Bar'),
('Group', (
('', 'No Preference'),
('fg', 'Foo'),
('bg', 'Bar'),
)),
]
f = models.CharField(choices=choices)
self.assertEqual(f.get_choices(include_blank=True), choices)
def test_lazy_strings_not_evaluated(self):
lazy_func = lazy(lambda x: 0 / 0, int) # raises ZeroDivisionError if evaluated.
f = models.CharField(choices=[(lazy_func('group'), (('a', 'A'), ('b', 'B')))])
self.assertEqual(f.get_choices(include_blank=True)[0], ('', '---------'))
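# Illustrative sketch (not part of the original suite): include_blank controls
# whether the '---------' blank entry is prepended; as the tests above show,
# it is skipped when the choices (or a group within them) already contain a
# blank value.
def _example_get_choices():
    """Prepend the blank dash entry to a field's choices."""
    f = models.CharField(choices=[('a', 'A')])
    return f.get_choices(include_blank=True)  # [('', '---------'), ('a', 'A')]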
class GetChoicesOrderingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.foo1 = Foo.objects.create(a='a', d='12.34')
cls.foo2 = Foo.objects.create(a='b', d='12.34')
cls.bar1 = Bar.objects.create(a=cls.foo1, b='a')
cls.bar2 = Bar.objects.create(a=cls.foo2, b='a')
cls.field = Bar._meta.get_field('a')
def assertChoicesEqual(self, choices, objs):
self.assertEqual(choices, [(obj.pk, str(obj)) for obj in objs])
def test_get_choices(self):
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, ordering=('a',)),
[self.foo1, self.foo2]
)
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, ordering=('-a',)),
[self.foo2, self.foo1]
)
def test_get_choices_reverse_related_field(self):
self.assertChoicesEqual(
self.field.remote_field.get_choices(include_blank=False, ordering=('a',)),
[self.bar1, self.bar2]
)
self.assertChoicesEqual(
self.field.remote_field.get_choices(include_blank=False, ordering=('-a',)),
[self.bar2, self.bar1]
)
|
4924ca1d321816c06a714b32734c3b44c6048033e92bd6aa71a07f8d7b360b59 | import os
import tempfile
import uuid
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageField, ImageFieldFile
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
try:
from PIL import Image
except ImportError:
Image = None
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
return Foo.objects.get(id=1).pk
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, models.CASCADE, default=get_foo, related_name='bars')
class Whiz(models.Model):
CHOICES = (
('Group 1', (
(1, 'First'),
(2, 'Second'),
)
),
('Group 2', (
(3, 'Third'),
(4, 'Fourth'),
)
),
(0, 'Other'),
)
c = models.IntegerField(choices=CHOICES, null=True)
class WhizDelayed(models.Model):
c = models.IntegerField(choices=(), null=True)
# Contrived way of adding choices later.
WhizDelayed._meta.get_field('c').choices = Whiz.CHOICES
class WhizIter(models.Model):
c = models.IntegerField(choices=iter(Whiz.CHOICES), null=True)
class WhizIterEmpty(models.Model):
c = models.CharField(choices=iter(()), blank=True, max_length=1)
class Choiceful(models.Model):
no_choices = models.IntegerField(null=True)
empty_choices = models.IntegerField(choices=(), null=True)
with_choices = models.IntegerField(choices=[(1, 'A')], null=True)
empty_choices_bool = models.BooleanField(choices=())
empty_choices_text = models.TextField(choices=())
class BigD(models.Model):
d = models.DecimalField(max_digits=32, decimal_places=30)
class FloatModel(models.Model):
size = models.FloatField()
class BigS(models.Model):
s = models.SlugField(max_length=255)
class UnicodeSlugField(models.Model):
s = models.SlugField(max_length=255, allow_unicode=True)
class SmallIntegerModel(models.Model):
value = models.SmallIntegerField()
class IntegerModel(models.Model):
value = models.IntegerField()
class BigIntegerModel(models.Model):
value = models.BigIntegerField()
null_value = models.BigIntegerField(null=True, blank=True)
class PositiveSmallIntegerModel(models.Model):
value = models.PositiveSmallIntegerField()
class PositiveIntegerModel(models.Model):
value = models.PositiveIntegerField()
class Post(models.Model):
title = models.CharField(max_length=100)
body = models.TextField()
class NullBooleanModel(models.Model):
nbfield = models.BooleanField(null=True, blank=True)
nbfield_old = models.NullBooleanField()
class BooleanModel(models.Model):
bfield = models.BooleanField()
string = models.CharField(max_length=10, default='abc')
class DateTimeModel(models.Model):
d = models.DateField()
dt = models.DateTimeField()
t = models.TimeField()
class DurationModel(models.Model):
field = models.DurationField()
class NullDurationModel(models.Model):
field = models.DurationField(null=True)
class PrimaryKeyCharModel(models.Model):
string = models.CharField(max_length=10, primary_key=True)
class FksToBooleans(models.Model):
"""Model with FKs to models with {Null,}BooleanField's, #15040"""
bf = models.ForeignKey(BooleanModel, models.CASCADE)
nbf = models.ForeignKey(NullBooleanModel, models.CASCADE)
class FkToChar(models.Model):
"""Model with FK to a model with a CharField primary key, #19299"""
out = models.ForeignKey(PrimaryKeyCharModel, models.CASCADE)
class RenamedField(models.Model):
modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),))
class VerboseNameField(models.Model):
id = models.AutoField("verbose pk", primary_key=True)
field1 = models.BigIntegerField("verbose field1")
field2 = models.BooleanField("verbose field2", default=False)
field3 = models.CharField("verbose field3", max_length=10)
field4 = models.DateField("verbose field4")
field5 = models.DateTimeField("verbose field5")
field6 = models.DecimalField("verbose field6", max_digits=6, decimal_places=1)
field7 = models.EmailField("verbose field7")
field8 = models.FileField("verbose field8", upload_to="unused")
field9 = models.FilePathField("verbose field9")
field10 = models.FloatField("verbose field10")
# Don't want to depend on Pillow in this test
# field_image = models.ImageField("verbose field")
field11 = models.IntegerField("verbose field11")
field12 = models.GenericIPAddressField("verbose field12", protocol="ipv4")
field13 = models.NullBooleanField("verbose field13")
field14 = models.PositiveIntegerField("verbose field14")
field15 = models.PositiveSmallIntegerField("verbose field15")
field16 = models.SlugField("verbose field16")
field17 = models.SmallIntegerField("verbose field17")
field18 = models.TextField("verbose field18")
field19 = models.TimeField("verbose field19")
field20 = models.URLField("verbose field20")
field21 = models.UUIDField("verbose field21")
field22 = models.DurationField("verbose field22")
class GenericIPAddress(models.Model):
ip = models.GenericIPAddressField(null=True, protocol='ipv4')
###############################################################################
# These models aren't used in any test, just here to ensure they validate
# successfully.
# See ticket #16570.
class DecimalLessThanOne(models.Model):
d = models.DecimalField(max_digits=3, decimal_places=3)
# See ticket #18389.
class FieldClassAttributeModel(models.Model):
field_class = models.CharField
###############################################################################
class DataModel(models.Model):
short_data = models.BinaryField(max_length=10, default=b'\x08')
data = models.BinaryField()
###############################################################################
# FileField
class Document(models.Model):
myfile = models.FileField(upload_to='unused', unique=True)
###############################################################################
# ImageField
# If Pillow available, do these tests.
if Image:
class TestImageFieldFile(ImageFieldFile):
"""
Custom Field File class that records whether or not the underlying file
was opened.
"""
def __init__(self, *args, **kwargs):
self.was_opened = False
super().__init__(*args, **kwargs)
def open(self):
self.was_opened = True
super().open()
class TestImageField(ImageField):
attr_class = TestImageFieldFile
# Set up a temp directory for file storage.
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')
class Person(models.Model):
"""
Model that defines an ImageField with no dimension fields.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests')
class AbstractPersonWithHeight(models.Model):
"""
Abstract model that defines an ImageField with only one dimension field,
to make sure the dimension update is correctly run on concrete subclass
instances post-initialization.
"""
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height')
mugshot_height = models.PositiveSmallIntegerField()
class Meta:
abstract = True
class PersonWithHeight(AbstractPersonWithHeight):
"""
Concrete model that subclasses an abstract one with only one dimension
field.
"""
name = models.CharField(max_length=50)
class PersonWithHeightAndWidth(models.Model):
"""
Model that defines height and width fields after the ImageField.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
class PersonDimensionsFirst(models.Model):
"""
Model that defines height and width fields before the ImageField.
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
class PersonTwoImages(models.Model):
"""
Model that:
* Defines two ImageFields
* Defines the height/width fields before the ImageFields
* Has a nullable ImageField
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
headshot_height = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot_width = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot = TestImageField(blank=True, null=True,
storage=temp_storage, upload_to='tests',
height_field='headshot_height',
width_field='headshot_width')
class AllFieldsModel(models.Model):
big_integer = models.BigIntegerField()
binary = models.BinaryField()
boolean = models.BooleanField(default=False)
char = models.CharField(max_length=10)
date = models.DateField()
datetime = models.DateTimeField()
decimal = models.DecimalField(decimal_places=2, max_digits=2)
duration = models.DurationField()
email = models.EmailField()
file_path = models.FilePathField()
floatf = models.FloatField()
integer = models.IntegerField()
generic_ip = models.GenericIPAddressField()
null_boolean = models.NullBooleanField()
positive_integer = models.PositiveIntegerField()
positive_small_integer = models.PositiveSmallIntegerField()
slug = models.SlugField()
small_integer = models.SmallIntegerField()
text = models.TextField()
time = models.TimeField()
url = models.URLField()
uuid = models.UUIDField()
fo = ForeignObject(
'self',
on_delete=models.CASCADE,
from_fields=['abstract_non_concrete_id'],
to_fields=['id'],
related_name='reverse'
)
fk = ForeignKey(
'self',
models.CASCADE,
related_name='reverse2'
)
m2m = ManyToManyField('self')
oto = OneToOneField('self', models.CASCADE)
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
gfk = GenericForeignKey()
gr = GenericRelation(DataModel)
class ManyToMany(models.Model):
m2m = models.ManyToManyField('self')
###############################################################################
class UUIDModel(models.Model):
field = models.UUIDField()
class NullableUUIDModel(models.Model):
field = models.UUIDField(blank=True, null=True)
class PrimaryKeyUUIDModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
class RelatedToUUIDModel(models.Model):
uuid_fk = models.ForeignKey('PrimaryKeyUUIDModel', models.CASCADE)
class UUIDChild(PrimaryKeyUUIDModel):
pass
class UUIDGrandchild(UUIDChild):
pass
|
ff6217f18765894544c7b220038f8fec080e8571dc83dd0b9645a57ea5c5a04f | from unittest import mock, skipUnless
from django.db import connection
from django.db.models import Index
from django.db.utils import DatabaseError
from django.test import TransactionTestCase, skipUnlessDBFeature
from .models import Article, ArticleReporter, City, District, Reporter
class IntrospectionTests(TransactionTestCase):
available_apps = ['introspection']
def test_table_names(self):
tl = connection.introspection.table_names()
self.assertEqual(tl, sorted(tl))
self.assertIn(Reporter._meta.db_table, tl, "'%s' isn't in table_list()." % Reporter._meta.db_table)
self.assertIn(Article._meta.db_table, tl, "'%s' isn't in table_list()." % Article._meta.db_table)
def test_django_table_names(self):
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
tl = connection.introspection.django_table_names()
cursor.execute("DROP TABLE django_ixn_test_table;")
self.assertNotIn('django_ixn_test_table', tl,
"django_table_names() returned a non-Django table")
def test_django_table_names_retval_type(self):
# django_table_names() returns a list (#15216).
tl = connection.introspection.django_table_names(only_existing=True)
self.assertIs(type(tl), list)
tl = connection.introspection.django_table_names(only_existing=False)
self.assertIs(type(tl), list)
def test_table_names_with_views(self):
with connection.cursor() as cursor:
try:
cursor.execute(
'CREATE VIEW introspection_article_view AS SELECT headline '
'from introspection_article;')
except DatabaseError as e:
if 'insufficient privileges' in str(e):
self.fail("The test user has no CREATE VIEW privileges")
else:
raise
try:
self.assertIn('introspection_article_view', connection.introspection.table_names(include_views=True))
self.assertNotIn('introspection_article_view', connection.introspection.table_names())
finally:
with connection.cursor() as cursor:
cursor.execute('DROP VIEW introspection_article_view')
def test_unmanaged_through_model(self):
tables = connection.introspection.django_table_names()
self.assertNotIn(ArticleReporter._meta.db_table, tables)
def test_installed_models(self):
tables = [Article._meta.db_table, Reporter._meta.db_table]
models = connection.introspection.installed_models(tables)
self.assertEqual(models, {Article, Reporter})
def test_sequence_list(self):
sequences = connection.introspection.sequence_list()
reporter_seqs = [seq for seq in sequences if seq['table'] == Reporter._meta.db_table]
self.assertEqual(len(reporter_seqs), 1, 'Reporter sequence not found in sequence_list()')
self.assertEqual(reporter_seqs[0]['column'], 'id')
def test_get_table_description_names(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual([r[0] for r in desc],
[f.column for f in Reporter._meta.fields])
def test_get_table_description_types(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[connection.introspection.get_field_type(r[1], r) for r in desc],
[
'AutoField' if connection.features.can_introspect_autofield else 'IntegerField',
'CharField',
'CharField',
'CharField',
'BigIntegerField' if connection.features.can_introspect_big_integer_field else 'IntegerField',
'BinaryField' if connection.features.can_introspect_binary_field else 'TextField',
'SmallIntegerField' if connection.features.can_introspect_small_integer_field else 'IntegerField',
'DurationField' if connection.features.can_introspect_duration_field else 'BigIntegerField',
]
)
def test_get_table_description_col_lengths(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[r[3] for r in desc if connection.introspection.get_field_type(r[1], r) == 'CharField'],
[30, 30, 254]
)
def test_get_table_description_nullable(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
nullable_by_backend = connection.features.interprets_empty_strings_as_nulls
self.assertEqual(
[r[6] for r in desc],
[False, nullable_by_backend, nullable_by_backend, nullable_by_backend, True, True, False, False]
)
@skipUnlessDBFeature('can_introspect_autofield')
def test_bigautofield(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, City._meta.db_table)
self.assertIn(
connection.features.introspected_big_auto_field_type,
[connection.introspection.get_field_type(r[1], r) for r in desc],
)
# Regression test for #9991 - 'real' types in postgres
@skipUnlessDBFeature('has_real_datatype')
def test_postgresql_real_type(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
cursor.execute('DROP TABLE django_ixn_real_test_table;')
self.assertEqual(connection.introspection.get_field_type(desc[0][1], desc[0]), 'FloatField')
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_relations(self):
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
# That's {field_name: (field_name_other_table, other_table)}
expected_relations = {
'reporter_id': ('id', Reporter._meta.db_table),
'response_to_id': ('id', Article._meta.db_table),
}
self.assertEqual(relations, expected_relations)
# Removing a field shouldn't disturb get_relations (#17785)
body = Article._meta.get_field('body')
with connection.schema_editor() as editor:
editor.remove_field(Article, body)
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
with connection.schema_editor() as editor:
editor.add_field(Article, body)
self.assertEqual(relations, expected_relations)
@skipUnless(connection.vendor == 'sqlite', "This is an sqlite-specific issue")
def test_get_relations_alt_format(self):
"""
With SQLite, foreign keys can be added with different syntaxes and
formatting.
"""
create_table_statements = [
"CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY(art_id) REFERENCES {}(id));",
"CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY (art_id) REFERENCES {}(id));"
]
for statement in create_table_statements:
with connection.cursor() as cursor:
cursor.fetchone = mock.Mock(return_value=[statement.format(Article._meta.db_table), 'table'])
relations = connection.introspection.get_relations(cursor, 'mocked_table')
self.assertEqual(relations, {'art_id': ('id', Article._meta.db_table)})
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_key_columns(self):
with connection.cursor() as cursor:
key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table)
self.assertEqual(set(key_columns), {
('reporter_id', Reporter._meta.db_table, 'id'),
('response_to_id', Article._meta.db_table, 'id'),
})
def test_get_primary_key_column(self):
with connection.cursor() as cursor:
primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table)
pk_fk_column = connection.introspection.get_primary_key_column(cursor, District._meta.db_table)
self.assertEqual(primary_key_column, 'id')
self.assertEqual(pk_fk_column, 'city_id')
def test_get_constraints_index_types(self):
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table)
index = {}
index2 = {}
for val in constraints.values():
if val['columns'] == ['headline', 'pub_date']:
index = val
if val['columns'] == ['headline', 'response_to_id', 'pub_date', 'reporter_id']:
index2 = val
self.assertEqual(index['type'], Index.suffix)
self.assertEqual(index2['type'], Index.suffix)
@skipUnlessDBFeature('supports_index_column_ordering')
def test_get_constraints_indexes_orders(self):
"""
Indexes have the 'orders' key with a list of 'ASC'/'DESC' values.
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table)
indexes_verified = 0
expected_columns = [
['reporter_id'],
['headline', 'pub_date'],
['response_to_id'],
['headline', 'response_to_id', 'pub_date', 'reporter_id'],
]
for val in constraints.values():
if val['index'] and not (val['primary_key'] or val['unique']):
self.assertIn(val['columns'], expected_columns)
self.assertEqual(val['orders'], ['ASC'] * len(val['columns']))
indexes_verified += 1
self.assertEqual(indexes_verified, 4)
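# Illustrative sketch (not part of the original suite): the introspection API
# used throughout these tests is cursor-based; a typical round trip over a
# single table looks like this. The default table name is a placeholder.
def _example_introspect(table_name='introspection_article'):
    """Fetch the column descriptions and constraints for one table."""
    with connection.cursor() as cursor:
        description = connection.introspection.get_table_description(cursor, table_name)
        constraints = connection.introspection.get_constraints(cursor, table_name)
    return description, constraints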
|
00c953207a7ff2474c81519ff9fd8578169c4b4b7b7a27a4057ac1e55ba52d60 | import datetime
import decimal
import gettext as gettext_module
import os
import pickle
import re
import tempfile
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from threading import local
from unittest import mock
import _thread
from django import forms
from django.apps import AppConfig
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.conf.urls.i18n import i18n_patterns
from django.template import Context, Template
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.utils import translation
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.formats import (
date_format, get_format, get_format_modules, iter_format_modules, localize,
localize_input, reset_format_cache, sanitize_separators, time_format,
)
from django.utils.numberformat import format as nformat
from django.utils.safestring import SafeString, mark_safe
from django.utils.translation import (
LANGUAGE_SESSION_KEY, activate, check_for_language, deactivate,
get_language, get_language_bidi, get_language_from_request,
get_language_info, gettext, gettext_lazy, ngettext, ngettext_lazy,
npgettext, npgettext_lazy, pgettext, to_language, to_locale, trans_null,
trans_real, ugettext, ugettext_lazy, ugettext_noop, ungettext,
ungettext_lazy,
)
from django.utils.translation.reloader import (
translation_file_changed, watch_for_translation_changes,
)
from .forms import CompanyForm, I18nForm, SelectDateForm
from .models import Company, TestModel
here = os.path.dirname(os.path.abspath(__file__))
extended_locale_paths = settings.LOCALE_PATHS + [
os.path.join(here, 'other', 'locale'),
]
class AppModuleStub:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@contextmanager
def patch_formats(lang, **settings):
from django.utils.formats import _format_cache
# Populate _format_cache with temporary values
for key, value in settings.items():
_format_cache[(key, lang)] = value
try:
yield
finally:
reset_format_cache()
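# Illustrative sketch (not part of the original suite): patch_formats() seeds
# django.utils.formats._format_cache directly, so a patched value is only
# visible for that exact (format key, language) pair, and only while format
# lookups actually consult the active language (i.e. with USE_L10N enabled,
# as in FormattingTests below). Typical use:
def _example_patched_decimal_separator(test_body):
    """Run test_body() with ',' patched in as the German decimal separator."""
    with translation.override('de'), patch_formats('de', DECIMAL_SEPARATOR=','):
        test_body()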
class TranslationTests(SimpleTestCase):
@translation.override('de')
def test_legacy_aliases(self):
"""
Pre-Django 2.0 aliases with u prefix are still available.
"""
msg = (
'django.utils.translation.ugettext_noop() is deprecated in favor '
'of django.utils.translation.gettext_noop().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(ugettext_noop("Image"), "Image")
msg = (
'django.utils.translation.ugettext() is deprecated in favor of '
'django.utils.translation.gettext().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(ugettext("Image"), "Bild")
msg = (
'django.utils.translation.ugettext_lazy() is deprecated in favor '
'of django.utils.translation.gettext_lazy().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(ugettext_lazy("Image"), gettext_lazy("Image"))
msg = (
'django.utils.translation.ungettext() is deprecated in favor of '
'django.utils.translation.ngettext().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(ungettext("%d year", "%d years", 0) % 0, "0 Jahre")
msg = (
'django.utils.translation.ungettext_lazy() is deprecated in favor '
'of django.utils.translation.ngettext_lazy().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(
ungettext_lazy("%d year", "%d years", 0) % 0,
ngettext_lazy("%d year", "%d years", 0) % 0,
)
@translation.override('fr')
def test_plural(self):
"""
Test plurals with ngettext. French differs from English in that 0 is singular.
"""
self.assertEqual(ngettext("%d year", "%d years", 0) % 0, "0 année")
self.assertEqual(ngettext("%d year", "%d years", 2) % 2, "2 années")
self.assertEqual(ngettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}, "0 octet")
self.assertEqual(ngettext("%(size)d byte", "%(size)d bytes", 2) % {'size': 2}, "2 octets")
def test_plural_null(self):
g = trans_null.ngettext
self.assertEqual(g('%d year', '%d years', 0) % 0, '0 years')
self.assertEqual(g('%d year', '%d years', 1) % 1, '1 year')
self.assertEqual(g('%d year', '%d years', 2) % 2, '2 years')
def test_override(self):
activate('de')
try:
with translation.override('pl'):
self.assertEqual(get_language(), 'pl')
self.assertEqual(get_language(), 'de')
with translation.override(None):
self.assertIsNone(get_language())
with translation.override('pl'):
pass
self.assertIsNone(get_language())
self.assertEqual(get_language(), 'de')
finally:
deactivate()
def test_override_decorator(self):
@translation.override('pl')
def func_pl():
self.assertEqual(get_language(), 'pl')
@translation.override(None)
def func_none():
self.assertIsNone(get_language())
try:
activate('de')
func_pl()
self.assertEqual(get_language(), 'de')
func_none()
self.assertEqual(get_language(), 'de')
finally:
deactivate()
def test_override_exit(self):
"""
The language restored is the one used when the function was
called, not the one used when the decorator was initialized (#23381).
"""
activate('fr')
@translation.override('pl')
def func_pl():
pass
deactivate()
try:
activate('en')
func_pl()
self.assertEqual(get_language(), 'en')
finally:
deactivate()
def test_lazy_objects(self):
"""
Format string interpolation should work with *_lazy objects.
"""
s = gettext_lazy('Add %(name)s')
d = {'name': 'Ringo'}
self.assertEqual('Add Ringo', s % d)
with translation.override('de', deactivate=True):
self.assertEqual('Ringo hinzuf\xfcgen', s % d)
with translation.override('pl'):
self.assertEqual('Dodaj Ringo', s % d)
# It should be possible to compare *_lazy objects.
s1 = gettext_lazy('Add %(name)s')
self.assertEqual(s, s1)
s2 = gettext_lazy('Add %(name)s')
s3 = gettext_lazy('Add %(name)s')
self.assertEqual(s2, s3)
self.assertEqual(s, s2)
s4 = gettext_lazy('Some other string')
self.assertNotEqual(s, s4)
def test_lazy_pickle(self):
s1 = gettext_lazy("test")
self.assertEqual(str(s1), "test")
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(str(s2), "test")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ngettext_lazy(self):
simple_with_format = ngettext_lazy('%d good result', '%d good results')
simple_context_with_format = npgettext_lazy('Exclamation', '%d good result', '%d good results')
simple_without_format = ngettext_lazy('good result', 'good results')
with translation.override('de'):
self.assertEqual(simple_with_format % 1, '1 gutes Resultat')
self.assertEqual(simple_with_format % 4, '4 guten Resultate')
self.assertEqual(simple_context_with_format % 1, '1 gutes Resultat!')
self.assertEqual(simple_context_with_format % 4, '4 guten Resultate!')
self.assertEqual(simple_without_format % 1, 'gutes Resultat')
self.assertEqual(simple_without_format % 4, 'guten Resultate')
complex_nonlazy = ngettext_lazy('Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4)
complex_deferred = ngettext_lazy(
'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num'
)
complex_context_nonlazy = npgettext_lazy(
'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4
)
complex_context_deferred = npgettext_lazy(
'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num'
)
with translation.override('de'):
self.assertEqual(complex_nonlazy % {'num': 4, 'name': 'Jim'}, 'Hallo Jim, 4 guten Resultate')
self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 1}, 'Hallo Jim, 1 gutes Resultat')
self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 5}, 'Hallo Jim, 5 guten Resultate')
with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'):
complex_deferred % {'name': 'Jim'}
self.assertEqual(complex_context_nonlazy % {'num': 4, 'name': 'Jim'}, 'Willkommen Jim, 4 guten Resultate')
self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 1}, 'Willkommen Jim, 1 gutes Resultat')
self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 5}, 'Willkommen Jim, 5 guten Resultate')
with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'):
complex_context_deferred % {'name': 'Jim'}
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ngettext_lazy_format_style(self):
simple_with_format = ngettext_lazy('{} good result', '{} good results')
simple_context_with_format = npgettext_lazy('Exclamation', '{} good result', '{} good results')
with translation.override('de'):
self.assertEqual(simple_with_format.format(1), '1 gutes Resultat')
self.assertEqual(simple_with_format.format(4), '4 guten Resultate')
self.assertEqual(simple_context_with_format.format(1), '1 gutes Resultat!')
self.assertEqual(simple_context_with_format.format(4), '4 guten Resultate!')
complex_nonlazy = ngettext_lazy('Hi {name}, {num} good result', 'Hi {name}, {num} good results', 4)
complex_deferred = ngettext_lazy(
'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 'num'
)
complex_context_nonlazy = npgettext_lazy(
'Greeting', 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 4
)
complex_context_deferred = npgettext_lazy(
'Greeting', 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 'num'
)
with translation.override('de'):
self.assertEqual(complex_nonlazy.format(num=4, name='Jim'), 'Hallo Jim, 4 guten Resultate')
self.assertEqual(complex_deferred.format(name='Jim', num=1), 'Hallo Jim, 1 gutes Resultat')
self.assertEqual(complex_deferred.format(name='Jim', num=5), 'Hallo Jim, 5 guten Resultate')
with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'):
complex_deferred.format(name='Jim')
self.assertEqual(complex_context_nonlazy.format(num=4, name='Jim'), 'Willkommen Jim, 4 guten Resultate')
self.assertEqual(complex_context_deferred.format(name='Jim', num=1), 'Willkommen Jim, 1 gutes Resultat')
self.assertEqual(complex_context_deferred.format(name='Jim', num=5), 'Willkommen Jim, 5 guten Resultate')
with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'):
complex_context_deferred.format(name='Jim')
def test_ngettext_lazy_bool(self):
self.assertTrue(ngettext_lazy('%d good result', '%d good results'))
self.assertFalse(ngettext_lazy('', ''))
def test_ngettext_lazy_pickle(self):
s1 = ngettext_lazy('%d good result', '%d good results')
self.assertEqual(s1 % 1, '1 good result')
self.assertEqual(s1 % 8, '8 good results')
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(s2 % 1, '1 good result')
self.assertEqual(s2 % 8, '8 good results')
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_pgettext(self):
trans_real._active = local()
trans_real._translations = {}
with translation.override('de'):
self.assertEqual(pgettext("unexisting", "May"), "May")
self.assertEqual(pgettext("month name", "May"), "Mai")
self.assertEqual(pgettext("verb", "May"), "Kann")
self.assertEqual(npgettext("search", "%d result", "%d results", 4) % 4, "4 Resultate")
def test_empty_value(self):
"""Empty value must stay empty after being translated (#23196)."""
with translation.override('de'):
self.assertEqual('', gettext(''))
s = mark_safe('')
self.assertEqual(s, gettext(s))
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_safe_status(self):
"""
Translating a string requiring no auto-escaping with gettext or pgettext
shouldn't change the "safe" status.
"""
trans_real._active = local()
trans_real._translations = {}
s1 = mark_safe('Password')
s2 = mark_safe('May')
with translation.override('de', deactivate=True):
self.assertIs(type(gettext(s1)), SafeString)
self.assertIs(type(pgettext('month name', s2)), SafeString)
self.assertEqual('aPassword', SafeString('a') + s1)
self.assertEqual('Passworda', s1 + SafeString('a'))
self.assertEqual('Passworda', s1 + mark_safe('a'))
self.assertEqual('aPassword', mark_safe('a') + s1)
self.assertEqual('as', mark_safe('a') + mark_safe('s'))
def test_maclines(self):
"""
Translations in files with Mac or DOS line endings are converted
to Unix line endings in .po catalogs.
"""
ca_translation = trans_real.translation('ca')
ca_translation._catalog['Mac\nEOF\n'] = 'Catalan Mac\nEOF\n'
ca_translation._catalog['Win\nEOF\n'] = 'Catalan Win\nEOF\n'
with translation.override('ca', deactivate=True):
self.assertEqual('Catalan Mac\nEOF\n', gettext('Mac\rEOF\r'))
self.assertEqual('Catalan Win\nEOF\n', gettext('Win\r\nEOF\r\n'))
def test_to_locale(self):
tests = (
('en', 'en'),
('EN', 'en'),
('en-us', 'en_US'),
('EN-US', 'en_US'),
# With > 2 characters after the dash.
('sr-latn', 'sr_Latn'),
('sr-LATN', 'sr_Latn'),
# With private use subtag (x-informal).
('nl-nl-x-informal', 'nl_NL-x-informal'),
('NL-NL-X-INFORMAL', 'nl_NL-x-informal'),
('sr-latn-x-informal', 'sr_Latn-x-informal'),
('SR-LATN-X-INFORMAL', 'sr_Latn-x-informal'),
)
for lang, locale in tests:
with self.subTest(lang=lang):
self.assertEqual(to_locale(lang), locale)
def test_to_language(self):
self.assertEqual(to_language('en_US'), 'en-us')
self.assertEqual(to_language('sr_Lat'), 'sr-lat')
def test_language_bidi(self):
self.assertIs(get_language_bidi(), False)
with translation.override(None):
self.assertIs(get_language_bidi(), False)
def test_language_bidi_null(self):
self.assertIs(trans_null.get_language_bidi(), False)
with override_settings(LANGUAGE_CODE='he'):
self.assertIs(get_language_bidi(), True)
class TranslationThreadSafetyTests(SimpleTestCase):
def setUp(self):
self._old_language = get_language()
self._translations = trans_real._translations
# here we rely on .split() being called inside the _fetch()
# in trans_real.translation()
class sideeffect_str(str):
def split(self, *args, **kwargs):
res = str.split(self, *args, **kwargs)
trans_real._translations['en-YY'] = None
return res
trans_real._translations = {sideeffect_str('en-XX'): None}
def tearDown(self):
trans_real._translations = self._translations
activate(self._old_language)
def test_bug14894_translation_activate_thread_safety(self):
translation_count = len(trans_real._translations)
# May raise RuntimeError if translation.activate() isn't thread-safe.
translation.activate('pl')
# make sure sideeffect_str actually added a new translation
self.assertLess(translation_count, len(trans_real._translations))
@override_settings(USE_L10N=True)
class FormattingTests(SimpleTestCase):
def setUp(self):
super().setUp()
self.n = decimal.Decimal('66666.666')
self.f = 99999.999
self.d = datetime.date(2009, 12, 31)
self.dt = datetime.datetime(2009, 12, 31, 20, 50)
self.t = datetime.time(10, 15, 48)
self.long = 10000
self.ctxt = Context({
'n': self.n,
't': self.t,
'd': self.d,
'dt': self.dt,
'f': self.f,
'l': self.long,
})
def test_all_format_strings(self):
all_locales = LANG_INFO.keys()
some_date = datetime.date(2017, 10, 14)
some_datetime = datetime.datetime(2017, 10, 14, 10, 23)
for locale in all_locales:
with self.subTest(locale=locale), translation.override(locale):
self.assertIn('2017', date_format(some_date)) # Uses DATE_FORMAT by default
self.assertIn('23', time_format(some_datetime)) # Uses TIME_FORMAT by default
self.assertIn('2017', date_format(some_datetime, format=get_format('DATETIME_FORMAT')))
self.assertIn('2017', date_format(some_date, format=get_format('YEAR_MONTH_FORMAT')))
self.assertIn('14', date_format(some_date, format=get_format('MONTH_DAY_FORMAT')))
self.assertIn('2017', date_format(some_date, format=get_format('SHORT_DATE_FORMAT')))
self.assertIn('2017', date_format(some_datetime, format=get_format('SHORT_DATETIME_FORMAT')))
def test_locale_independent(self):
"""
Locale-independent formatting: nformat() output and explicit date/time
format strings don't depend on the active locale.
"""
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=','))
self.assertEqual('66666A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B'))
self.assertEqual('66666', nformat(self.n, decimal_sep='X', decimal_pos=0, grouping=1, thousand_sep='Y'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(
'66,666.66',
nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=',')
)
self.assertEqual(
'6B6B6B6B6A6',
nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B')
)
self.assertEqual('-66666.6', nformat(-66666.666, decimal_sep='.', decimal_pos=1))
self.assertEqual('-66666.0', nformat(int('-66666'), decimal_sep='.', decimal_pos=1))
self.assertEqual('10000.0', nformat(self.long, decimal_sep='.', decimal_pos=1))
self.assertEqual(
'10,00,00,000.00',
nformat(100000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, 0), thousand_sep=',')
)
self.assertEqual(
'1,0,00,000,0000.00',
nformat(10000000000.00, decimal_sep='.', decimal_pos=2, grouping=(4, 3, 2, 1, 0), thousand_sep=',')
)
self.assertEqual(
'10000,00,000.00',
nformat(1000000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, -1), thousand_sep=',')
)
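# When `grouping` is a sequence, django.utils.numberformat applies the group
# sizes right-to-left from the decimal point: a trailing 0 repeats the last
# size indefinitely, and -1 stops any further grouping -- which is what the
# three assertions above exercise.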
# This unusual grouping/force_grouping combination may be triggered by the intcomma filter (#17414)
self.assertEqual(
'10000',
nformat(self.long, decimal_sep='.', decimal_pos=0, grouping=0, force_grouping=True)
)
# date filter
self.assertEqual('31.12.2009 в 20:50', Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt))
self.assertEqual('⌚ 10:15', Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt))
@override_settings(USE_L10N=False)
def test_l10n_disabled(self):
"""
With format localization (USE_L10N) disabled, the Catalan locale's
translations are used, but not its formats.
"""
with translation.override('ca', deactivate=True):
self.maxDiff = 3000
self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('10:15 a.m.', time_format(self.t))
self.assertEqual('des. 31, 2009', date_format(self.d))
self.assertEqual('desembre 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
self.assertEqual('66666.666', localize(self.n))
self.assertEqual('99999.999', localize(self.f))
self.assertEqual('10000', localize(self.long))
self.assertEqual('des. 31, 2009', localize(self.d))
self.assertEqual('des. 31, 2009, 8:50 p.m.', localize(self.dt))
self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('des. 31, 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('des. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual('10:15 a.m.', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual(
'12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt)
)
form = I18nForm({
'decimal_field': '66666,666',
'float_field': '99999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1.234',
})
self.assertFalse(form.is_valid())
self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['float_field'])
self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['decimal_field'])
self.assertEqual(['Introdu\xefu una data v\xe0lida.'], form.errors['date_field'])
self.assertEqual(['Introdu\xefu una data/hora v\xe0lides.'], form.errors['datetime_field'])
self.assertEqual(['Introdu\xefu un n\xfamero sencer.'], form.errors['integer_field'])
form2 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form2.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form2.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected>desembre</option>'
'</select>'
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# We shouldn't change the behavior of the floatformat filter re:
# thousand separator and grouping when USE_L10N is False even
# if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and
# THOUSAND_SEPARATOR settings are specified
with self.settings(USE_THOUSAND_SEPARATOR=True, NUMBER_GROUPING=1, THOUSAND_SEPARATOR='!'):
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
def test_false_like_locale_formats(self):
"""
The active locale's formats take precedence over the default settings
even if they would be interpreted as False in a conditional test
(e.g. 0 or empty string) (#16938).
"""
with translation.override('fr'):
with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='!'):
self.assertEqual('\xa0', get_format('THOUSAND_SEPARATOR'))
# Even a second time (after the format has been cached)...
self.assertEqual('\xa0', get_format('THOUSAND_SEPARATOR'))
with self.settings(FIRST_DAY_OF_WEEK=0):
self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
# Even a second time (after the format has been cached)...
self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
def test_l10n_enabled(self):
self.maxDiff = 3000
# Catalan locale
with translation.override('ca', deactivate=True):
self.assertEqual(r'j \d\e F \d\e Y', get_format('DATE_FORMAT'))
self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual(',', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('10:15', time_format(self.t))
self.assertEqual('31 de desembre de 2009', date_format(self.d))
self.assertEqual('desembre del 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('31/12/2009 20:50', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66.666,666', localize(self.n))
self.assertEqual('99.999,999', localize(self.f))
self.assertEqual('10.000', localize(self.long))
self.assertEqual('True', localize(True))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666,666', localize(self.n))
self.assertEqual('99999,999', localize(self.f))
self.assertEqual('10000', localize(self.long))
self.assertEqual('31 de desembre de 2009', localize(self.d))
self.assertEqual('31 de desembre de 2009 a les 20:50', localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99.999,999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('10.000', Template('{{ l }}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
form3 = I18nForm({
'decimal_field': '66.666,666',
'float_field': '99.999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1.234',
})
self.assertTrue(form3.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form3.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form3.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form3.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form3.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form3.cleaned_data['time_field'])
self.assertEqual(1234, form3.cleaned_data['integer_field'])
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666,666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999,999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('31 de desembre de 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('31 de desembre de 2009 a les 20:50', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666,67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000,0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual('10:15', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
self.assertEqual('31/12/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual(
'31/12/2009 20:50',
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt)
)
self.assertEqual(date_format(datetime.datetime.now(), "DATE_FORMAT"),
Template('{% now "DATE_FORMAT" %}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
form4 = I18nForm({
'decimal_field': '66666,666',
'float_field': '99999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1234',
})
self.assertTrue(form4.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form4.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form4.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form4.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form4.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form4.cleaned_data['time_field'])
self.assertEqual(1234, form4.cleaned_data['integer_field'])
form5 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form5.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
'</select>'
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected>desembre</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# Russian locale (with E as month)
with translation.override('ru', deactivate=True):
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
'</select>'
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>'
'<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>'
'<option value="3">\u041c\u0430\u0440\u0442</option>'
'<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>'
'<option value="5">\u041c\u0430\u0439</option>'
'<option value="6">\u0418\u044e\u043d\u044c</option>'
'<option value="7">\u0418\u044e\u043b\u044c</option>'
'<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>'
'<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c</option>'
'<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>'
'<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>'
'<option value="12" selected>\u0414\u0435\u043a\u0430\u0431\u0440\u044c</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# English locale
with translation.override('en', deactivate=True):
self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('Dec. 31, 2009', date_format(self.d))
self.assertEqual('December 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66,666.666', localize(self.n))
self.assertEqual('99,999.999', localize(self.f))
self.assertEqual('10,000', localize(self.long))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.666', localize(self.n))
self.assertEqual('99999.999', localize(self.f))
self.assertEqual('10000', localize(self.long))
self.assertEqual('Dec. 31, 2009', localize(self.d))
self.assertEqual('Dec. 31, 2009, 8:50 p.m.', localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66,666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99,999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('10,000', Template('{{ l }}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('Dec. 31, 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('Dec. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual(
'12/31/2009 8:50 p.m.',
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt)
)
form5 = I18nForm({
'decimal_field': '66666.666',
'float_field': '99999.999',
'date_field': '12/31/2009',
'datetime_field': '12/31/2009 20:50',
'time_field': '20:50',
'integer_field': '1234',
})
self.assertTrue(form5.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form5.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form5.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form5.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form5.cleaned_data['time_field'])
self.assertEqual(1234, form5.cleaned_data['integer_field'])
form6 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form6.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form6.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">January</option>'
'<option value="2">February</option>'
'<option value="3">March</option>'
'<option value="4">April</option>'
'<option value="5">May</option>'
'<option value="6">June</option>'
'<option value="7">July</option>'
'<option value="8">August</option>'
'<option value="9">September</option>'
'<option value="10">October</option>'
'<option value="11">November</option>'
'<option value="12" selected>December</option>'
'</select>'
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
def test_sub_locales(self):
"""
Check that sublocales fall back to the main locale for formats.
"""
with self.settings(USE_THOUSAND_SEPARATOR=True):
with translation.override('de-at', deactivate=True):
self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
with translation.override('es-us', deactivate=True):
self.assertEqual('31 de Diciembre de 2009', date_format(self.d))
def test_localized_input(self):
"""
Form input is correctly localized.
"""
self.maxDiff = 1200
with translation.override('de-at', deactivate=True):
form6 = CompanyForm({
'name': 'acme',
'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
'cents_paid': decimal.Decimal('59.47'),
'products_delivered': 12000,
})
self.assertTrue(form6.is_valid())
self.assertHTMLEqual(
form6.as_ul(),
'<li><label for="id_name">Name:</label>'
'<input id="id_name" type="text" name="name" value="acme" maxlength="50" required></li>'
'<li><label for="id_date_added">Date added:</label>'
'<input type="text" name="date_added" value="31.12.2009 06:00:00" id="id_date_added" required></li>'
'<li><label for="id_cents_paid">Cents paid:</label>'
'<input type="text" name="cents_paid" value="59,47" id="id_cents_paid" required></li>'
'<li><label for="id_products_delivered">Products delivered:</label>'
'<input type="text" name="products_delivered" value="12000" id="id_products_delivered" required>'
'</li>'
)
self.assertEqual(localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)), '31.12.2009 06:00:00')
self.assertEqual(datetime.datetime(2009, 12, 31, 6, 0, 0), form6.cleaned_data['date_added'])
with self.settings(USE_THOUSAND_SEPARATOR=True):
# Checking for the localized "products_delivered" field
self.assertInHTML(
'<input type="text" name="products_delivered" '
'value="12.000" id="id_products_delivered" required>',
form6.as_ul()
)
def test_localized_input_func(self):
tests = (
(True, 'True'),
(datetime.date(1, 1, 1), '0001-01-01'),
(datetime.datetime(1, 1, 1), '0001-01-01 00:00:00'),
)
with self.settings(USE_THOUSAND_SEPARATOR=True):
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(localize_input(value), expected)
def test_sanitize_separators(self):
"""
Tests django.utils.formats.sanitize_separators.
"""
# Non-strings are untouched
self.assertEqual(sanitize_separators(123), 123)
with translation.override('ru', deactivate=True):
# Russian locale has non-breaking space (\xa0) as thousand separator
# Usual space is accepted too when sanitizing inputs
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(sanitize_separators('1\xa0234\xa0567'), '1234567')
self.assertEqual(sanitize_separators('77\xa0777,777'), '77777.777')
self.assertEqual(sanitize_separators('12 345'), '12345')
self.assertEqual(sanitize_separators('77 777,777'), '77777.777')
with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=False):
self.assertEqual(sanitize_separators('12\xa0345'), '12\xa0345')
with self.settings(USE_THOUSAND_SEPARATOR=True):
with patch_formats(get_language(), THOUSAND_SEPARATOR='.', DECIMAL_SEPARATOR=','):
self.assertEqual(sanitize_separators('10.234'), '10234')
# Suspicion that user entered dot as decimal separator (#22171)
self.assertEqual(sanitize_separators('10.10'), '10.10')
with self.settings(USE_L10N=False, DECIMAL_SEPARATOR=','):
self.assertEqual(sanitize_separators('1001,10'), '1001.10')
self.assertEqual(sanitize_separators('1001.10'), '1001.10')
with self.settings(
USE_L10N=False, DECIMAL_SEPARATOR=',', USE_THOUSAND_SEPARATOR=True,
THOUSAND_SEPARATOR='.'
):
self.assertEqual(sanitize_separators('1.001,10'), '1001.10')
self.assertEqual(sanitize_separators('1001,10'), '1001.10')
self.assertEqual(sanitize_separators('1001.10'), '1001.10')
self.assertEqual(sanitize_separators('1,001.10'), '1.001.10') # Invalid output
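# The invalid output above is expected: sanitize_separators() rsplits on the
# rightmost DECIMAL_SEPARATOR and only strips THOUSAND_SEPARATOR occurrences
# from the integer part, so the '.' left inside the supposed decimal part
# survives, producing '1.001.10' rather than an error.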
def test_iter_format_modules(self):
"""
Tests the iter_format_modules function.
"""
# Importing some format modules so that we can compare the returned
# modules with these expected modules
default_mod = import_module('django.conf.locale.de.formats')
test_mod = import_module('i18n.other.locale.de.formats')
test_mod2 = import_module('i18n.other2.locale.de.formats')
with translation.override('de-at', deactivate=True):
# Should return the correct default module when no setting is set
self.assertEqual(list(iter_format_modules('de')), [default_mod])
# When the setting is a string, should return the given module and
# the default module
self.assertEqual(
list(iter_format_modules('de', 'i18n.other.locale')),
[test_mod, default_mod])
# When setting is a list of strings, should return the given
# modules and the default module
self.assertEqual(
list(iter_format_modules('de', ['i18n.other.locale', 'i18n.other2.locale'])),
[test_mod, test_mod2, default_mod])
def test_iter_format_modules_stability(self):
"""
iter_format_modules() always yields format modules in a stable and
correct order when both base (ll) and regional (ll_CC) formats exist.
"""
en_format_mod = import_module('django.conf.locale.en.formats')
en_gb_format_mod = import_module('django.conf.locale.en_GB.formats')
self.assertEqual(list(iter_format_modules('en-gb')), [en_gb_format_mod, en_format_mod])
def test_get_format_modules_lang(self):
with translation.override('de', deactivate=True):
self.assertEqual('.', get_format('DECIMAL_SEPARATOR', lang='en'))
def test_get_format_modules_stability(self):
with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'):
with translation.override('de', deactivate=True):
old = "%r" % get_format_modules(reverse=True)
new = "%r" % get_format_modules(reverse=True) # second try
self.assertEqual(new, old, 'Value returned by get_format_modules() must be preserved between calls.')
def test_localize_templatetag_and_filter(self):
"""
Test the {% localize %} templatetag and the localize/unlocalize filters.
"""
context = Context({'int': 1455, 'float': 3.14, 'date': datetime.date(2016, 12, 31)})
template1 = Template(
'{% load l10n %}{% localize %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}; '
'{% localize on %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}'
)
template2 = Template(
'{% load l10n %}{{ int }}/{{ float }}/{{ date }}; '
'{% localize off %}{{ int }}/{{ float }}/{{ date }};{% endlocalize %} '
'{{ int }}/{{ float }}/{{ date }}'
)
template3 = Template(
'{% load l10n %}{{ int }}/{{ float }}/{{ date }}; '
'{{ int|unlocalize }}/{{ float|unlocalize }}/{{ date|unlocalize }}'
)
template4 = Template(
'{% load l10n %}{{ int }}/{{ float }}/{{ date }}; '
'{{ int|localize }}/{{ float|localize }}/{{ date|localize }}'
)
expected_localized = '1.455/3,14/31. Dezember 2016'
expected_unlocalized = '1455/3.14/Dez. 31, 2016'
output1 = '; '.join([expected_localized, expected_localized])
output2 = '; '.join([expected_localized, expected_unlocalized, expected_localized])
output3 = '; '.join([expected_localized, expected_unlocalized])
output4 = '; '.join([expected_unlocalized, expected_localized])
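# template1 and template4 are also asserted with USE_L10N=False below: the
# {% localize %} block and the |localize filter force localization regardless
# of the USE_L10N setting, while bare variables follow the setting.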
with translation.override('de', deactivate=True):
with self.settings(USE_L10N=False, USE_THOUSAND_SEPARATOR=True):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template4.render(context), output4)
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template2.render(context), output2)
self.assertEqual(template3.render(context), output3)
def test_localized_as_text_as_hidden_input(self):
"""
Form inputs rendered with 'as_hidden' or 'as_text' are correctly localized (#18777).
"""
self.maxDiff = 1200
with translation.override('de-at', deactivate=True):
template = Template('{% load l10n %}{{ form.date_added }}; {{ form.cents_paid }}')
template_as_text = Template('{% load l10n %}{{ form.date_added.as_text }}; {{ form.cents_paid.as_text }}')
template_as_hidden = Template(
'{% load l10n %}{{ form.date_added.as_hidden }}; {{ form.cents_paid.as_hidden }}'
)
form = CompanyForm({
'name': 'acme',
'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
'cents_paid': decimal.Decimal('59.47'),
'products_delivered': 12000,
})
context = Context({'form': form})
self.assertTrue(form.is_valid())
self.assertHTMLEqual(
template.render(context),
'<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" required>;'
'<input id="id_cents_paid" name="cents_paid" type="text" value="59,47" required>'
)
self.assertHTMLEqual(
template_as_text.render(context),
'<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" required>;'
' <input id="id_cents_paid" name="cents_paid" type="text" value="59,47" required>'
)
self.assertHTMLEqual(
template_as_hidden.render(context),
'<input id="id_date_added" name="date_added" type="hidden" value="31.12.2009 06:00:00">;'
'<input id="id_cents_paid" name="cents_paid" type="hidden" value="59,47">'
)
def test_format_arbitrary_settings(self):
self.assertEqual(get_format('DEBUG'), 'DEBUG')
def test_get_custom_format(self):
with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'):
with translation.override('fr', deactivate=True):
self.assertEqual('d/m/Y CUSTOM', get_format('CUSTOM_DAY_FORMAT'))
def test_admin_javascript_supported_input_formats(self):
"""
The first input format for DATE_INPUT_FORMATS, TIME_INPUT_FORMATS, and
DATETIME_INPUT_FORMATS must not contain %f since that's unsupported by
the admin's time picker widget.
"""
regex = re.compile('%([^BcdHImMpSwxXyY%])')
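# The character class lists the strftime codes the admin's JavaScript
# date/time parser supports; any other '%x' code captured by the regex is
# reported as unsupported below.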
for language_code, language_name in settings.LANGUAGES:
for format_name in ('DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'):
with self.subTest(language=language_code, format=format_name):
formatter = get_format(format_name, lang=language_code)[0]
self.assertEqual(
regex.findall(formatter), [],
"%s locale's %s uses an unsupported format code." % (language_code, format_name)
)
class MiscTests(SimpleTestCase):
rf = RequestFactory()
@override_settings(LANGUAGE_CODE='de')
def test_english_fallback(self):
"""
With a non-English LANGUAGE_CODE, if the active language is English
or one of its variants, the untranslated string should be returned
instead of falling back to LANGUAGE_CODE (#24413).
"""
self.assertEqual(gettext("Image"), "Bild")
with translation.override('en'):
self.assertEqual(gettext("Image"), "Image")
with translation.override('en-us'):
self.assertEqual(gettext("Image"), "Image")
with translation.override('en-ca'):
self.assertEqual(gettext("Image"), "Image")
def test_parse_spec_http_header(self):
"""
Testing HTTP header parsing. First, we test that we can parse the
values according to the spec (and that we extract all the pieces in
the right order).
"""
tests = [
# Good headers
('de', [('de', 1.0)]),
('en-AU', [('en-au', 1.0)]),
('es-419', [('es-419', 1.0)]),
('*;q=1.00', [('*', 1.0)]),
('en-AU;q=0.123', [('en-au', 0.123)]),
('en-au;q=0.5', [('en-au', 0.5)]),
('en-au;q=1.0', [('en-au', 1.0)]),
('da, en-gb;q=0.25, en;q=0.5', [('da', 1.0), ('en', 0.5), ('en-gb', 0.25)]),
('en-au-xx', [('en-au-xx', 1.0)]),
('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125',
[('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)]),
('*', [('*', 1.0)]),
('de;q=0.', [('de', 0.0)]),
('en; q=1,', [('en', 1.0)]),
('en; q=1.0, * ; q=0.5', [('en', 1.0), ('*', 0.5)]),
# Bad headers
('en-gb;q=1.0000', []),
('en;q=0.1234', []),
('en;q=.2', []),
('abcdefghi-au', []),
('**', []),
('en,,gb', []),
('en-au;q=0.1.0', []),
(('X' * 97) + 'Z,en', []),
('da, en-gb;q=0.8, en;q=0.7,#', []),
('de;q=2.0', []),
('de;q=0.a', []),
('12-345', []),
('', []),
('en;q=1e0', []),
]
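# The bad headers above reflect the grammar of trans_real.accept_language_re:
# q-values need a leading digit and at most three decimals, subtags are
# limited to 8 characters, and any unparseable entry invalidates the whole
# header, yielding an empty result.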
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(trans_real.parse_accept_lang_header(value), tuple(expected))
def test_parse_literal_http_header(self):
"""
Now test that we parse a literal HTTP header correctly.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
self.assertEqual('pt-br', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'}
self.assertEqual('pt', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'}
self.assertEqual('es', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'}
self.assertEqual('es-ar', g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'}
self.assertEqual(g(r), 'es')
# This tests the following scenario: there isn't a main language (zh)
# translation of Django, but there is a translation for the variant
# (zh-hans). When the user sets zh-hans as the preferred language, it
# should be selected by Django without falling back or being ignored.
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans,de'}
self.assertEqual(g(r), 'zh-hans')
r.META = {'HTTP_ACCEPT_LANGUAGE': 'NL'}
self.assertEqual('nl', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'fy'}
self.assertEqual('fy', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'ia'}
self.assertEqual('ia', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'sr-latn'}
self.assertEqual('sr-latn', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans'}
self.assertEqual('zh-hans', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hant'}
self.assertEqual('zh-hant', g(r))
@override_settings(
LANGUAGES=[
('en', 'English'),
('zh-hans', 'Simplified Chinese'),
('zh-hant', 'Traditional Chinese'),
]
)
def test_support_for_deprecated_chinese_language_codes(self):
"""
Some browsers (Firefox, IE, etc.) send deprecated language codes. Since
these codes were removed in Django 1.9, they would otherwise be matched
incorrectly: for example zh-tw (traditional) would be interpreted as
zh-hans (simplified), which is wrong. So these deprecated language codes
should also be accepted.
refs #18419 -- this is explicitly for browser compatibility
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,en'}
self.assertEqual(g(r), 'zh-hans')
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-tw,en'}
self.assertEqual(g(r), 'zh-hant')
def test_special_fallback_language(self):
"""
Some languages may have special fallbacks that don't follow the simple
'fr-ca' -> 'fr' logic (notably Chinese codes).
"""
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-my,en'}
self.assertEqual(get_language_from_request(r), 'zh-hans')
def test_parse_language_cookie(self):
"""
Now test that we parse language preferences stored in a cookie correctly.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'}
r.META = {}
self.assertEqual('pt-br', g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'}
r.META = {}
self.assertEqual('pt', g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
self.assertEqual('es', g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'}
r.META = {}
self.assertEqual(g(r), 'es')
# This tests the following scenario: there isn't a main language (zh)
# translation of Django, but there is a translation for the variant
# (zh-hans). When the user sets zh-hans as the preferred language, it
# should be selected by Django without falling back or being ignored.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-hans'}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
self.assertEqual(g(r), 'zh-hans')
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en', 'English'),
('de', 'German'),
('de-at', 'Austrian German'),
('pt-br', 'Portuguese (Brazil)'),
],
)
def test_get_supported_language_variant_real(self):
g = trans_real.get_supported_language_variant
self.assertEqual(g('en'), 'en')
self.assertEqual(g('en-gb'), 'en')
self.assertEqual(g('de'), 'de')
self.assertEqual(g('de-at'), 'de-at')
self.assertEqual(g('de-ch'), 'de')
self.assertEqual(g('pt-br'), 'pt-br')
self.assertEqual(g('pt'), 'pt-br')
self.assertEqual(g('pt-pt'), 'pt-br')
with self.assertRaises(LookupError):
g('pt', strict=True)
with self.assertRaises(LookupError):
g('pt-pt', strict=True)
with self.assertRaises(LookupError):
g('xyz')
with self.assertRaises(LookupError):
g('xy-zz')
def test_get_supported_language_variant_null(self):
g = trans_null.get_supported_language_variant
self.assertEqual(g(settings.LANGUAGE_CODE), settings.LANGUAGE_CODE)
with self.assertRaises(LookupError):
g('pt')
with self.assertRaises(LookupError):
g('de')
with self.assertRaises(LookupError):
g('de-at')
with self.assertRaises(LookupError):
g('de', strict=True)
with self.assertRaises(LookupError):
g('de-at', strict=True)
with self.assertRaises(LookupError):
g('xyz')
@override_settings(
LANGUAGES=[
('en', 'English'),
('de', 'German'),
('de-at', 'Austrian German'),
('pl', 'Polish'),
],
)
def test_get_language_from_path_real(self):
g = trans_real.get_language_from_path
self.assertEqual(g('/pl/'), 'pl')
self.assertEqual(g('/pl'), 'pl')
self.assertIsNone(g('/xyz/'))
self.assertEqual(g('/en/'), 'en')
self.assertEqual(g('/en-gb/'), 'en')
self.assertEqual(g('/de/'), 'de')
self.assertEqual(g('/de-at/'), 'de-at')
self.assertEqual(g('/de-ch/'), 'de')
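# The language prefix must be a complete path component, so '/de-simple-page/'
# below isn't treated as having a 'de' prefix.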
self.assertIsNone(g('/de-simple-page/'))
def test_get_language_from_path_null(self):
g = trans_null.get_language_from_path
self.assertIsNone(g('/pl/'))
self.assertIsNone(g('/pl'))
self.assertIsNone(g('/xyz/'))
def test_cache_resetting(self):
"""
After changing the LANGUAGES setting, the language cache should be
cleared and previously valid languages should no longer be used (#14170).
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
self.assertEqual('pt-br', g(r))
with self.settings(LANGUAGES=[('en', 'English')]):
self.assertNotEqual('pt-br', g(r))
def test_i18n_patterns_returns_list(self):
with override_settings(USE_I18N=False):
self.assertIsInstance(i18n_patterns([]), list)
with override_settings(USE_I18N=True):
self.assertIsInstance(i18n_patterns([]), list)
class ResolutionOrderI18NTests(SimpleTestCase):
def setUp(self):
super().setUp()
activate('de')
def tearDown(self):
deactivate()
super().tearDown()
def assertGettext(self, msgid, msgstr):
result = gettext(msgid)
self.assertIn(
msgstr, result,
"The string '%s' isn't in the translation of '%s'; the actual result is '%s'."
% (msgstr, msgid, result)
)
class AppResolutionOrderI18NTests(ResolutionOrderI18NTests):
@override_settings(LANGUAGE_CODE='de')
def test_app_translation(self):
# Original translation.
self.assertGettext('Date/time', 'Datum/Zeit')
# Different translation.
with self.modify_settings(INSTALLED_APPS={'append': 'i18n.resolution'}):
# Force refreshing translations.
activate('de')
# Doesn't work because it's added later in the list.
self.assertGettext('Date/time', 'Datum/Zeit')
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.admin.apps.SimpleAdminConfig'}):
# Force refreshing translations.
activate('de')
# Unless the original is removed from the list.
self.assertGettext('Date/time', 'Datum/Zeit (APP)')
@override_settings(LOCALE_PATHS=extended_locale_paths)
class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_locale_paths_translation(self):
self.assertGettext('Time', 'LOCALE_PATHS')
def test_locale_paths_override_app_translation(self):
with self.settings(INSTALLED_APPS=['i18n.resolution']):
self.assertGettext('Time', 'LOCALE_PATHS')
class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_django_fallback(self):
self.assertEqual(gettext('Date/time'), 'Datum/Zeit')
@override_settings(INSTALLED_APPS=['i18n.territorial_fallback'])
class TranslationFallbackI18NTests(ResolutionOrderI18NTests):
def test_sparse_territory_catalog(self):
"""
Untranslated strings for territorial language variants use the
translations of the generic language. In this case, the de-de
translation falls back to de.
"""
with translation.override('de-de'):
self.assertGettext('Test 1 (en)', '(de-de)')
self.assertGettext('Test 2 (en)', '(de)')
class TestModels(TestCase):
def test_lazy(self):
tm = TestModel()
tm.save()
def test_safestr(self):
c = Company(cents_paid=12, products_delivered=1)
c.name = SafeString('Iñtërnâtiônàlizætiøn1')
c.save()
class TestLanguageInfo(SimpleTestCase):
def test_localized_language_info(self):
li = get_language_info('de')
self.assertEqual(li['code'], 'de')
self.assertEqual(li['name_local'], 'Deutsch')
self.assertEqual(li['name'], 'German')
self.assertIs(li['bidi'], False)
def test_unknown_language_code(self):
with self.assertRaisesMessage(KeyError, "Unknown language code xx"):
get_language_info('xx')
with translation.override('xx'):
# A language with no translation catalogs should fallback to the
# untranslated string.
self.assertEqual(gettext("Title"), "Title")
def test_unknown_only_country_code(self):
li = get_language_info('de-xx')
self.assertEqual(li['code'], 'de')
self.assertEqual(li['name_local'], 'Deutsch')
self.assertEqual(li['name'], 'German')
self.assertIs(li['bidi'], False)
def test_unknown_language_code_and_country_code(self):
with self.assertRaisesMessage(KeyError, "Unknown language code xx-xx and xx"):
get_language_info('xx-xx')
def test_fallback_language_code(self):
"""
get_language_info() returns the first fallback language's info if the
lang_info struct doesn't contain the 'name' key.
"""
li = get_language_info('zh-my')
self.assertEqual(li['code'], 'zh-hans')
li = get_language_info('zh-hans')
self.assertEqual(li['code'], 'zh-hans')
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en', 'English'),
('fr', 'French'),
],
MIDDLEWARE=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls',
)
class LocaleMiddlewareTests(TestCase):
def test_streaming_response(self):
# Regression test for #5241
response = self.client.get('/fr/streaming/')
self.assertContains(response, "Oui/Non")
response = self.client.get('/en/streaming/')
self.assertContains(response, "Yes/No")
@override_settings(
MIDDLEWARE=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
)
def test_language_not_saved_to_session(self):
"""
The current language isn't automatically saved to the session on every
request (#21473).
"""
self.client.get('/fr/simple/')
self.assertNotIn(LANGUAGE_SESSION_KEY, self.client.session)
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en', 'English'),
('de', 'German'),
('fr', 'French'),
],
MIDDLEWARE=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls_default_unprefixed',
LANGUAGE_CODE='en',
)
class UnprefixedDefaultLanguageTests(SimpleTestCase):
def test_default_lang_without_prefix(self):
"""
With i18n_patterns(..., prefix_default_language=False), the default
language (settings.LANGUAGE_CODE) should be accessible without a prefix.
"""
response = self.client.get('/simple/')
self.assertEqual(response.content, b'Yes')
def test_other_lang_with_prefix(self):
response = self.client.get('/fr/simple/')
self.assertEqual(response.content, b'Oui')
def test_unprefixed_language_other_than_accept_language(self):
response = self.client.get('/simple/', HTTP_ACCEPT_LANGUAGE='fr')
self.assertEqual(response.content, b'Yes')
def test_page_with_dash(self):
# A page starting with /de* shouldn't match the 'de' language code.
response = self.client.get('/de-simple-page/')
self.assertEqual(response.content, b'Yes')
def test_no_redirect_on_404(self):
"""
A request for a nonexistent URL shouldn't cause a redirect to
/<default_language>/<request_url> when prefix_default_language=False and
/<default_language>/<request_url> has a URL match (#27402).
"""
# A match for /group1/group2/ must exist for this to act as a
# regression test.
response = self.client.get('/group1/group2/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/nonexistent/')
self.assertEqual(response.status_code, 404)
@override_settings(
USE_I18N=True,
LANGUAGES=[
('bg', 'Bulgarian'),
('en-us', 'English'),
('pt-br', 'Portuguese (Brazil)'),
],
MIDDLEWARE=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls'
)
class CountrySpecificLanguageTests(SimpleTestCase):
rf = RequestFactory()
def test_check_for_language(self):
self.assertTrue(check_for_language('en'))
self.assertTrue(check_for_language('en-us'))
self.assertTrue(check_for_language('en-US'))
self.assertFalse(check_for_language('en_US'))
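# check_for_language() validates against language_code_re, which expects
# dashed language tags ('en-US'); the underscore locale form ('en_US') is
# rejected even though the language exists.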
self.assertTrue(check_for_language('be'))
self.assertTrue(check_for_language('be@latin'))
self.assertTrue(check_for_language('sr-RS@latin'))
self.assertTrue(check_for_language('sr-RS@12345'))
self.assertFalse(check_for_language('en-ü'))
self.assertFalse(check_for_language('en\x00'))
self.assertFalse(check_for_language(None))
self.assertFalse(check_for_language('be@ '))
# Specifying encoding is not supported (Django enforces UTF-8)
self.assertFalse(check_for_language('tr-TR.UTF-8'))
self.assertFalse(check_for_language('tr-TR.UTF8'))
self.assertFalse(check_for_language('de-DE.utf-8'))
def test_check_for_language_null(self):
self.assertIs(trans_null.check_for_language('en'), True)
def test_get_language_from_request(self):
# issue 19919
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8,bg;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('en-us', lang)
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'bg-bg,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('bg', lang)
def test_get_language_from_request_null(self):
lang = trans_null.get_language_from_request(None)
self.assertEqual(lang, 'en')
with override_settings(LANGUAGE_CODE='de'):
lang = trans_null.get_language_from_request(None)
self.assertEqual(lang, 'de')
def test_specific_language_codes(self):
# issue 11915
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('pt-br', lang)
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('pt-br', lang)
class TranslationFilesMissing(SimpleTestCase):
def setUp(self):
super().setUp()
self.gettext_find_builtin = gettext_module.find
def tearDown(self):
gettext_module.find = self.gettext_find_builtin
super().tearDown()
def patchGettextFind(self):
gettext_module.find = lambda *args, **kw: None
def test_failure_finding_default_mo_files(self):
"""OSError is raised if the default language is unparseable."""
self.patchGettextFind()
trans_real._translations = {}
with self.assertRaises(OSError):
activate('en')
class NonDjangoLanguageTests(SimpleTestCase):
"""
A language not present in Django's default languages can still be
installed and used by a Django project.
"""
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en-us', 'English'),
('xxx', 'Somelanguage'),
],
LANGUAGE_CODE='xxx',
LOCALE_PATHS=[os.path.join(here, 'commands', 'locale')],
)
def test_non_django_language(self):
self.assertEqual(get_language(), 'xxx')
self.assertEqual(gettext("year"), "reay")
@override_settings(USE_I18N=True)
def test_check_for_language(self):
with tempfile.TemporaryDirectory() as app_dir:
os.makedirs(os.path.join(app_dir, 'locale', 'dummy_Lang', 'LC_MESSAGES'))
open(os.path.join(app_dir, 'locale', 'dummy_Lang', 'LC_MESSAGES', 'django.mo'), 'w').close()
app_config = AppConfig('dummy_app', AppModuleStub(__path__=[app_dir]))
with mock.patch('django.apps.apps.get_app_configs', return_value=[app_config]):
self.assertIs(check_for_language('dummy-lang'), True)
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en-us', 'English'),
# xyz language has no locale files
('xyz', 'XYZ'),
],
)
@translation.override('xyz')
def test_plural_non_django_language(self):
self.assertEqual(get_language(), 'xyz')
self.assertEqual(ngettext('year', 'years', 2), 'years')
@override_settings(USE_I18N=True)
class WatchForTranslationChangesTests(SimpleTestCase):
@override_settings(USE_I18N=False)
def test_i18n_disabled(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_not_called()
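# With i18n enabled, the autoreloader should be asked to watch every
# discovered locale directory for .mo changes -- Django's own, LOCALE_PATHS,
# installed apps', and the local ./locale directory -- each covered by one
# of the tests below.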
def test_i18n_enabled(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
self.assertGreater(mocked_sender.watch_dir.call_count, 1)
def test_i18n_locale_paths(self):
mocked_sender = mock.MagicMock()
with tempfile.TemporaryDirectory() as app_dir:
with self.settings(LOCALE_PATHS=[app_dir]):
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_any_call(Path(app_dir), '**/*.mo')
def test_i18n_app_dirs(self):
mocked_sender = mock.MagicMock()
with self.settings(INSTALLED_APPS=['tests.i18n.sampleproject']):
watch_for_translation_changes(mocked_sender)
project_dir = Path(__file__).parent / 'sampleproject' / 'locale'
mocked_sender.watch_dir.assert_any_call(project_dir, '**/*.mo')
def test_i18n_local_locale(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
locale_dir = Path(__file__).parent / 'locale'
mocked_sender.watch_dir.assert_any_call(locale_dir, '**/*.mo')
class TranslationFileChangedTests(SimpleTestCase):
def setUp(self):
self.gettext_translations = gettext_module._translations.copy()
self.trans_real_translations = trans_real._translations.copy()
def tearDown(self):
gettext_module._translations = self.gettext_translations
trans_real._translations = self.trans_real_translations
def test_ignores_non_mo_files(self):
gettext_module._translations = {'foo': 'bar'}
path = Path('test.py')
self.assertIsNone(translation_file_changed(None, path))
self.assertEqual(gettext_module._translations, {'foo': 'bar'})
def test_resets_cache_with_mo_files(self):
gettext_module._translations = {'foo': 'bar'}
trans_real._translations = {'foo': 'bar'}
trans_real._default = 1
trans_real._active = False
path = Path('test.mo')
self.assertIs(translation_file_changed(None, path), True)
self.assertEqual(gettext_module._translations, {})
self.assertEqual(trans_real._translations, {})
self.assertIsNone(trans_real._default)
self.assertIsInstance(trans_real._active, _thread._local)
|
fa0a88967fabfc30658c96415ce21e79c61c72c626618531ea64c17b6f921d8f | import os
import signal
from unittest import mock
from django.db.backends.postgresql.client import DatabaseClient
from django.test import SimpleTestCase
class PostgreSqlDbshellCommandTestCase(SimpleTestCase):
def _run_it(self, dbinfo):
"""
Invoke the runshell command while mocking subprocess.call. Return a
2-tuple with:
- The command line list
- The content of the file pointed to by the PGPASSFILE environment
variable, or None.
"""
def _mock_subprocess_call(*args):
self.subprocess_args = list(*args)
if 'PGPASSFILE' in os.environ:
with open(os.environ['PGPASSFILE']) as f:
self.pgpass = f.read().strip() # ignore line endings
else:
self.pgpass = None
return 0
self.subprocess_args = None
self.pgpass = None
with mock.patch('subprocess.call', new=_mock_subprocess_call):
DatabaseClient.runshell_db(dbinfo)
return self.subprocess_args, self.pgpass
def test_basic(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'someuser',
'password': 'somepassword',
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
'somehost:444:dbname:someuser:somepassword',
)
)
def test_nopass(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'someuser',
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
None,
)
)
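# The .pgpass file format separates fields with colons, so literal ':' and
# '\' characters in any field must be escaped with a backslash -- which the
# next two tests verify.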
def test_column(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'some:user',
'password': 'some:password',
'host': '::1',
'port': '444',
}), (
['psql', '-U', 'some:user', '-h', '::1', '-p', '444', 'dbname'],
'\\:\\:1:444:dbname:some\\:user:some\\:password',
)
)
def test_escape_characters(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'some\\user',
'password': 'some\\password',
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', 'some\\user', '-h', 'somehost', '-p', '444', 'dbname'],
'somehost:444:dbname:some\\\\user:some\\\\password',
)
)
def test_accent(self):
username = 'rôle'
password = 'sésame'
pgpass_string = 'somehost:444:dbname:%s:%s' % (username, password)
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': username,
'password': password,
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', username, '-h', 'somehost', '-p', '444', 'dbname'],
pgpass_string,
)
)
def test_sigint_handler(self):
"""SIGINT is ignored in Python and passed to psql to abort quries."""
def _mock_subprocess_call(*args):
handler = signal.getsignal(signal.SIGINT)
self.assertEqual(handler, signal.SIG_IGN)
sigint_handler = signal.getsignal(signal.SIGINT)
# The default handler isn't SIG_IGN.
self.assertNotEqual(sigint_handler, signal.SIG_IGN)
with mock.patch('subprocess.check_call', new=_mock_subprocess_call):
DatabaseClient.runshell_db({})
# dbshell restores the original handler.
self.assertEqual(sigint_handler, signal.getsignal(signal.SIGINT))
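# A minimal sketch (an inference from the test above, not necessarily the
# exact implementation) of how runshell_db can satisfy it: ignore SIGINT
# in the parent while psql runs, then restore the original handler.
#
#     sigint_handler = signal.getsignal(signal.SIGINT)
#     try:
#         signal.signal(signal.SIGINT, signal.SIG_IGN)
#         subprocess.check_call(args)
#     finally:
#         signal.signal(signal.SIGINT, sigint_handler)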
|
f7d33dd91efbe617c6e26efe173bcab9f1ff9788be9afb3c2b1909ae104199a2 | import gettext
import os
import re
from datetime import datetime, timedelta
from importlib import import_module
import pytz
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import CharField, DateField, DateTimeField, UUIDField
from django.test import SimpleTestCase, TestCase, override_settings
from django.urls import reverse
from django.utils import translation
from .models import (
Advisor, Album, Band, Bee, Car, Company, Event, Honeycomb, Individual,
Inventory, Member, MyFileField, Profile, School, Student,
)
from .widgetadmin import site as widget_admin_site
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='secret', email=None)
cls.u2 = User.objects.create_user(username='testser', password='secret')
Car.objects.create(owner=cls.superuser, make='Volkswagen', model='Passat')
Car.objects.create(owner=cls.u2, make='BMW', model='M3')
class AdminFormfieldForDBFieldTests(SimpleTestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
self.assertIsInstance(widget, widgetclass)
# Return the formfield so that other tests can continue
return ff
def test_DateField(self):
self.assertFormfield(Event, 'start_date', widgets.AdminDateWidget)
def test_DateTimeField(self):
self.assertFormfield(Member, 'birthdate', widgets.AdminSplitDateTime)
def test_TimeField(self):
self.assertFormfield(Event, 'start_time', widgets.AdminTimeWidget)
def test_TextField(self):
self.assertFormfield(Event, 'description', widgets.AdminTextareaWidget)
def test_URLField(self):
self.assertFormfield(Event, 'link', widgets.AdminURLFieldWidget)
def test_IntegerField(self):
self.assertFormfield(Event, 'min_age', widgets.AdminIntegerFieldWidget)
def test_CharField(self):
self.assertFormfield(Member, 'name', widgets.AdminTextInputWidget)
def test_EmailField(self):
self.assertFormfield(Member, 'email', widgets.AdminEmailInputWidget)
def test_FileField(self):
self.assertFormfield(Album, 'cover_art', widgets.AdminFileWidget)
def test_ForeignKey(self):
self.assertFormfield(Event, 'main_band', forms.Select)
def test_raw_id_ForeignKey(self):
self.assertFormfield(Event, 'main_band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['main_band'])
def test_radio_fields_ForeignKey(self):
ff = self.assertFormfield(Event, 'main_band', widgets.AdminRadioSelect,
radio_fields={'main_band': admin.VERTICAL})
self.assertIsNone(ff.empty_label)
def test_many_to_many(self):
self.assertFormfield(Band, 'members', forms.SelectMultiple)
def test_raw_id_many_to_many(self):
self.assertFormfield(Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def test_filtered_many_to_many(self):
self.assertFormfield(Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def test_formfield_overrides(self):
self.assertFormfield(Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def test_formfield_overrides_widget_instances(self):
"""
Widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': forms.TextInput(attrs={'size': '10'})}
}
ma = BandAdmin(Band, admin.site)
f1 = ma.formfield_for_dbfield(Band._meta.get_field('name'), request=None)
f2 = ma.formfield_for_dbfield(Band._meta.get_field('style'), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs['maxlength'], '100')
self.assertEqual(f2.widget.attrs['maxlength'], '20')
self.assertEqual(f2.widget.attrs['size'], '10')
def test_formfield_overrides_for_datetime_field(self):
"""
Overriding the widget for DateTimeField doesn't override the default
form_class for that field (#26449).
"""
class MemberAdmin(admin.ModelAdmin):
formfield_overrides = {DateTimeField: {'widget': widgets.AdminSplitDateTime}}
ma = MemberAdmin(Member, admin.site)
f1 = ma.formfield_for_dbfield(Member._meta.get_field('birthdate'), request=None)
self.assertIsInstance(f1.widget, widgets.AdminSplitDateTime)
self.assertIsInstance(f1, forms.SplitDateTimeField)
def test_formfield_overrides_for_custom_field(self):
"""
formfield_overrides works for a custom field class.
"""
class AlbumAdmin(admin.ModelAdmin):
formfield_overrides = {MyFileField: {'widget': forms.TextInput()}}
ma = AlbumAdmin(Album, admin.site)
f1 = ma.formfield_for_dbfield(Album._meta.get_field('backside_art'), request=None)
self.assertIsInstance(f1.widget, forms.TextInput)
def test_field_with_choices(self):
self.assertFormfield(Member, 'gender', forms.Select)
def test_choices_with_radio_fields(self):
self.assertFormfield(Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender': admin.VERTICAL})
def test_inheritance(self):
self.assertFormfield(Album, 'backside_art', widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical = ['companies']
self.assertFormfield(Advisor, 'companies', widgets.FilteredSelectMultiple,
filter_vertical=['companies'])
ma = AdvisorAdmin(Advisor, admin.site)
f = ma.formfield_for_dbfield(Advisor._meta.get_field('companies'), request=None)
self.assertEqual(
f.help_text,
'Hold down "Control", or "Command" on a Mac, to select more than one.'
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase):
def test_filter_choices_by_request_user(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin:admin_widgets_cartire_add'))
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagen Passat")
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyWidgetChangeList(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_changelist_ForeignKey(self):
response = self.client.get(reverse('admin:admin_widgets_car_changelist'))
self.assertContains(response, '/auth/user/add/')
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyRawIdWidget(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_nonexistent_target_id(self):
band = Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"main_band": '%s' % pk,
}
# Try posting with a nonexistent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), post_data)
self.assertContains(response, 'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), {"main_band": test_str})
self.assertContains(response, 'Select a valid choice. That choice is not one of the available choices.')
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
self.assertEqual(lookup1, {'color__in': 'red,blue'})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return 'works'
lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
self.assertEqual(lookup1, lookup2)
def test_label_and_url_for_value_invalid_uuid(self):
field = Bee._meta.get_field('honeycomb')
self.assertIsInstance(field.target_field, UUIDField)
widget = widgets.ForeignKeyRawIdWidget(field.remote_field, admin.site)
self.assertEqual(widget.label_and_url_for_value('invalid-uuid'), ('', ''))
class FilteredSelectMultipleWidgetTest(SimpleTestCase):
def test_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', False)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple name="test" class="selectfilter" '
'data-field-name="test\\" data-is-stacked="0">\n</select>'
)
def test_stacked_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', True)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple name="test" class="selectfilterstacked" '
'data-field-name="test\\" data-is-stacked="1">\n</select>'
)
class AdminDateWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" size="10">',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" size="20">',
)
class AdminTimeWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" size="8">',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" size="20">',
)
class AdminSplitDateTimeWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Date: <input value="2007-12-01" type="text" class="vDateField" '
'name="test_0" size="10"><br>'
'Time: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8"></p>'
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with self.settings(USE_L10N=True), translation.override('de-at'):
w.is_localized = True
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Datum: <input value="01.12.2007" type="text" '
'class="vDateField" name="test_0"size="10"><br>'
'Zeit: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8"></p>'
)
class AdminURLWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', ''),
'<input class="vURLField" name="test" type="url">'
)
self.assertHTMLEqual(
w.render('test', 'http://example.com'),
'<p class="url">Currently:<a href="http://example.com">'
'http://example.com</a><br>'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example.com"></p>'
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', 'http://example-äüö.com'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">'
'http://example-äüö.com</a><br>'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example-äüö.com"></p>'
)
def test_render_quoting(self):
"""
WARNING: This test doesn't use assertHTMLEqual, since that would strip
some of the escaping being tested here!
"""
HREF_RE = re.compile('href="([^"]+)"')
VALUE_RE = re.compile('value="([^"]+)"')
TEXT_RE = re.compile('<a[^>]+>([^>]+)</a>')
w = widgets.AdminURLFieldWidget()
output = w.render('test', 'http://example.com/<sometag>some text</sometag>')
self.assertEqual(
HREF_RE.search(output).groups()[0],
'http://example.com/%3Csometag%3Esome%20text%3C/sometag%3E',
)
self.assertEqual(
TEXT_RE.search(output).groups()[0],
'http://example.com/<sometag>some text</sometag>',
)
self.assertEqual(
VALUE_RE.search(output).groups()[0],
'http://example.com/<sometag>some text</sometag>',
)
output = w.render('test', 'http://example-äüö.com/<sometag>some text</sometag>')
self.assertEqual(
HREF_RE.search(output).groups()[0],
'http://xn--example--7za4pnc.com/%3Csometag%3Esome%20text%3C/sometag%3E',
)
self.assertEqual(
TEXT_RE.search(output).groups()[0],
'http://example-äüö.com/<sometag>some text</sometag>',
)
self.assertEqual(
VALUE_RE.search(output).groups()[0],
'http://example-äüö.com/<sometag>some text</sometag>',
)
output = w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"')
self.assertEqual(
HREF_RE.search(output).groups()[0],
'http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22',
)
self.assertEqual(
TEXT_RE.search(output).groups()[0],
'http://www.example.com/%C3%A4"><script>'
'alert("XSS!")</script>"'
)
self.assertEqual(
VALUE_RE.search(output).groups()[0],
'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"',
)
class AdminUUIDWidgetTests(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminUUIDInputWidget()
self.assertHTMLEqual(
w.render('test', '550e8400-e29b-41d4-a716-446655440000'),
'<input value="550e8400-e29b-41d4-a716-446655440000" type="text" class="vUUIDField" name="test">',
)
w = widgets.AdminUUIDInputWidget(attrs={'class': 'myUUIDInput'})
self.assertHTMLEqual(
w.render('test', '550e8400-e29b-41d4-a716-446655440000'),
'<input value="550e8400-e29b-41d4-a716-446655440000" type="text" class="myUUIDInput" name="test">',
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminFileWidgetTests(TestDataMixin, TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
band = Band.objects.create(name='Linkin Park')
cls.album = band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
def test_render(self):
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id"> '
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test"></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
self.assertHTMLEqual(
w.render('test', SimpleUploadedFile('test', b'content')),
'<input type="file" name="test">',
)
def test_render_required(self):
widget = widgets.AdminFileWidget()
widget.is_required = True
self.assertHTMLEqual(
widget.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a><br>'
'Change: <input type="file" name="test"></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
def test_readonly_fields(self):
"""
File widgets should render as a link when they're marked "read only."
"""
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin:admin_widgets_album_change', args=(self.album.id,)))
self.assertContains(
response,
'<div class="readonly"><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
r'albums\hybrid_theory.jpg</a></div>' % {'STORAGE_URL': default_storage.url('')},
html=True,
)
self.assertNotContains(
response,
'<input type="file" name="cover_art" id="id_cover_art">',
html=True,
)
response = self.client.get(reverse('admin:admin_widgets_album_add'))
self.assertContains(
response,
'<div class="readonly"></div>',
html=True,
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ForeignKeyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name='Linkin Park')
band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
rel = Album._meta.get_field('band').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', band.pk, attrs={}),
'<input type="text" name="test" value="%(bandpk)s" '
'class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/band/?_to_field=id" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a> <strong>'
'<a href="/admin_widgets/band/%(bandpk)s/change/">Linkin Park</a>'
'</strong>' % {'bandpk': band.pk}
)
def test_relations_to_non_primary_key(self):
# ForeignKeyRawIdWidget works with fields which aren't related to
# the model's primary key.
apple = Inventory.objects.create(barcode=86, name='Apple')
Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(
barcode=87, name='Core', parent=apple
)
rel = Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', core.parent_id, attrs={}),
'<input type="text" name="test" value="86" '
'class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Apple</a></strong>' % {'pk': apple.pk}
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = Honeycomb.objects.create(location='Old tree')
big_honeycomb.bee_set.create()
rel = Bee._meta.get_field('honeycomb').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s">'
' <strong>%(hcomb)s</strong>'
% {'hcombpk': big_honeycomb.pk, 'hcomb': big_honeycomb}
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = Individual.objects.create(name='Subject #1')
Individual.objects.create(name='Child', parent=subject1)
rel = Individual._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('individual_widget', subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s">'
' <strong>%(subj1)s</strong>'
% {'subj1pk': subject1.pk, 'subj1': subject1}
)
def test_proper_manager_for_label_lookup(self):
# Regression test for #9258: the raw ID widget's label lookup must use a
# manager that can find objects hidden from the default manager.
rel = Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = Inventory.objects.create(
barcode=93, name='Hidden', hidden=True
)
child_of_hidden = Inventory.objects.create(
barcode=94, name='Child of hidden', parent=hidden
)
self.assertHTMLEqual(
w.render('test', child_of_hidden.parent_id, attrs={}),
'<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Hidden</a></strong>' % {'pk': hidden.pk}
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ManyToManyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name='Linkin Park')
m1 = Member.objects.create(name='Chester')
m2 = Member.objects.create(name='Mike')
band.members.add(m1, m2)
rel = Band._meta.get_field('members').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', [m1.pk, m2.pk], attrs={}), (
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % {'m1pk': m1.pk, 'm2pk': m2.pk}
)
self.assertHTMLEqual(
w.render('test', [m1.pk]), (
'<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % {'m1pk': m1.pk}
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = Advisor.objects.create(name='Rockstar Techie')
c1 = Company.objects.create(name='Doodle')
c2 = Company.objects.create(name='Pear')
consultor1.companies.add(c1, c2)
rel = Advisor._meta.get_field('companies').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s">' % {'c1pk': c1.pk, 'c2pk': c2.pk}
)
self.assertHTMLEqual(
w.render('company_widget2', [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s">' % {'c1pk': c1.pk}
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class RelatedFieldWidgetWrapperTests(SimpleTestCase):
def test_no_can_add_related(self):
rel = Individual._meta.get_field('parent').remote_field
w = widgets.AdminRadioSelect()
# Used to fail with a NameError.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
def test_select_multiple_widget_cant_change_delete_related(self):
rel = Individual._meta.get_field('parent').remote_field
widget = forms.SelectMultiple()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertFalse(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_on_delete_cascade_rel_cant_delete_related(self):
rel = Individual._meta.get_field('soulmate').remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertTrue(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_custom_widget_render(self):
class CustomWidget(forms.Select):
def render(self, *args, **kwargs):
return 'custom render output'
rel = Album._meta.get_field('band').remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
output = wrapper.render('name', 'value')
self.assertIn('custom render output', output)
def test_widget_delegates_value_omitted_from_data(self):
class CustomWidget(forms.Select):
def value_omitted_from_data(self, data, files, name):
return False
rel = Album._meta.get_field('band').remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.value_omitted_from_data({}, {}, 'band'), False)
def test_widget_is_hidden(self):
rel = Album._meta.get_field('band').remote_field
widget = forms.HiddenInput()
widget.choices = ()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.is_hidden, True)
context = wrapper.get_context('band', None, {})
self.assertIs(context['is_hidden'], True)
output = wrapper.render('name', 'value')
# Related item links are hidden.
self.assertNotIn('<a ', output)
def test_widget_is_not_hidden(self):
rel = Album._meta.get_field('band').remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.is_hidden, False)
context = wrapper.get_context('band', None, {})
self.assertIs(context['is_hidden'], False)
output = wrapper.render('name', 'value')
# Related item links are present.
self.assertIn('<a ', output)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminWidgetSeleniumTestCase(AdminSeleniumTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.u1 = User.objects.create_superuser(username='super', password='secret', email='[email protected]')
class DateTimePickerSeleniumTests(AdminWidgetSeleniumTestCase):
def test_show_hide_date_time_picker_widgets(self):
"""
Pressing the ESC key or clicking on a widget value closes the date and
time picker widgets.
"""
from selenium.webdriver.common.keys import Keys
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# First, with the date picker widget ---------------------------------
cal_icon = self.selenium.find_element_by_id('calendarlink0')
# The date picker is hidden
self.assertEqual(self.get_css_value('#calendarbox0', 'display'), 'none')
# Click the calendar icon
cal_icon.click()
# The date picker is visible
self.assertEqual(self.get_css_value('#calendarbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# The date picker is hidden again
self.assertEqual(self.get_css_value('#calendarbox0', 'display'), 'none')
# Click the calendar icon, then on the 15th of current month
cal_icon.click()
self.selenium.find_element_by_xpath("//a[contains(text(), '15')]").click()
self.assertEqual(self.get_css_value('#calendarbox0', 'display'), 'none')
self.assertEqual(
self.selenium.find_element_by_id('id_birthdate_0').get_attribute('value'),
datetime.today().strftime('%Y-%m-') + '15',
)
# Then, with the time picker widget ----------------------------------
time_icon = self.selenium.find_element_by_id('clocklink0')
# The time picker is hidden
self.assertEqual(self.get_css_value('#clockbox0', 'display'), 'none')
# Click the time icon
time_icon.click()
# The time picker is visible
self.assertEqual(self.get_css_value('#clockbox0', 'display'), 'block')
self.assertEqual(
[
x.text for x in
self.selenium.find_elements_by_xpath("//ul[@class='timelist']/li/a")
],
['Now', 'Midnight', '6 a.m.', 'Noon', '6 p.m.']
)
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# The time picker is hidden again
self.assertEqual(self.get_css_value('#clockbox0', 'display'), 'none')
# Click the time icon, then select the 'Noon' value
time_icon.click()
self.selenium.find_element_by_xpath("//a[contains(text(), 'Noon')]").click()
self.assertEqual(self.get_css_value('#clockbox0', 'display'), 'none')
self.assertEqual(
self.selenium.find_element_by_id('id_birthdate_1').get_attribute('value'),
'12:00:00',
)
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute('class'), 'nonday')
def test_calendar_selected_class(self):
"""
Ensure cell for the day in the input has the `selected` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute('class'), 'selected')
self.assertEqual(selected.text, '1')
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute('class') == 'selected']
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
The calendar shows the date from the input field for every locale
supported by Django.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Enter test data
member = Member.objects.create(name='Bob', birthdate=datetime(1984, 5, 15), gender='M')
# Get month name translations for every locale
month_string = 'May'
path = os.path.join(os.path.dirname(import_module('django.contrib.admin').__file__), 'locale')
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation('djangojs', path, [language_code])
except OSError:
continue
if month_string in catalog._catalog:
month_name = catalog._catalog[month_string]
else:
month_name = month_string
# Get the expected caption
may_translation = month_name
expected_caption = '{0:s} {1:d}'.format(may_translation.upper(), 1984)
# Test with every locale
with override_settings(LANGUAGE_CODE=language_code, USE_L10N=True):
# Open a page that has a date picker widget
url = reverse('admin:admin_widgets_member_change', args=(member.pk,))
self.selenium.get(self.live_server_url + url)
# Click on the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Make sure that the right month and year are displayed
self.wait_for_text('#calendarin0 caption', expected_caption)
@override_settings(TIME_ZONE='Asia/Singapore')
class DateTimePickerShortcutsSeleniumTests(AdminWidgetSeleniumTestCase):
def test_date_time_picker_shortcuts(self):
"""
date/time/datetime picker shortcuts work in the current time zone.
Refs #20663.
This test case is fairly tricky: it relies on Selenium still running the
browser in the default time zone "America/Chicago" despite
`override_settings` changing the time zone to "Asia/Singapore".
"""
self.admin_login(username='super', password='secret', login_url='/')
error_margin = timedelta(seconds=10)
# If we are neighbouring a DST, we add an hour of error margin.
tz = pytz.timezone('America/Chicago')
utc_now = datetime.now(pytz.utc)
tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
if tz_yesterday != tz_tomorrow:
error_margin += timedelta(hours=1)
now = datetime.now()
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
self.selenium.find_element_by_id('id_name').send_keys('test')
# Click on the "today" and "now" shortcuts.
shortcuts = self.selenium.find_elements_by_css_selector('.field-birthdate .datetimeshortcuts')
for shortcut in shortcuts:
shortcut.find_element_by_tag_name('a').click()
# There is a time zone mismatch warning.
# Warning: This would effectively fail if the TIME_ZONE defined in the
# settings has the same UTC offset as "Asia/Singapore" because the
# mismatch warning would be rightfully missing from the page.
self.selenium.find_elements_by_css_selector('.field-birthdate .timezonewarning')
# Submit the form.
self.selenium.find_element_by_tag_name('form').submit()
self.wait_page_loaded()
# Make sure that "now" in javascript is within 10 seconds
# from "now" on the server side.
member = Member.objects.get(name='test')
self.assertGreater(member.birthdate, now - error_margin)
self.assertLess(member.birthdate, now + error_margin)
# The above tests run with Asia/Singapore, which is on the positive side of
# UTC. Here we test with a time zone on the negative side.
@override_settings(TIME_ZONE='US/Eastern')
class DateTimePickerAltTimezoneSeleniumTests(DateTimePickerShortcutsSeleniumTests):
pass
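# Subclassing with only `pass` reruns every inherited test under the
# US/Eastern override declared above.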
class HorizontalVerticalFilterSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
self.lisa = Student.objects.create(name='Lisa')
self.john = Student.objects.create(name='John')
self.bob = Student.objects.create(name='Bob')
self.peter = Student.objects.create(name='Peter')
self.jenny = Student.objects.create(name='Jenny')
self.jason = Student.objects.create(name='Jason')
self.cliff = Student.objects.create(name='Cliff')
self.arthur = Student.objects.create(name='Arthur')
self.school = School.objects.create(name='School of Awesome')
def assertActiveButtons(self, mode, field_name, choose, remove, choose_all=None, remove_all=None):
choose_link = '#id_%s_add_link' % field_name
choose_all_link = '#id_%s_add_all_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
remove_all_link = '#id_%s_remove_all_link' % field_name
self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
if mode == 'horizontal':
self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)
def execute_basic_operations(self, mode, field_name):
original_url = self.selenium.current_url
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
choose_all_link = 'id_%s_add_all_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
remove_all_link = 'id_%s_remove_all_link' % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertSelectOptions(to_box, [str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(choose_all_link).click()
elif mode == 'vertical':
# There's no 'Choose all' button in vertical mode, so individually
# select all options and click 'Choose'.
for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
option.click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(remove_all_link).click()
elif mode == 'vertical':
# There's no 'Remove all' button in vertical mode, so individually
# select all options and click 'Remove'.
for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
option.click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box, [
str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
from_lisa_select_option = self.get_select_option(from_box, str(self.lisa.id))
# Check the title attribute is there for tooltips: ticket #20821
self.assertEqual(from_lisa_select_option.get_attribute('title'), from_lisa_select_option.get_attribute('text'))
from_lisa_select_option.click()
self.get_select_option(from_box, str(self.jason.id)).click()
self.get_select_option(from_box, str(self.bob.id)).click()
self.get_select_option(from_box, str(self.john.id)).click()
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element_by_id(choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.bob.id),
str(self.jason.id), str(self.john.id),
])
# Check the tooltip is still there after moving: ticket #20821
to_lisa_select_option = self.get_select_option(to_box, str(self.lisa.id))
self.assertEqual(to_lisa_select_option.get_attribute('title'), to_lisa_select_option.get_attribute('text'))
# Remove some options -------------------------------------------------
self.get_select_option(to_box, str(self.lisa.id)).click()
self.get_select_option(to_box, str(self.bob.id)).click()
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element_by_id(remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)
])
self.assertSelectOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.get_select_option(from_box, str(self.arthur.id)).click()
self.get_select_option(from_box, str(self.cliff.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id),
])
self.assertSelectOptions(to_box, [
str(self.jason.id), str(self.john.id),
str(self.arthur.id), str(self.cliff.id),
])
# Choose some more options --------------------------------------------
self.get_select_option(from_box, str(self.peter.id)).click()
self.get_select_option(from_box, str(self.lisa.id)).click()
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
# Unselect the options ------------------------------------------------
self.get_select_option(from_box, str(self.peter.id)).click()
self.get_select_option(from_box, str(self.lisa.id)).click()
# Choose some more options --------------------------------------------
self.get_select_option(to_box, str(self.jason.id)).click()
self.get_select_option(to_box, str(self.john.id)).click()
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Unselect the options ------------------------------------------------
self.get_select_option(to_box, str(self.jason.id)).click()
self.get_select_option(to_box, str(self.john.id)).click()
# Pressing buttons shouldn't change the URL.
self.assertEqual(self.selenium.current_url, original_url)
def test_basic(self):
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_school_change', args=(self.school.id,)))
self.wait_page_loaded()
self.execute_basic_operations('vertical', 'students')
self.execute_basic_operations('horizontal', 'alumni')
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()), [self.arthur, self.cliff, self.jason, self.john])
self.assertEqual(list(self.school.alumni.all()), [self.arthur, self.cliff, self.jason, self.john])
def test_filter(self):
"""
Typing in the search box filters out options displayed in the 'from'
box.
"""
from selenium.webdriver.common.keys import Keys
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_school_change', args=(self.school.id,)))
for field_name in ['students', 'alumni']:
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
input = self.selenium.find_element_by_id('id_%s_input' % field_name)
# Initial values
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
# Typing in some characters filters out non-matching options
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys('R')
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
# -----------------------------------------------------------------
# Choosing a filtered option sends it properly to the 'to' box.
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
self.get_select_option(from_box, str(self.jason.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.peter.id), str(self.jason.id),
])
self.get_select_option(to_box, str(self.lisa.id)).click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jenny.id),
str(self.john.id), str(self.lisa.id),
])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
# -----------------------------------------------------------------
# Pressing enter on a filtered option sends it properly to
# the 'to' box.
self.get_select_option(to_box, str(self.jason.id)).click()
self.selenium.find_element_by_id(remove_link).click()
input.send_keys('ja')
self.assertSelectOptions(from_box, [str(self.jason.id)])
input.send_keys([Keys.ENTER])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()), [self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()), [self.jason, self.peter])
def test_back_button_bug(self):
"""
Some browsers had a bug where navigating away from the change page
and then clicking the browser's back button would clear the
filter_horizontal/filter_vertical widgets (#13614).
"""
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
# Navigate away and go back to the change form page.
self.selenium.find_element_by_link_text('Home').click()
self.selenium.back()
expected_unselected_values = [
str(self.arthur.id), str(self.bob.id), str(self.cliff.id),
str(self.jason.id), str(self.jenny.id), str(self.john.id),
]
expected_selected_values = [str(self.lisa.id), str(self.peter.id)]
# Everything is still in place
self.assertSelectOptions('#id_students_from', expected_unselected_values)
self.assertSelectOptions('#id_students_to', expected_selected_values)
self.assertSelectOptions('#id_alumni_from', expected_unselected_values)
self.assertSelectOptions('#id_alumni_to', expected_selected_values)
def test_refresh_page(self):
"""
Horizontal and vertical filter widgets keep selected options on page
reload (#22955).
"""
self.school.students.add(self.arthur, self.jason)
self.school.alumni.add(self.arthur, self.jason)
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
# self.selenium.refresh() or send_keys(Keys.F5) does a hard reload and
# doesn't replicate what happens when a user clicks the browser's
# 'Refresh' button.
self.selenium.execute_script("location.reload()")
self.wait_page_loaded()
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
class AdminRawIdWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
Band.objects.create(id=42, name='Bogey Blues')
Band.objects.create(id=98, name='Green Potatoes')
def test_ForeignKey(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_event_add'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(self.selenium.find_element_by_id('id_main_band').get_attribute('value'), '')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '98')
def test_many_to_many(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_event_add'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(self.selenium.find_element_by_id('id_supporting_bands').get_attribute('value'), '')
# Help text for the field is displayed
self.assertEqual(
self.selenium.find_element_by_css_selector('.field-supporting_bands div.help').text,
'Supporting Bands.'
)
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42,98')
class RelatedFieldWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def test_ForeignKey_using_to_field(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_profile_add'))
main_window = self.selenium.current_window_handle
# Click the Add User button to add a new user.
self.selenium.find_element_by_id('add_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
password_field = self.selenium.find_element_by_id('id_password')
password_field.send_keys('password')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'newuser'
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.selenium.find_element_by_css_selector('#id_user option[value=newuser]')
# Click the Change User button to change it
self.selenium.find_element_by_id('change_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'changednewuser'
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
self.selenium.find_element_by_css_selector('#id_user option[value=changednewuser]')
# Go ahead and submit the form to make sure it works
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.wait_for_text('li.success', 'The profile "changednewuser" was added successfully.')
profiles = Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
|
cb3ed97a47239ed1b0220854618d11506c92bc5135ffa294592971bf564a8a83 | import gettext
import json
from os import path
from django.conf import settings
from django.test import (
RequestFactory, SimpleTestCase, TestCase, modify_settings,
override_settings,
)
from django.test.selenium import SeleniumTestCase
from django.urls import reverse
from django.utils.translation import (
LANGUAGE_SESSION_KEY, get_language, override,
)
from django.views.i18n import JavaScriptCatalog, get_formats
from ..urls import locale_dir
@override_settings(ROOT_URLCONF='view_tests.urls')
class SetLanguageTests(TestCase):
"""Test the django.views.i18n.set_language view."""
def _get_inactive_language_code(self):
"""Return language code for a language which is not activated."""
current_language = get_language()
return [code for code, name in settings.LANGUAGES if code != current_language][0]
def test_setlang(self):
"""
The set_language view can be used to change the session language.
The user is redirected to the 'next' argument if provided.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i_should_not_be_used/')
self.assertRedirects(response, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
# The language is set in a cookie.
language_cookie = self.client.cookies[settings.LANGUAGE_COOKIE_NAME]
self.assertEqual(language_cookie.value, lang_code)
self.assertEqual(language_cookie['domain'], '')
self.assertEqual(language_cookie['path'], '/')
self.assertEqual(language_cookie['max-age'], '')
def test_setlang_unsafe_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe".
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_http_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe" and its scheme is https if the request was sent over https.
"""
lang_code = self._get_inactive_language_code()
non_https_next_url = 'http://testserver/redirection/'
post_data = {'language': lang_code, 'next': non_https_next_url}
# Insecure URL in POST data.
response = self.client.post('/i18n/setlang/', data=post_data, secure=True)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
# Insecure URL in HTTP referer.
response = self.client.post('/i18n/setlang/', secure=True, HTTP_REFERER=non_https_next_url)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_redirect_to_referer(self):
"""
The set_language view redirects to the URL in the referer header when
there isn't a "next" parameter.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i18n/')
self.assertRedirects(response, '/i18n/', fetch_redirect_response=False)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_default_redirect(self):
"""
The set_language view redirects to '/' when there isn't a referer or
"next" parameter.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data)
self.assertRedirects(response, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_performs_redirect_for_ajax_if_explicitly_requested(self):
"""
The set_language view redirects to the "next" parameter for AJAX calls.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertRedirects(response, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_doesnt_perform_a_redirect_to_referer_for_ajax(self):
"""
The set_language view doesn't redirect to the HTTP referer header for
AJAX calls.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
headers = {'HTTP_REFERER': '/', 'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
response = self.client.post('/i18n/setlang/', post_data, **headers)
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_doesnt_perform_a_default_redirect_for_ajax(self):
"""
The set_language view returns 204 for AJAX calls by default.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_unsafe_next_for_ajax(self):
"""
        The fallback to the root URL for the set_language view works for AJAX calls.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.url, '/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_reversal(self):
self.assertEqual(reverse('set_language'), '/i18n/setlang/')
def test_setlang_cookie(self):
        # Force saving the language to a cookie rather than the session by
        # excluding the session middleware and apps that require it.
test_settings = {
'MIDDLEWARE': ['django.middleware.common.CommonMiddleware'],
'LANGUAGE_COOKIE_NAME': 'mylanguage',
'LANGUAGE_COOKIE_AGE': 3600 * 7 * 2,
'LANGUAGE_COOKIE_DOMAIN': '.example.com',
'LANGUAGE_COOKIE_PATH': '/test/',
}
with self.settings(**test_settings):
post_data = {'language': 'pl', 'next': '/views/'}
response = self.client.post('/i18n/setlang/', data=post_data)
language_cookie = response.cookies.get('mylanguage')
self.assertEqual(language_cookie.value, 'pl')
self.assertEqual(language_cookie['domain'], '.example.com')
self.assertEqual(language_cookie['path'], '/test/')
self.assertEqual(language_cookie['max-age'], 3600 * 7 * 2)
def test_setlang_decodes_http_referer_url(self):
"""
The set_language view decodes the HTTP_REFERER URL.
"""
# The URL & view must exist for this to work as a regression test.
self.assertEqual(reverse('with_parameter', kwargs={'parameter': 'x'}), '/test-setlang/x/')
lang_code = self._get_inactive_language_code()
encoded_url = '/test-setlang/%C3%A4/' # (%C3%A4 decodes to ä)
response = self.client.post('/i18n/setlang/', {'language': lang_code}, HTTP_REFERER=encoded_url)
self.assertRedirects(response, encoded_url, fetch_redirect_response=False)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
@modify_settings(MIDDLEWARE={
'append': 'django.middleware.locale.LocaleMiddleware',
})
def test_lang_from_translated_i18n_pattern(self):
response = self.client.post(
'/i18n/setlang/', data={'language': 'nl'},
follow=True, HTTP_REFERER='/en/translated/'
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'nl')
self.assertRedirects(response, '/nl/vertaald/')
# And reverse
response = self.client.post(
'/i18n/setlang/', data={'language': 'en'},
follow=True, HTTP_REFERER='/nl/vertaald/'
)
self.assertRedirects(response, '/en/translated/')
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18NViewTests(SimpleTestCase):
"""Test django.views.i18n views other than set_language."""
@override_settings(LANGUAGE_CODE='de')
def test_get_formats(self):
formats = get_formats()
# Test 3 possible types in get_formats: integer, string, and list.
self.assertEqual(formats['FIRST_DAY_OF_WEEK'], 0)
self.assertEqual(formats['DECIMAL_SEPARATOR'], '.')
self.assertEqual(formats['TIME_INPUT_FORMATS'], ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'])
def test_jsi18n(self):
"""The javascript_catalog can be deployed with language settings"""
for lang_code in ['es', 'fr', 'ru']:
with override(lang_code):
catalog = gettext.translation('djangojs', locale_dir, [lang_code])
trans_txt = catalog.gettext('this is to be translated')
response = self.client.get('/jsi18n/')
self.assertEqual(response['Content-Type'], 'text/javascript; charset="utf-8"')
# response content must include a line like:
# "this is to be translated": <value of trans_txt Python variable>
# json.dumps() is used to be able to check unicode strings
self.assertContains(response, json.dumps(trans_txt), 1)
if lang_code == 'fr':
# Message with context (msgctxt)
self.assertContains(response, '"month name\\u0004May": "mai"', 1)
@override_settings(USE_I18N=False)
def test_jsi18n_USE_I18N_False(self):
response = self.client.get('/jsi18n/')
# default plural function
self.assertContains(response, 'django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };')
self.assertNotContains(response, 'var newcatalog =')
def test_jsoni18n(self):
"""
The json_catalog returns the language catalog and settings as JSON.
"""
with override('de'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode())
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertEqual(data['formats']['TIME_INPUT_FORMATS'], ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'])
self.assertEqual(data['formats']['FIRST_DAY_OF_WEEK'], 0)
self.assertIn('plural', data)
self.assertEqual(data['catalog']['month name\x04May'], 'Mai')
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertEqual(data['plural'], '(n != 1)')
def test_jsi18n_with_missing_en_files(self):
"""
        The javascript_catalog shouldn't load the fallback language in the
        case where the currently selected language is the one being translated
        from, and hence has no translation files at all.
This happens easily when you're translating from English to other
languages and you've set settings.LANGUAGE_CODE to some other language
than English.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'esto tiene que ser traducido')
def test_jsoni18n_with_missing_en_files(self):
"""
Same as above for the json_catalog view. Here we also check for the
expected JSON format.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode())
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertIn('plural', data)
self.assertEqual(data['catalog'], {})
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertIsNone(data['plural'])
def test_jsi18n_fallback_language(self):
"""
Let's make sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('fi'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'il faut le traduire')
self.assertNotContains(response, "Untranslated string")
def test_i18n_fallback_language_plural(self):
"""
The fallback to a language with less plural forms maintains the real
language's number of plural forms and correct translations.
"""
with self.settings(LANGUAGE_CODE='pt'), override('ru'):
response = self.client.get('/jsi18n/')
self.assertEqual(
response.context['catalog']['{count} plural3'],
['{count} plural3 p3', '{count} plural3 p3s', '{count} plural3 p3t']
)
self.assertEqual(
response.context['catalog']['{count} plural2'],
['{count} plural2', '{count} plural2s', '']
)
with self.settings(LANGUAGE_CODE='ru'), override('pt'):
response = self.client.get('/jsi18n/')
self.assertEqual(
response.context['catalog']['{count} plural3'],
['{count} plural3', '{count} plural3s']
)
self.assertEqual(
response.context['catalog']['{count} plural2'],
['{count} plural2', '{count} plural2s']
)
def test_i18n_english_variant(self):
with override('en-gb'):
response = self.client.get('/jsi18n/')
self.assertIn(
'"this color is to be translated": "this colour is to be translated"',
response.context['catalog_str']
)
def test_i18n_language_non_english_default(self):
"""
        Check if the JavaScript i18n view returns an empty language catalog
        if the default language is non-English, the selected language
        is English and there is no 'en' translation available. See #13388,
#3594 and #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
@modify_settings(INSTALLED_APPS={'append': 'view_tests.app0'})
def test_non_english_default_english_userpref(self):
"""
Same as above with the difference that there IS an 'en' translation
        available. The JavaScript i18n view must return a non-empty language catalog
with the proper English translations. See #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n_english_translation/')
self.assertContains(response, 'this app0 string is to be translated')
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('none'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'Choisir une heure')
def test_escaping(self):
        # Force a language via GET; otherwise the gettext functions are a no-op!
response = self.client.get('/jsi18n_admin/?language=de')
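        # \x04 is the EOT byte gettext uses to separate msgctxt from msgid;
        # it must appear JavaScript-escaped in the catalog, not as a raw byte.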
self.assertContains(response, '\\x04')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app5']})
def test_non_BMP_char(self):
"""
Non-BMP characters should not break the javascript_catalog (#21725).
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n/app5/')
self.assertContains(response, 'emoji')
self.assertContains(response, '\\ud83d\\udca9')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
def test_i18n_language_english_default(self):
"""
Check if the JavaScript i18n view returns a complete language catalog
if the default language is en-us, the selected language has a
translation available and a catalog composed by djangojs domain
translations of multiple Python packages is requested. See #13388,
#3594 and #13514 for more details.
"""
base_trans_string = 'il faut traduire cette cha\\u00eene de caract\\u00e8res de '
app1_trans_string = base_trans_string + 'app1'
app2_trans_string = base_trans_string + 'app2'
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n_multi_packages1/')
self.assertContains(response, app1_trans_string)
self.assertContains(response, app2_trans_string)
response = self.client.get('/jsi18n/app1/')
self.assertContains(response, app1_trans_string)
self.assertNotContains(response, app2_trans_string)
response = self.client.get('/jsi18n/app2/')
self.assertNotContains(response, app1_trans_string)
self.assertContains(response, app2_trans_string)
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app3', 'view_tests.app4']})
def test_i18n_different_non_english_languages(self):
"""
        Similar to above, but with neither the default nor the requested
        language being English.
"""
with self.settings(LANGUAGE_CODE='fr'), override('es-ar'):
response = self.client.get('/jsi18n_multi_packages2/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_with_locale_paths(self):
extended_locale_paths = settings.LOCALE_PATHS + [
path.join(
path.dirname(path.dirname(path.abspath(__file__))),
'app3',
'locale',
),
]
with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
with override('es-ar'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_unknown_package_error(self):
view = JavaScriptCatalog.as_view()
request = RequestFactory().get('/')
msg = 'Invalid package(s) provided to JavaScriptCatalog: unknown_package'
with self.assertRaisesMessage(ValueError, msg):
view(request, packages='unknown_package')
msg += ',unknown_package2'
with self.assertRaisesMessage(ValueError, msg):
view(request, packages='unknown_package+unknown_package2')
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18nSeleniumTests(SeleniumTestCase):
# The test cases use fixtures & translations from these apps.
available_apps = [
'django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'view_tests',
]
@override_settings(LANGUAGE_CODE='de')
def test_javascript_gettext(self):
self.selenium.get(self.live_server_url + '/jsi18n_template/')
elem = self.selenium.find_element_by_id("gettext")
self.assertEqual(elem.text, "Entfernen")
elem = self.selenium.find_element_by_id("ngettext_sing")
self.assertEqual(elem.text, "1 Element")
elem = self.selenium.find_element_by_id("ngettext_plur")
self.assertEqual(elem.text, "455 Elemente")
elem = self.selenium.find_element_by_id("ngettext_onnonplural")
self.assertEqual(elem.text, "Bild")
elem = self.selenium.find_element_by_id("pgettext")
self.assertEqual(elem.text, "Kann")
elem = self.selenium.find_element_by_id("npgettext_sing")
self.assertEqual(elem.text, "1 Resultat")
elem = self.selenium.find_element_by_id("npgettext_plur")
self.assertEqual(elem.text, "455 Resultate")
elem = self.selenium.find_element_by_id("formats")
self.assertEqual(
elem.text,
"DATE_INPUT_FORMATS is an object; DECIMAL_SEPARATOR is a string; FIRST_DAY_OF_WEEK is a number;"
)
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
@override_settings(LANGUAGE_CODE='fr')
def test_multiple_catalogs(self):
self.selenium.get(self.live_server_url + '/jsi18n_multi_catalogs/')
elem = self.selenium.find_element_by_id('app1string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app1')
elem = self.selenium.find_element_by_id('app2string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app2')
|
087a6ad3fd8de40eb3c22965cc2522e1ccac525308df4fe14dabf07ed5e7a8db | import importlib
import inspect
import os
import re
import sys
import tempfile
import threading
from io import StringIO
from pathlib import Path
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import path, reverse
from django.utils.functional import SimpleLazyObject
from django.utils.safestring import mark_safe
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
cleanse_setting, technical_500_response,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [path('url/', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(SimpleTestCase):
def test_files(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
with self.assertLogs('django.request', 'ERROR'):
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "Django tried these URL patterns", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(response, "The empty path didn't match any of these.", status_code=404)
def test_technical_404(self):
response = self.client.get('/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
        Numeric IDs and the line numbers in traceback context blocks
        shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
            # We look for an HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
with self.assertLogs('django.request', 'ERROR'):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]), self.assertLogs('django.request', 'ERROR'):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
        Make sure that if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertLogs('django.request', 'ERROR'):
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default URLconf template is shown instead
of the technical 404 page, if the user has not altered their
URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>The install worked successfully! Congratulations!</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
databases = {'default'}
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
            self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
self.assertIn('<p>No POST data</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
            self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ['print %d' % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
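            # _get_lines_from_file(filename, lineno, context_lines) returns a
            # (lower_bound, pre_context, context_line, post_context) tuple, so
            # line 3 with two context lines yields LINES[1:3], LINES[3], and
            # LINES[4:], whatever the EOL convention of the file.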
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError(mark_safe('<p>Top level</p>'))
except AttributeError as explicit:
try:
raise ValueError(mark_safe('<p>Second exception</p>')) from explicit
except ValueError:
raise IndexError(mark_safe('<p>Final exception</p>'))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
            # Both messages appear twice on the page: once rendered as HTML
            # and once as plain text (for the pastebin).
self.assertEqual(2, html.count(explicit_exc.format('<p>Top level</p>')))
self.assertEqual(2, html.count(implicit_exc.format('<p>Second exception</p>')))
self.assertEqual(10, html.count('<p>Final exception</p>'))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format('<p>Top level</p>'), text)
self.assertIn(implicit_exc.format('<p>Second exception</p>'), text)
self.assertEqual(3, text.count('<p>Final exception</p>'))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn('generated in funcName', html)
text = reporter.get_traceback_text()
self.assertIn('"generated" in funcName', text)
def test_reporting_frames_for_cyclic_reference(self):
try:
def test_func():
try:
raise RuntimeError('outer') from RuntimeError('inner')
except RuntimeError as exc:
raise exc.__cause__
test_func()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
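        # Run get_traceback_frames() in a throwaway daemon thread: if the
        # exception chain contains a cycle it fails to break, the join()
        # below times out instead of hanging the test run forever.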
def generate_traceback_frames(*args, **kwargs):
nonlocal tb_frames
tb_frames = reporter.get_traceback_frames()
tb_frames = None
tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True)
tb_generator.start()
tb_generator.join(timeout=5)
if tb_generator.is_alive():
# tb_generator is a daemon that runs until the main thread/process
# exits. This is resource heavy when running the full test suite.
# Setting the following values to None makes
# reporter.get_traceback_frames() exit early.
exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None
tb_generator.join()
self.fail('Cyclic reference in Exception Reporter.get_traceback_frames()')
if tb_frames is None:
            # This can happen if the thread generating the traceback was
            # killed or raised an exception while generating it.
self.fail('Traceback generation failed')
last_frame = tb_frames[-1]
self.assertIn('raise exc.__cause__', last_frame['context_line'])
self.assertEqual(last_frame['filename'], __file__)
self.assertEqual(last_frame['function'], 'test_func')
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report</h1>', html)
        self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe('<p>Local variable</p>')
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
            self.assertIn('<td class="code"><pre>&#x27;&lt;p&gt;Local variable&lt;/p&gt;&#x27;</pre></td>', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
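        # repr('') == "''", so repr() adds two quote characters around the
        # trimmed payload; account for them in the expected trimmed size.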
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
            self.assertEqual(len(html) // 1024 // 128, 0)  # still fits in 128Kb
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h2>Unicode error hint</h2>', html)
self.assertIn('The string that could not be encoded/decoded was: ', html)
            self.assertIn('<strong>&lt;p&gt;mnὀp&lt;/p&gt;</strong>', html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ModuleNotFoundError at /test_view/</h1>', html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
        value = '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
            '<td>items</td><td class="code"><pre>&lt;InMemoryUploadedFile: '
            'items (application/octet-stream)&gt;</pre></td>',
html
)
        # COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
        self.assertInHTML('<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get('/test_view/')
try:
render(request, 'debug/template_error.html')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(Path(__file__).parent.parent, 'templates', 'debug', 'template_error.html')
self.assertIn(
'Template error:\n'
'In template %(path)s, error at line 2\n'
' \'cycle\' tag requires at least two arguments\n'
' 1 : Template with error:\n'
' 2 : {%% cycle %%} \n'
' 3 : ' % {'path': templ_path},
text
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
        # COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {
'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value',
}
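    # The views under test mark 'sausage-key' and 'bacon-key' as sensitive
    # POST parameters (see the safe-response assertions below); the other two
    # keys must remain visible in filtered reports.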
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
        Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
        Neither POST parameters nor frame variables can be seen in the
        default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
        Callable settings that forbid setting attributes should not break
        the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
    Here we specifically test the plain text 500 debug-only error page served
    when the request is detected as sent by JS code (AJAX). We don't check
    for the (non)existence of frame vars in the traceback information section
    of the response content because they aren't included in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
        Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
def test_ajax_response_encoding(self):
response = self.client.get('/raises500/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class HelperFunctionTests(SimpleTestCase):
def test_cleanse_setting_basic(self):
self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_ignore_case(self):
self.assertEqual(cleanse_setting('password', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_recurses_in_dictionary(self):
initial = {'login': 'cooper', 'password': 'secret'}
expected = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE}
self.assertEqual(cleanse_setting('SETTING_NAME', initial), expected)
|
7c9a7f443df52fc9ac0579c7c4b88d49d37c6e34aea67165c076aeeada56207b | import re
import threading
import unittest
from sqlite3 import dbapi2
from unittest import mock
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, transaction
from django.db.backends.sqlite3.base import check_sqlite_version
from django.db.models import Avg, StdDev, Sum, Variance
from django.db.models.aggregates import Aggregate
from django.db.models.fields import CharField
from django.db.utils import NotSupportedError
from django.test import (
TestCase, TransactionTestCase, override_settings, skipIfDBFeature,
)
from django.test.utils import isolate_apps
from ..models import Author, Item, Object, Square
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite tests')
class Tests(TestCase):
longMessage = True
def test_check_sqlite_version(self):
msg = 'SQLite 3.8.3 or later is required (found 3.8.2).'
with mock.patch.object(dbapi2, 'sqlite_version_info', (3, 8, 2)), \
mock.patch.object(dbapi2, 'sqlite_version', '3.8.2'), \
self.assertRaisesMessage(ImproperlyConfigured, msg):
check_sqlite_version()
def test_aggregation(self):
"""
        Raise NotSupportedError when aggregating on date/time fields (#19360).
"""
for aggregate in (Sum, Avg, Variance, StdDev):
with self.assertRaises(NotSupportedError):
Item.objects.all().aggregate(aggregate('time'))
with self.assertRaises(NotSupportedError):
Item.objects.all().aggregate(aggregate('date'))
with self.assertRaises(NotSupportedError):
Item.objects.all().aggregate(aggregate('last_modified'))
with self.assertRaises(NotSupportedError):
Item.objects.all().aggregate(
**{'complex': aggregate('last_modified') + aggregate('last_modified')}
)
def test_distinct_aggregation(self):
class DistinctAggregate(Aggregate):
allow_distinct = True
aggregate = DistinctAggregate('first', 'second', distinct=True)
msg = (
"SQLite doesn't support DISTINCT on aggregate functions accepting "
"multiple arguments."
)
with self.assertRaisesMessage(NotSupportedError, msg):
connection.ops.check_expression_support(aggregate)
def test_memory_db_test_name(self):
"""A named in-memory db should be allowed where supported."""
from django.db.backends.sqlite3.base import DatabaseWrapper
settings_dict = {
'TEST': {
'NAME': 'file:memorydb_test?mode=memory&cache=shared',
}
}
creation = DatabaseWrapper(settings_dict).creation
self.assertEqual(creation._get_test_db_name(), creation.connection.settings_dict['TEST']['NAME'])
def test_regexp_function(self):
tests = (
('test', r'[0-9]+', False),
('test', r'[a-z]+', True),
('test', None, None),
(None, r'[a-z]+', None),
(None, None, None),
)
for string, pattern, expected in tests:
with self.subTest((string, pattern)):
with connection.cursor() as cursor:
cursor.execute('SELECT %s REGEXP %s', [string, pattern])
value = cursor.fetchone()[0]
value = bool(value) if value in {0, 1} else value
self.assertIs(value, expected)
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite tests')
@isolate_apps('backends')
class SchemaTests(TransactionTestCase):
available_apps = ['backends']
def test_autoincrement(self):
"""
auto_increment fields are created with the AUTOINCREMENT keyword
in order to be monotonically increasing (#10164).
"""
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(Square)
statements = editor.collected_sql
match = re.search('"id" ([^,]+),', statements[0])
self.assertIsNotNone(match)
self.assertEqual(
'integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1),
'Wrong SQL used to create an auto-increment column on SQLite'
)
def test_disable_constraint_checking_failure_disallowed(self):
"""
SQLite schema editor is not usable within an outer transaction if
foreign key constraint checks are not disabled beforehand.
"""
msg = (
'SQLite schema editor cannot be used while foreign key '
'constraint checks are enabled. Make sure to disable them '
'before entering a transaction.atomic() context because '
'SQLite does not support disabling them in the middle of '
'a multi-statement transaction.'
)
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic(), connection.schema_editor(atomic=True):
pass
def test_constraint_checks_disabled_atomic_allowed(self):
"""
SQLite schema editor is usable within an outer transaction as long as
foreign key constraints checks are disabled beforehand.
"""
def constraint_checks_enabled():
with connection.cursor() as cursor:
return bool(cursor.execute('PRAGMA foreign_keys').fetchone()[0])
with connection.constraint_checks_disabled(), transaction.atomic():
with connection.schema_editor(atomic=True):
self.assertFalse(constraint_checks_enabled())
self.assertFalse(constraint_checks_enabled())
self.assertTrue(constraint_checks_enabled())
@skipIfDBFeature('supports_atomic_references_rename')
def test_field_rename_inside_atomic_block(self):
"""
        NotSupportedError is raised when a model field rename is attempted
inside an atomic block.
"""
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name('renamed')
msg = (
"Renaming the 'backends_author'.'name' column while in a "
"transaction is not supported on SQLite < 3.26 because it would "
"break referential integrity. Try adding `atomic = False` to the "
"Migration class."
)
with self.assertRaisesMessage(NotSupportedError, msg):
with connection.schema_editor(atomic=True) as editor:
editor.alter_field(Author, Author._meta.get_field('name'), new_field)
@skipIfDBFeature('supports_atomic_references_rename')
def test_table_rename_inside_atomic_block(self):
"""
        NotSupportedError is raised when a table rename is attempted inside
an atomic block.
"""
msg = (
"Renaming the 'backends_author' table while in a transaction is "
"not supported on SQLite < 3.26 because it would break referential "
"integrity. Try adding `atomic = False` to the Migration class."
)
with self.assertRaisesMessage(NotSupportedError, msg):
with connection.schema_editor(atomic=True) as editor:
editor.alter_db_table(Author, "backends_author", "renamed_table")
@unittest.skipUnless(connection.vendor == 'sqlite', 'Test only for SQLite')
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_no_interpolation(self):
# This shouldn't raise an exception (#17158)
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'], query)
def test_parameter_quoting(self):
        # The implementation of last_executed_query isn't optimal. It's
# worth testing that parameters are quoted (#14091).
query = "SELECT %s"
params = ["\"'\\"]
connection.cursor().execute(query, params)
# Note that the single quote is repeated
substituted = "SELECT '\"''\\'"
self.assertEqual(connection.queries[-1]['sql'], substituted)
def test_large_number_of_parameters(self):
# If SQLITE_MAX_VARIABLE_NUMBER (default = 999) has been changed to be
# greater than SQLITE_MAX_COLUMN (default = 2000), last_executed_query
# can hit the SQLITE_MAX_COLUMN limit (#26063).
with connection.cursor() as cursor:
sql = "SELECT MAX(%s)" % ", ".join(["%s"] * 2001)
params = list(range(2001))
# This should not raise an exception.
cursor.db.ops.last_executed_query(cursor.cursor, sql, params)
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite tests')
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
    the EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
def test_parameter_escaping(self):
# '%s' escaping support for sqlite3 (#13648).
with connection.cursor() as cursor:
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite tests')
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite tests')
class ThreadSharing(TransactionTestCase):
available_apps = ['backends']
def test_database_sharing_in_threads(self):
def create_object():
Object.objects.create()
create_object()
thread = threading.Thread(target=create_object)
thread.start()
thread.join()
self.assertEqual(Object.objects.count(), 2)
|
7df47931b8b943f6a362916c04faf179d07312b73f66e8a2ad9952f6370f17a3 | """Compare two HTML documents."""
import re
from html.parser import HTMLParser
WHITESPACE = re.compile(r'\s+')
def normalize_whitespace(string):
return WHITESPACE.sub(' ', string)
class Element:
def __init__(self, name, attributes):
self.name = name
self.attributes = sorted(attributes)
self.children = []
def append(self, element):
if isinstance(element, str):
element = normalize_whitespace(element)
if self.children:
if isinstance(self.children[-1], str):
self.children[-1] += element
self.children[-1] = normalize_whitespace(self.children[-1])
return
elif self.children:
            # Remove the last child if it is only whitespace. This can result
            # in incorrect DOM representations since whitespace between
            # inline tags like <span> is significant.
if isinstance(self.children[-1], str):
if self.children[-1].isspace():
self.children.pop()
if element:
self.children.append(element)
def finalize(self):
def rstrip_last_element(children):
if children:
if isinstance(children[-1], str):
children[-1] = children[-1].rstrip()
if not children[-1]:
children.pop()
children = rstrip_last_element(children)
return children
rstrip_last_element(self.children)
for i, child in enumerate(self.children):
if isinstance(child, str):
self.children[i] = child.strip()
elif hasattr(child, 'finalize'):
child.finalize()
def __eq__(self, element):
if not hasattr(element, 'name') or self.name != element.name:
return False
if len(self.attributes) != len(element.attributes):
return False
if self.attributes != element.attributes:
            # An attribute without a value is the same as an attribute with a
            # value that equals the attribute's name:
            # <input checked> == <input checked="checked">
for i in range(len(self.attributes)):
attr, value = self.attributes[i]
other_attr, other_value = element.attributes[i]
if value is None:
value = attr
if other_value is None:
other_value = other_attr
if attr != other_attr or value != other_value:
return False
return self.children == element.children
def __hash__(self):
return hash((self.name, *self.attributes))
def _count(self, element, count=True):
if not isinstance(element, str):
if self == element:
return 1
if isinstance(element, RootElement):
if self.children == element.children:
return 1
i = 0
for child in self.children:
            # If both child and element are text content, do a simple
            # "text in text" containment check.
if isinstance(child, str):
if isinstance(element, str):
if count:
i += child.count(element)
elif element in child:
return 1
else:
i += child._count(element, count=count)
if not count and i:
return i
return i
def __contains__(self, element):
return self._count(element, count=False) > 0
def count(self, element):
return self._count(element, count=True)
def __getitem__(self, key):
return self.children[key]
def __str__(self):
output = '<%s' % self.name
for key, value in self.attributes:
if value:
output += ' %s="%s"' % (key, value)
else:
output += ' %s' % key
if self.children:
output += '>\n'
output += ''.join(str(c) for c in self.children)
output += '\n</%s>' % self.name
else:
output += '>'
return output
def __repr__(self):
return str(self)
class RootElement(Element):
def __init__(self):
super().__init__(None, ())
def __str__(self):
return ''.join(str(c) for c in self.children)
class HTMLParseError(Exception):
pass
class Parser(HTMLParser):
SELF_CLOSING_TAGS = (
'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base',
'col',
)
def __init__(self):
super().__init__(convert_charrefs=False)
self.root = RootElement()
self.open_tags = []
self.element_positions = {}
def error(self, msg):
raise HTMLParseError(msg, self.getpos())
def format_position(self, position=None, element=None):
if not position and element:
position = self.element_positions[element]
if position is None:
position = self.getpos()
if hasattr(position, 'lineno'):
position = position.lineno, position.offset
return 'Line %d, Column %d' % position
@property
def current(self):
if self.open_tags:
return self.open_tags[-1]
else:
return self.root
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
if tag not in self.SELF_CLOSING_TAGS:
self.handle_endtag(tag)
def handle_starttag(self, tag, attrs):
# Special case handling of 'class' attribute, so that comparisons of DOM
# instances are not sensitive to ordering of classes.
attrs = [
(name, " ".join(sorted(value.split(" "))))
if name == "class"
else (name, value)
for name, value in attrs
]
element = Element(tag, attrs)
self.current.append(element)
if tag not in self.SELF_CLOSING_TAGS:
self.open_tags.append(element)
self.element_positions[element] = self.getpos()
def handle_endtag(self, tag):
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
while element.name != tag:
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
def handle_data(self, data):
self.current.append(data)
def handle_charref(self, name):
self.current.append('&%s;' % name)
def handle_entityref(self, name):
self.current.append('&%s;' % name)
def parse_html(html):
"""
Take a string that contains *valid* HTML and turn it into a Python object
structure that can be easily compared against other HTML on semantic
    equivalence. Syntactical differences, like which quote character is used
    on attributes, are ignored.
"""
parser = Parser()
parser.feed(html)
parser.close()
document = parser.root
document.finalize()
    # Remove the root element if it's not necessary.
if len(document.children) == 1:
if not isinstance(document.children[0], str):
document = document.children[0]
return document
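# Illustrative usage (markup assumed, not part of the original module):
# attribute order, quoting style, and class ordering don't affect equality.
#
#   dom1 = parse_html('<p class="a b" id=x>hello</p>')
#   dom2 = parse_html("<p id='x' class='b a'>hello</p>")
#   assert dom1 == dom2
#   assert 'hello' in dom1   # text containment
#   dom1.count('l')          # -> 2, occurrences in text children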
|
8153b09049f9ab3cf4936861a53a453bbaf5e46ad3924ba7599d49a0eb7e2193 | """
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', '[email protected]'), ('Full Name', '[email protected]')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('hy', gettext_noop('Armenian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which files streamed during upload will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = 0o644
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where the user will place custom format definitions.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add a thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be grouped together when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (to start with, only apps with migrations are serialized)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos, or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not hide the message, but
# Django will not stop you from e.g. running the server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
|
0ee8066064c076298a1494ba3392f73ffa4af25b72fe758a1ea435ac19a989ab | import functools
import itertools
import logging
import os
import pathlib
import signal
import subprocess
import sys
import threading
import time
import traceback
import weakref
from collections import defaultdict
from pathlib import Path
from types import ModuleType
from zipimport import zipimporter
from django.apps import apps
from django.core.signals import request_finished
from django.dispatch import Signal
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
autoreload_started = Signal()
file_changed = Signal(providing_args=['file_path', 'kind'])
DJANGO_AUTORELOAD_ENV = 'RUN_MAIN'
logger = logging.getLogger('django.utils.autoreload')
# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
_exception = None
try:
import termios
except ImportError:
termios = None
try:
import pywatchman
except ImportError:
pywatchman = None
def check_errors(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
raise _exception[0](_exception[1]).with_traceback(_exception[2])
def ensure_echo_on():
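    # Descriptive note (added): if a crashed child process left terminal echo
    # disabled, re-enable it. SIGTTOU is temporarily ignored so a background
    # process may change terminal attributes without being stopped.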
if termios:
fd = sys.stdin
if fd.isatty():
attr_list = termios.tcgetattr(fd)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(fd, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def iter_all_python_module_files():
# This is a hot path during reloading. Create a stable sorted list of
# modules based on the module name and pass it to iter_modules_and_files().
# This ensures cached results are returned in the usual case that modules
# aren't loaded on the fly.
keys = sorted(sys.modules)
modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes))
return iter_modules_and_files(modules, frozenset(_error_files))
@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
"""Iterate through all modules needed to be watched."""
sys_file_paths = []
for module in modules:
        # During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
        # are added to sys.modules; however, they are types, not modules, and
        # so cause issues here.
if not isinstance(module, ModuleType) or module.__spec__ is None:
continue
spec = module.__spec__
# Modules could be loaded from places without a concrete location. If
# this is the case, skip them.
if spec.has_location:
origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin
sys_file_paths.append(origin)
results = set()
for filename in itertools.chain(sys_file_paths, extra_files):
if not filename:
continue
path = pathlib.Path(filename)
if not path.exists():
# The module could have been removed, don't fail loudly if this
# is the case.
continue
results.add(path.resolve().absolute())
return frozenset(results)
@functools.lru_cache(maxsize=1)
def common_roots(paths):
"""
Return a tuple of common roots that are shared between the given paths.
File system watchers operate on directories and aren't cheap to create.
Try to find the minimum set of directories to watch that encompass all of
the files that need to be watched.
"""
# Inspired from Werkzeug:
# https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
# Create a sorted list of the path components, longest first.
path_parts = sorted([x.parts for x in paths], key=len, reverse=True)
tree = {}
for chunks in path_parts:
node = tree
# Add each part of the path to the tree.
for chunk in chunks:
node = node.setdefault(chunk, {})
# Clear the last leaf in the tree.
node.clear()
# Turn the tree into a list of Path instances.
def _walk(node, path):
for prefix, child in node.items():
yield from _walk(child, path + (prefix,))
if not node:
yield Path(*path)
return tuple(_walk(tree, ()))
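# A minimal sketch (paths assumed): a path that is a prefix of another
# collapses into the shorter root, while unrelated paths stay separate.
# A frozenset is used because the function is lru_cache()d and needs
# hashable input.
#
#   common_roots(frozenset({Path('/a/b'), Path('/a/b/c'), Path('/x/y')}))
#   -> (Path('/a/b'), Path('/x/y'))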
def sys_path_directories():
"""
Yield absolute directories from sys.path, ignoring entries that don't
exist.
"""
for path in sys.path:
path = Path(path)
if not path.exists():
continue
path = path.resolve().absolute()
# If the path is a file (like a zip file), watch the parent directory.
if path.is_file():
yield path.parent
else:
yield path
def get_child_arguments():
"""
    Return the arguments needed to re-execute the current program, preserving
    warning options and handling the case where the server was started with
    `python -m django`.
"""
import django.__main__
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if sys.argv[0] == django.__main__.__file__:
# The server was started with `python -m django runserver`.
args += ['-m', 'django']
args += sys.argv[1:]
else:
args += sys.argv
return args
def trigger_reload(filename):
logger.info('%s changed, reloading.', filename)
sys.exit(3)
def restart_with_reloader():
new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'}
args = get_child_arguments()
while True:
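        # Descriptive note (added): trigger_reload() exits the child with
        # status 3 to request a restart; any other exit code is propagated
        # and ends the loop.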
exit_code = subprocess.call(args, env=new_environ, close_fds=False)
if exit_code != 3:
return exit_code
class BaseReloader:
def __init__(self):
self.extra_files = set()
self.directory_globs = defaultdict(set)
self._stop_condition = threading.Event()
def watch_dir(self, path, glob):
path = Path(path)
if not path.is_absolute():
raise ValueError('%s must be absolute.' % path)
logger.debug('Watching dir %s with glob %s.', path, glob)
self.directory_globs[path].add(glob)
def watch_file(self, path):
path = Path(path)
if not path.is_absolute():
raise ValueError('%s must be absolute.' % path)
logger.debug('Watching file %s.', path)
self.extra_files.add(path)
def watched_files(self, include_globs=True):
"""
Yield all files that need to be watched, including module files and
files within globs.
"""
yield from iter_all_python_module_files()
yield from self.extra_files
if include_globs:
for directory, patterns in self.directory_globs.items():
for pattern in patterns:
yield from directory.glob(pattern)
def wait_for_apps_ready(self, app_reg, django_main_thread):
"""
Wait until Django reports that the apps have been loaded. If the given
thread has terminated before the apps are ready, then a SyntaxError or
other non-recoverable error has been raised. In that case, stop waiting
for the apps_ready event and continue processing.
Return True if the thread is alive and the ready event has been
triggered, or False if the thread is terminated while waiting for the
event.
"""
while django_main_thread.is_alive():
if app_reg.ready_event.wait(timeout=0.1):
return True
else:
logger.debug('Main Django thread has terminated before apps are ready.')
return False
def run(self, django_main_thread):
logger.debug('Waiting for apps ready_event.')
self.wait_for_apps_ready(apps, django_main_thread)
from django.urls import get_resolver
# Prevent a race condition where URL modules aren't loaded when the
# reloader starts by accessing the urlconf_module property.
get_resolver().urlconf_module
logger.debug('Apps ready_event triggered. Sending autoreload_started signal.')
autoreload_started.send(sender=self)
self.run_loop()
def run_loop(self):
ticker = self.tick()
while not self.should_stop:
try:
next(ticker)
except StopIteration:
break
self.stop()
def tick(self):
"""
This generator is called in a loop from run_loop. It's important that
the method takes care of pausing or otherwise waiting for a period of
time. This split between run_loop() and tick() is to improve the
testability of the reloader implementations by decoupling the work they
do from the loop.
"""
raise NotImplementedError('subclasses must implement tick().')
@classmethod
def check_availability(cls):
raise NotImplementedError('subclasses must implement check_availability().')
def notify_file_changed(self, path):
results = file_changed.send(sender=self, file_path=path)
logger.debug('%s notified as changed. Signal results: %s.', path, results)
if not any(res[1] for res in results):
trigger_reload(path)
# These are primarily used for testing.
@property
def should_stop(self):
return self._stop_condition.is_set()
def stop(self):
self._stop_condition.set()
class StatReloader(BaseReloader):
SLEEP_TIME = 1 # Check for changes once per second.
def tick(self):
state, previous_timestamp = {}, time.time()
while True:
state.update(self.loop_files(state, previous_timestamp))
previous_timestamp = time.time()
time.sleep(self.SLEEP_TIME)
yield
def loop_files(self, previous_times, previous_timestamp):
updated_times = {}
for path, mtime in self.snapshot_files():
previous_time = previous_times.get(path)
# If there are overlapping globs, a file may be iterated twice.
if path in updated_times:
continue
# A new file has been detected. This could happen due to it being
# imported at runtime and only being polled now, or because the
# file was just created. Compare the file's mtime to the
# previous_timestamp and send a notification if it was created
# since the last poll.
is_newly_created = previous_time is None and mtime > previous_timestamp
is_changed = previous_time is not None and previous_time != mtime
if is_newly_created or is_changed:
logger.debug('File %s. is_changed: %s, is_new: %s', path, is_changed, is_newly_created)
logger.debug('File %s previous mtime: %s, current mtime: %s', path, previous_time, mtime)
self.notify_file_changed(path)
updated_times[path] = mtime
return updated_times
def snapshot_files(self):
for file in self.watched_files():
try:
mtime = file.stat().st_mtime
except OSError:
# This is thrown when the file does not exist.
continue
yield file, mtime
@classmethod
def check_availability(cls):
return True
class WatchmanUnavailable(RuntimeError):
pass
class WatchmanReloader(BaseReloader):
def __init__(self):
self.roots = defaultdict(set)
self.processed_request = threading.Event()
super().__init__()
@cached_property
def client(self):
return pywatchman.client()
def _watch_root(self, root):
        # In practice this shouldn't occur, however, it's possible that a
        # directory that doesn't exist yet is being watched. If it's outside of
        # sys.path then this will end up as a new root. How to handle this
        # isn't clear: not adding the root will likely break when subscribing
        # to the changes; however, as this is currently an internal API, no
        # files will be watched outside of sys.path. Fixing this by checking
        # inside watch_glob() and watch_dir() is expensive; instead this could
        # fall back to the StatReloader if this case is detected. For now,
        # watching its parent, if possible, is sufficient.
if not root.exists():
if not root.parent.exists():
                logger.warning('Unable to watch root dir %s as neither it nor its parent exists.', root)
return
root = root.parent
result = self.client.query('watch-project', str(root.absolute()))
if 'warning' in result:
logger.warning('Watchman warning: %s', result['warning'])
logger.debug('Watchman watch-project result: %s', result)
return result['watch'], result.get('relative_path')
@functools.lru_cache()
def _get_clock(self, root):
return self.client.query('clock', root)['clock']
def _subscribe(self, directory, name, expression):
root, rel_path = self._watch_root(directory)
query = {
'expression': expression,
'fields': ['name'],
'since': self._get_clock(root),
'dedup_results': True,
}
if rel_path:
query['relative_root'] = rel_path
logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query)
self.client.query('subscribe', root, name, query)
def _subscribe_dir(self, directory, filenames):
if not directory.exists():
if not directory.parent.exists():
                logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'files-parent-%s' % directory.name
filenames = ['%s/%s' % (directory.name, filename) for filename in filenames]
directory = directory.parent
expression = ['name', filenames, 'wholename']
else:
prefix = 'files'
expression = ['name', filenames]
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def _watch_glob(self, directory, patterns):
"""
Watch a directory with a specific glob. If the directory doesn't yet
exist, attempt to watch the parent directory and amend the patterns to
        include this. It's important this method isn't called more than once per
directory when updating all subscriptions. Subsequent calls will
overwrite the named subscription, so it must include all possible glob
expressions.
"""
prefix = 'glob'
if not directory.exists():
if not directory.parent.exists():
                logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'glob-parent-%s' % directory.name
patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]
directory = directory.parent
expression = ['anyof']
for pattern in patterns:
expression.append(['match', pattern, 'wholename'])
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def watched_roots(self, watched_files):
extra_directories = self.directory_globs.keys()
watched_file_dirs = [f.parent for f in watched_files]
sys_paths = list(sys_path_directories())
return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
def _update_watches(self):
watched_files = list(self.watched_files(include_globs=False))
found_roots = common_roots(self.watched_roots(watched_files))
logger.debug('Watching %s files', len(watched_files))
logger.debug('Found common roots: %s', found_roots)
# Setup initial roots for performance, shortest roots first.
for root in sorted(found_roots):
self._watch_root(root)
for directory, patterns in self.directory_globs.items():
self._watch_glob(directory, patterns)
# Group sorted watched_files by their parent directory.
sorted_files = sorted(watched_files, key=lambda p: p.parent)
for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
# These paths need to be relative to the parent directory.
self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group])
def update_watches(self):
try:
self._update_watches()
except Exception as ex:
# If the service is still available, raise the original exception.
if self.check_server_status(ex):
raise
def _check_subscription(self, sub):
subscription = self.client.getSubscription(sub)
if not subscription:
return
logger.debug('Watchman subscription %s has results.', sub)
for result in subscription:
# When using watch-project, it's not simple to get the relative
# directory without storing some specific state. Store the full
# path to the directory in the subscription name, prefixed by its
# type (glob, files).
root_directory = Path(result['subscription'].split(':', 1)[1])
logger.debug('Found root directory %s', root_directory)
for file in result.get('files', []):
self.notify_file_changed(root_directory / file)
def request_processed(self, **kwargs):
logger.debug('Request processed. Setting update_watches event.')
self.processed_request.set()
def tick(self):
request_finished.connect(self.request_processed)
self.update_watches()
while True:
if self.processed_request.is_set():
self.update_watches()
self.processed_request.clear()
try:
self.client.receive()
except pywatchman.WatchmanError as ex:
self.check_server_status(ex)
else:
for sub in list(self.client.subs.keys()):
self._check_subscription(sub)
yield
def stop(self):
self.client.close()
super().stop()
def check_server_status(self, inner_ex=None):
"""Return True if the server is available."""
try:
self.client.query('version')
except Exception:
raise WatchmanUnavailable(str(inner_ex)) from inner_ex
return True
@classmethod
def check_availability(cls):
if not pywatchman:
raise WatchmanUnavailable('pywatchman not installed.')
client = pywatchman.client(timeout=0.01)
try:
result = client.capabilityCheck()
except Exception:
# The service is down?
raise WatchmanUnavailable('Cannot connect to the watchman service.')
version = get_version_tuple(result['version'])
# Watchman 4.9 includes multiple improvements to watching project
# directories as well as case insensitive filesystems.
logger.debug('Watchman version %s', version)
if version < (4, 9):
raise WatchmanUnavailable('Watchman 4.9 or later is required.')
def get_reloader():
"""Return the most suitable reloader for this environment."""
try:
WatchmanReloader.check_availability()
except WatchmanUnavailable:
return StatReloader()
return WatchmanReloader()
def start_django(reloader, main_func, *args, **kwargs):
ensure_echo_on()
main_func = check_errors(main_func)
django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs)
    django_main_thread.daemon = True
django_main_thread.start()
while not reloader.should_stop:
try:
reloader.run(django_main_thread)
except WatchmanUnavailable as ex:
# It's possible that the watchman service shuts down or otherwise
# becomes unavailable. In that case, use the StatReloader.
reloader = StatReloader()
logger.error('Error connecting to Watchman: %s', ex)
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
def run_with_reloader(main_func, *args, **kwargs):
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true':
reloader = get_reloader()
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
start_django(reloader, main_func, *args, **kwargs)
else:
try:
WatchmanReloader.check_availability()
except WatchmanUnavailable as e:
logger.info('Watchman unavailable: %s', e)
exit_code = restart_with_reloader()
sys.exit(exit_code)
except KeyboardInterrupt:
pass
|
c1a69c36faa5ccb972ff4cb9d2d2e1560f16006a285fa54a8719125a013daf20 | from decimal import Decimal
from django.conf import settings
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
force_grouping=False, use_l10n=None):
"""
Get a number (as a number or string), and return it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator.
For non-uniform digit grouping, it can be a sequence with the number
of digit group sizes following the format used by the Python locale
module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)).
* thousand_sep: Thousand separator symbol (for example ",")
"""
use_grouping = (use_l10n or (use_l10n is None and settings.USE_L10N)) and settings.USE_THOUSAND_SEPARATOR
use_grouping = use_grouping or force_grouping
use_grouping = use_grouping and grouping != 0
# Make the common case fast
if isinstance(number, int) and not use_grouping and not decimal_pos:
return mark_safe(number)
# sign
sign = ''
if isinstance(number, Decimal):
# Format values with more than 200 digits (an arbitrary cutoff) using
        # scientific notation to avoid high memory usage in '{:f}'.format().
_, digits, exponent = number.as_tuple()
if abs(exponent) + len(digits) > 200:
number = '{:e}'.format(number)
coefficient, exponent = number.split('e')
# Format the coefficient.
coefficient = format(
coefficient, decimal_sep, decimal_pos, grouping,
thousand_sep, force_grouping, use_l10n,
)
return '{}e{}'.format(coefficient, exponent)
else:
str_number = '{:f}'.format(number)
else:
str_number = str(number)
if str_number[0] == '-':
sign = '-'
str_number = str_number[1:]
# decimal part
if '.' in str_number:
int_part, dec_part = str_number.split('.')
if decimal_pos is not None:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
if decimal_pos is not None:
dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
dec_part = dec_part and decimal_sep + dec_part
# grouping
if use_grouping:
try:
# if grouping is a sequence
intervals = list(grouping)
except TypeError:
# grouping is a single value
intervals = [grouping, 0]
active_interval = intervals.pop(0)
int_part_gd = ''
cnt = 0
for digit in int_part[::-1]:
if cnt and cnt == active_interval:
if intervals:
active_interval = intervals.pop(0) or active_interval
int_part_gd += thousand_sep[::-1]
cnt = 0
int_part_gd += digit
cnt += 1
int_part = int_part_gd[::-1]
return sign + int_part + dec_part
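# Illustrative calls (values assumed; use_l10n=False keeps the sketch
# independent of configured settings):
#   format(1234567.891, '.', decimal_pos=2, grouping=3, thousand_sep=',',
#          force_grouping=True, use_l10n=False)  # -> '1,234,567.89'
#   format(123456789, '.', grouping=(3, 2, 0), thousand_sep=',',
#          force_grouping=True, use_l10n=False)  # -> '12,34,56,789'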
|
291f99edff7153d954b60dae8fd13ba44fd980574064437fe2ab2e47720ea196 | """
HTML Widget classes
"""
import copy
import datetime
import re
import warnings
from itertools import chain
from django.conf import settings
from django.forms.utils import to_current_timezone
from django.templatetags.static import static
from django.utils import datetime_safe, formats
from django.utils.dates import MONTHS
from django.utils.formats import get_format
from django.utils.html import format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from .renderers import get_default_renderer
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
class MediaOrderConflictWarning(RuntimeWarning):
pass
@html_safe
class Media:
def __init__(self, media=None, css=None, js=None):
if media is not None:
css = getattr(media, 'css', {})
js = getattr(media, 'js', [])
else:
if css is None:
css = {}
if js is None:
js = []
self._css_lists = [css]
self._js_lists = [js]
def __repr__(self):
return 'Media(css=%r, js=%r)' % (self._css, self._js)
def __str__(self):
return self.render()
@property
def _css(self):
css = self._css_lists[0]
# filter(None, ...) avoids calling merge with empty dicts.
for obj in filter(None, self._css_lists[1:]):
css = {
medium: self.merge(css.get(medium, []), obj.get(medium, []))
for medium in css.keys() | obj.keys()
}
return css
@property
def _js(self):
js = self._js_lists[0]
# filter(None, ...) avoids calling merge() with empty lists.
for obj in filter(None, self._js_lists[1:]):
js = self.merge(js, obj)
return js
def render(self):
return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES)))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css)
return chain.from_iterable([
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet">',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media)
def absolute_path(self, path):
"""
Given a relative or absolute path to a static asset, return an absolute
path. An absolute path will be returned unchanged while a relative path
will be passed to django.templatetags.static.static().
"""
if path.startswith(('http://', 'https://', '/')):
return path
return static(path)
def __getitem__(self, name):
"""Return a Media object that only contains media of the given type."""
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
@staticmethod
def merge(list_1, list_2):
"""
Merge two lists while trying to keep the relative order of the elements.
Warn if the lists have the same two elements in a different relative
order.
For static assets it can be important to have them included in the DOM
in a certain order. In JavaScript you may not be able to reference a
global or in CSS you might want to override a style.
"""
# Start with a copy of list_1.
combined_list = list(list_1)
last_insert_index = len(list_1)
# Walk list_2 in reverse, inserting each element into combined_list if
# it doesn't already exist.
for path in reversed(list_2):
try:
# Does path already exist in the list?
index = combined_list.index(path)
except ValueError:
# Add path to combined_list since it doesn't exist.
combined_list.insert(last_insert_index, path)
else:
if index > last_insert_index:
warnings.warn(
'Detected duplicate Media files in an opposite order:\n'
'%s\n%s' % (combined_list[last_insert_index], combined_list[index]),
MediaOrderConflictWarning,
)
# path already exists in the list. Update last_insert_index so
# that the following elements are inserted in front of this one.
last_insert_index = index
return combined_list
def __add__(self, other):
combined = Media()
combined._css_lists = self._css_lists + other._css_lists
combined._js_lists = self._js_lists + other._js_lists
return combined
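# Illustrative sketch (not part of the original module): __add__ only stacks
# the underlying lists; merging happens lazily when _css/_js are read, so
# duplicates collapse while relative order is preserved:
#
#   >>> (Media(js=['a.js', 'b.js']) + Media(js=['b.js', 'c.js']))._js
#   ['a.js', 'b.js', 'c.js']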
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
return Media(definition)
return base
return property(_media)
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = super(MediaDefiningClass, mcs).__new__(mcs, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
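# Illustrative sketch (not part of the original module): a class built with
# this metaclass declares assets through an inner Media class, which
# media_property() combines with the base classes' media (extend=True by
# default). Using the Widget class defined below:
#
#   >>> class CalendarWidget(Widget):       # hypothetical example widget
#   ...     class Media:
#   ...         css = {'all': ('calendar.css',)}
#   ...         js = ('calendar.js',)
#   >>> CalendarWidget().media._js
#   ['calendar.js']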
class Widget(metaclass=MediaDefiningClass):
    needs_multipart_form = False  # Determines whether this widget needs a multipart form.
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
self.attrs = {} if attrs is None else attrs.copy()
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None):
context = self.get_context(name, value, attrs)
yield context['widget']
def format_value(self, value):
"""
Return a value as it should appear when rendered in a template.
"""
if value == '' or value is None:
return None
if self.is_localized:
return formats.localize_input(value)
return str(value)
def get_context(self, name, value, attrs):
context = {}
context['widget'] = {
'name': name,
'is_hidden': self.is_hidden,
'required': self.is_required,
'value': self.format_value(value),
'attrs': self.build_attrs(self.attrs, attrs),
'template_name': self.template_name,
}
return context
def render(self, name, value, attrs=None, renderer=None):
"""Render the widget as an HTML string."""
context = self.get_context(name, value, attrs)
return self._render(self.template_name, context, renderer)
def _render(self, template_name, context, renderer=None):
if renderer is None:
renderer = get_default_renderer()
return mark_safe(renderer.render(template_name, context))
def build_attrs(self, base_attrs, extra_attrs=None):
"""Build an attribute dictionary."""
return {**base_attrs, **(extra_attrs or {})}
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, return the value
of this widget or None if it's not provided.
"""
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in data
def id_for_label(self, id_):
"""
Return the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Return None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
def use_required_attribute(self, initial):
return not self.is_hidden
class Input(Widget):
"""
Base class for all <input> widgets.
"""
input_type = None # Subclasses must define this.
template_name = 'django/forms/widgets/input.html'
def __init__(self, attrs=None):
if attrs is not None:
attrs = attrs.copy()
self.input_type = attrs.pop('type', self.input_type)
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['type'] = self.input_type
return context
class TextInput(Input):
input_type = 'text'
template_name = 'django/forms/widgets/text.html'
class NumberInput(Input):
input_type = 'number'
template_name = 'django/forms/widgets/number.html'
class EmailInput(Input):
input_type = 'email'
template_name = 'django/forms/widgets/email.html'
class URLInput(Input):
input_type = 'url'
template_name = 'django/forms/widgets/url.html'
class PasswordInput(Input):
input_type = 'password'
template_name = 'django/forms/widgets/password.html'
def __init__(self, attrs=None, render_value=False):
super().__init__(attrs)
self.render_value = render_value
def get_context(self, name, value, attrs):
if not self.render_value:
value = None
return super().get_context(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
template_name = 'django/forms/widgets/hidden.html'
class MultipleHiddenInput(HiddenInput):
"""
Handle <input type="hidden"> for fields that have a list
of values.
"""
template_name = 'django/forms/widgets/multiple_hidden.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
final_attrs = context['widget']['attrs']
id_ = context['widget']['attrs'].get('id')
subwidgets = []
for index, value_ in enumerate(context['widget']['value']):
widget_attrs = final_attrs.copy()
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
widget_attrs['id'] = '%s_%s' % (id_, index)
widget = HiddenInput()
widget.is_required = self.is_required
subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def format_value(self, value):
return [] if value is None else value
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
template_name = 'django/forms/widgets/file.html'
def format_value(self, value):
"""File input never renders a value."""
return
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in files
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
clear_checkbox_label = _('Clear')
initial_text = _('Currently')
input_text = _('Change')
template_name = 'django/forms/widgets/clearable_file_input.html'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
        Return whether value is considered to be an initial value.
"""
return bool(value and getattr(value, 'url', False))
def format_value(self, value):
"""
Return the file object if it has a defined url attribute.
"""
if self.is_initial(value):
return value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
context['widget'].update({
'checkbox_name': checkbox_name,
'checkbox_id': checkbox_id,
'is_initial': self.is_initial(value),
'input_text': self.input_text,
'initial_text': self.initial_text,
'clear_checkbox_label': self.clear_checkbox_label,
})
return context
def value_from_datadict(self, data, files, name):
upload = super().value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
def use_required_attribute(self, initial):
return super().use_required_attribute(initial) and not initial
def value_omitted_from_data(self, data, files, name):
return (
super().value_omitted_from_data(data, files, name) and
self.clear_checkbox_name(name) not in data
)
class Textarea(Widget):
template_name = 'django/forms/widgets/textarea.html'
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super().__init__(default_attrs)
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super().__init__(attrs)
self.format = format or None
def format_value(self, value):
return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
template_name = 'django/forms/widgets/date.html'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/datetime.html'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/time.html'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Input):
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox.html'
def __init__(self, attrs=None, check_test=None):
super().__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def format_value(self, value):
"""Only return the 'value' attribute if value isn't empty."""
if value is True or value is False or value is None or value == '':
return
return str(value)
def get_context(self, name, value, attrs):
if self.check_test(value):
if attrs is None:
attrs = {}
attrs['checked'] = True
return super().get_context(name, value, attrs)
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, str):
value = values.get(value.lower(), value)
return bool(value)
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
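# Illustrative sketch (not part of the original module): unchecked boxes are
# simply absent from submitted data, and 'true'/'false' strings are
# normalized before the final bool() call:
#
#   >>> CheckboxInput().value_from_datadict({'agree': 'false'}, {}, 'agree')
#   False
#   >>> CheckboxInput().value_from_datadict({'agree': 'on'}, {}, 'agree')
#   True
#   >>> CheckboxInput().value_from_datadict({}, {}, 'agree')
#   False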
class ChoiceWidget(Widget):
allow_multiple_selected = False
input_type = None
template_name = None
option_template_name = None
add_id_index = True
checked_attribute = {'checked': True}
option_inherits_attrs = True
def __init__(self, attrs=None, choices=()):
super().__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None):
"""
Yield all "subwidgets" of this widget. Used to enable iterating
options from a BoundField for choice widgets.
"""
value = self.format_value(value)
yield from self.options(name, value, attrs)
def options(self, name, value, attrs=None):
"""Yield a flat list of options for this widgets."""
for group in self.optgroups(name, value, attrs):
yield from group[1]
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
groups = []
has_selected = False
for index, (option_value, option_label) in enumerate(self.choices):
if option_value is None:
option_value = ''
subgroup = []
if isinstance(option_label, (list, tuple)):
group_name = option_value
subindex = 0
choices = option_label
else:
group_name = None
subindex = None
choices = [(option_value, option_label)]
groups.append((group_name, subgroup, index))
for subvalue, sublabel in choices:
selected = (
str(subvalue) in value and
(not has_selected or self.allow_multiple_selected)
)
has_selected |= selected
subgroup.append(self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
if subindex is not None:
subindex += 1
return groups
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
index = str(index) if subindex is None else "%s_%s" % (index, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
option_attrs['id'] = self.id_for_label(option_attrs['id'], index)
return {
'name': name,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
'wrap_label': True,
}
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs)
return context
def id_for_label(self, id_, index='0'):
"""
Use an incremented id for each option where the main widget
references the zero index.
"""
if id_ and self.add_id_index:
id_ = '%s_%s' % (id_, index)
return id_
def value_from_datadict(self, data, files, name):
getter = data.get
if self.allow_multiple_selected:
try:
getter = data.getlist
except AttributeError:
pass
return getter(name)
def format_value(self, value):
"""Return selected values as a list."""
if value is None and self.allow_multiple_selected:
return []
if not isinstance(value, (tuple, list)):
value = [value]
return [str(v) if v is not None else '' for v in value]
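# Illustrative sketch (not part of the original module): format_value()
# normalizes everything to a list of strings so that optgroups() can compare
# submitted data against str(option value):
#
#   >>> ChoiceWidget(choices=[(1, 'One'), (2, 'Two')]).format_value(1)
#   ['1']
#
# A choice whose label is itself a list/tuple of pairs is treated as a named
# group of options (rendered by Select as an <optgroup>).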
class Select(ChoiceWidget):
input_type = 'select'
template_name = 'django/forms/widgets/select.html'
option_template_name = 'django/forms/widgets/select_option.html'
add_id_index = False
checked_attribute = {'selected': True}
option_inherits_attrs = False
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.allow_multiple_selected:
context['widget']['attrs']['multiple'] = True
return context
@staticmethod
def _choice_has_empty_value(choice):
"""Return True if the choice's value is empty string or None."""
value, _ = choice
return value is None or value == ''
def use_required_attribute(self, initial):
"""
Don't render 'required' if the first <option> has a value, as that's
invalid HTML.
"""
use_required_attribute = super().use_required_attribute(initial)
# 'required' is always okay for <select multiple>.
if self.allow_multiple_selected:
return use_required_attribute
first_choice = next(iter(self.choices), None)
return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice)
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (
('unknown', _('Unknown')),
('true', _('Yes')),
('false', _('No')),
)
super().__init__(attrs, choices)
def format_value(self, value):
try:
return {
True: 'true', False: 'false',
'true': 'true', 'false': 'false',
# For backwards compatibility with Django < 2.2.
'2': 'true', '3': 'false',
}[value]
except KeyError:
return 'unknown'
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {
True: True,
'True': True,
'False': False,
False: False,
'true': True,
'false': False,
# For backwards compatibility with Django < 2.2.
'2': True,
'3': False,
}.get(value)
class SelectMultiple(Select):
allow_multiple_selected = True
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def value_omitted_from_data(self, data, files, name):
# An unselected <select multiple> doesn't appear in POST data, so it's
# never known if the value is actually omitted.
return False
class RadioSelect(ChoiceWidget):
input_type = 'radio'
template_name = 'django/forms/widgets/radio.html'
option_template_name = 'django/forms/widgets/radio_option.html'
class CheckboxSelectMultiple(ChoiceWidget):
allow_multiple_selected = True
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox_select.html'
option_template_name = 'django/forms/widgets/checkbox_option.html'
def use_required_attribute(self, initial):
# Don't use the 'required' attribute because browser validation would
# require all checkboxes to be checked instead of at least one.
return False
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
def id_for_label(self, id_, index=None):
""""
Don't include for="field_0" in <label> because clicking such a label
would toggle the first checkbox.
"""
if index is None:
return ''
return super().id_for_label(id_, index)
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
In addition to the values added by Widget.get_context(), this widget
adds a list of subwidgets to the context as widget['subwidgets'].
These can be looped over and rendered like normal widgets.
You'll probably want to use this class with MultiValueField.
"""
template_name = 'django/forms/widgets/multiwidget.html'
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super().__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = context['widget']['attrs']
input_type = final_attrs.pop('type', None)
id_ = final_attrs.get('id')
subwidgets = []
for i, widget in enumerate(self.widgets):
if input_type is not None:
widget.input_type = input_type
widget_name = '%s_%s' % (name, i)
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
widget_attrs = final_attrs.copy()
widget_attrs['id'] = '%s_%s' % (id_, i)
else:
widget_attrs = final_attrs
subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def id_for_label(self, id_):
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def value_omitted_from_data(self, data, files, name):
return all(
widget.value_omitted_from_data(data, files, name + '_%s' % i)
for i, widget in enumerate(self.widgets)
)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"""
Media for a multiwidget is the combination of all media of the
subwidgets.
"""
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super().__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
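# Illustrative sketch (not part of the original module): a minimal subclass
# only has to supply the component widgets and decompress(); the RangeWidget
# name is hypothetical:
#
#   class RangeWidget(MultiWidget):
#       def __init__(self, attrs=None):
#           super().__init__([NumberInput, NumberInput], attrs)
#       def decompress(self, value):
#           # value is assumed to be a (low, high) pair or None.
#           return list(value) if value else [None, None]
#
# value_from_datadict() then reads '<name>_0' and '<name>_1' from the data.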
class SplitDateTimeWidget(MultiWidget):
"""
A widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
template_name = 'django/forms/widgets/splitdatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
widgets = (
DateInput(
attrs=attrs if date_attrs is None else date_attrs,
format=date_format,
),
TimeInput(
attrs=attrs if time_attrs is None else time_attrs,
format=time_format,
),
)
super().__init__(widgets)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time()]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A widget that splits datetime input into two <input type="hidden"> inputs.
"""
template_name = 'django/forms/widgets/splithiddendatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
super().__init__(attrs, date_format, time_format, date_attrs, time_attrs)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = ('', '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
template_name = 'django/forms/widgets/select_date.html'
input_type = 'select'
select_widget = Select
date_re = re.compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
            if len(empty_label) != 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = ('', empty_label[0])
self.month_none_value = ('', empty_label[1])
self.day_none_value = ('', empty_label[2])
else:
if empty_label is not None:
self.none_value = ('', empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
date_context = {}
year_choices = [(i, str(i)) for i in self.years]
if not self.is_required:
year_choices.insert(0, self.year_none_value)
year_name = self.year_field % name
date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context(
name=year_name,
value=context['widget']['value']['year'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % year_name},
)
month_choices = list(self.months.items())
if not self.is_required:
month_choices.insert(0, self.month_none_value)
month_name = self.month_field % name
date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context(
name=month_name,
value=context['widget']['value']['month'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % month_name},
)
day_choices = [(i, i) for i in range(1, 32)]
if not self.is_required:
day_choices.insert(0, self.day_none_value)
day_name = self.day_field % name
        date_context['day'] = self.select_widget(attrs, choices=day_choices).get_context(
name=day_name,
value=context['widget']['value']['day'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % day_name},
)
subwidgets = []
for field in self._parse_date_fmt():
subwidgets.append(date_context[field]['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def format_value(self, value):
"""
Return a dict containing the year, month, and day of the current value.
Use dict instead of a datetime to allow invalid dates such as February
31 to display correctly.
"""
year, month, day = None, None, None
if isinstance(value, (datetime.date, datetime.datetime)):
year, month, day = value.year, value.month, value.day
elif isinstance(value, str):
match = self.date_re.match(value)
if match:
# Convert any zeros in the date to empty strings to match the
# empty option value.
year, month, day = [int(val) or '' for val in match.groups()]
elif settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
d = datetime.datetime.strptime(value, input_format)
except ValueError:
pass
else:
year, month, day = d.year, d.month, d.day
return {'year': year, 'month': month, 'day': day}
@staticmethod
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == '':
return None
if y is not None and m is not None and d is not None:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
pass
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
# Return pseudo-ISO dates with zeros for any unselected values,
# e.g. '2017-0-23'.
return '%s-%s-%s' % (y or 0, m or 0, d or 0)
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return not any(
('{}_{}'.format(name, interval) in data)
for interval in ('year', 'month', 'day')
)
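# Illustrative sketch (not part of the original module): with settings
# configured and USE_L10N disabled, value_from_datadict() reassembles the
# three selects into a pseudo-ISO string, substituting zeros for unselected
# parts:
#
#   >>> w = SelectDateWidget()
#   >>> data = {'born_year': '1999', 'born_month': '2', 'born_day': ''}
#   >>> w.value_from_datadict(data, {}, 'born')
#   '1999-2-0'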
"""
Form classes
"""
import copy
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
# BoundField is imported for backwards compatibility in Django 1.9
from django.forms.boundfield import BoundField # NOQA
from django.forms.fields import Field, FileField
# pretty_name is imported for backwards compatibility in Django 1.9
from django.forms.utils import ErrorDict, ErrorList, pretty_name # NOQA
from django.forms.widgets import Media, MediaDefiningClass
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from .renderers import get_default_renderer
__all__ = ('BaseForm', 'Form')
class DeclarativeFieldsMetaclass(MediaDefiningClass):
"""Collect Fields declared on the base classes."""
def __new__(mcs, name, bases, attrs):
# Collect fields from current class.
current_fields = []
for key, value in list(attrs.items()):
if isinstance(value, Field):
current_fields.append((key, value))
attrs.pop(key)
attrs['declared_fields'] = dict(current_fields)
new_class = super(DeclarativeFieldsMetaclass, mcs).__new__(mcs, name, bases, attrs)
# Walk through the MRO.
declared_fields = {}
for base in reversed(new_class.__mro__):
# Collect fields from base class.
if hasattr(base, 'declared_fields'):
declared_fields.update(base.declared_fields)
# Field shadowing.
for attr, value in base.__dict__.items():
if value is None and attr in declared_fields:
declared_fields.pop(attr)
new_class.base_fields = declared_fields
new_class.declared_fields = declared_fields
return new_class
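# Illustrative sketch (not part of the original module): because the MRO walk
# above pops names shadowed with None, a subclass can drop an inherited
# field. CharField/EmailField are assumed to come from django.forms.fields,
# and Form is the declarative base defined at the end of this module:
#
#   class PersonForm(Form):
#       name = CharField()
#       email = EmailField()
#   class NameOnlyForm(PersonForm):
#       email = None    # removes 'email' from base_fields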
@html_safe
class BaseForm:
"""
The main implementation of all the Form logic. Note that this class is
    different from Form. See the comments by the Form class for more info. Any
improvements to the form API should be made to this class, not to the Form
class.
"""
default_renderer = None
field_order = None
prefix = None
use_required_attribute = True
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, field_order=None, use_required_attribute=None, renderer=None):
self.is_bound = data is not None or files is not None
self.data = {} if data is None else data
self.files = {} if files is None else files
self.auto_id = auto_id
if prefix is not None:
self.prefix = prefix
self.initial = initial or {}
self.error_class = error_class
# Translators: This is the default suffix added to form field labels
self.label_suffix = label_suffix if label_suffix is not None else _(':')
self.empty_permitted = empty_permitted
self._errors = None # Stores the errors after clean() has been called.
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
self.fields = copy.deepcopy(self.base_fields)
self._bound_fields_cache = {}
self.order_fields(self.field_order if field_order is None else field_order)
if use_required_attribute is not None:
self.use_required_attribute = use_required_attribute
if self.empty_permitted and self.use_required_attribute:
raise ValueError(
'The empty_permitted and use_required_attribute arguments may '
'not both be True.'
)
# Initialize form renderer. Use a global default if not specified
# either as an argument or as self.default_renderer.
if renderer is None:
if self.default_renderer is None:
renderer = get_default_renderer()
else:
renderer = self.default_renderer
if isinstance(self.default_renderer, type):
renderer = renderer()
self.renderer = renderer
def order_fields(self, field_order):
"""
Rearrange the fields according to field_order.
field_order is a list of field names specifying the order. Append fields
not included in the list in the default order for backward compatibility
with subclasses not overriding field_order. If field_order is None,
keep all fields in the order defined in the class. Ignore unknown
fields in field_order to allow disabling fields in form subclasses
without redefining ordering.
"""
if field_order is None:
return
fields = {}
for key in field_order:
try:
fields[key] = self.fields.pop(key)
except KeyError: # ignore unknown fields
pass
fields.update(self.fields) # add remaining fields in original order
self.fields = fields
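    # Illustrative sketch (not part of the original module): for a form
    # declaring name, email, phone in that order,
    #
    #   form.order_fields(['phone', 'name'])
    #
    # leaves self.fields ordered phone, name, email -- unknown names are
    # skipped and unlisted fields keep their original relative order.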
def __str__(self):
return self.as_table()
def __repr__(self):
if self._errors is None:
is_valid = "Unknown"
else:
is_valid = self.is_bound and not self._errors
return '<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>' % {
'cls': self.__class__.__name__,
'bound': self.is_bound,
'valid': is_valid,
'fields': ';'.join(self.fields),
}
def __iter__(self):
for name in self.fields:
yield self[name]
def __getitem__(self, name):
"""Return a BoundField with the given name."""
try:
field = self.fields[name]
except KeyError:
raise KeyError(
"Key '%s' not found in '%s'. Choices are: %s." % (
name,
self.__class__.__name__,
', '.join(sorted(self.fields)),
)
)
if name not in self._bound_fields_cache:
self._bound_fields_cache[name] = field.get_bound_field(self, name)
return self._bound_fields_cache[name]
@property
def errors(self):
"""Return an ErrorDict for the data provided for the form."""
if self._errors is None:
self.full_clean()
return self._errors
def is_valid(self):
"""Return True if the form has no errors, or False otherwise."""
return self.is_bound and not self.errors
def add_prefix(self, field_name):
"""
Return the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name
def add_initial_prefix(self, field_name):
"""Add a 'initial' prefix for checking dynamic initial values."""
return 'initial-%s' % self.add_prefix(field_name)
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Output HTML. Used by as_table(), as_ul(), as_p()."
top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in self.fields.items():
html_class_attr = ''
bf = self[name]
bf_errors = self.error_class(bf.errors)
if bf.is_hidden:
if bf_errors:
top_errors.extend(
[_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': str(e)}
for e in bf_errors])
hidden_fields.append(str(bf))
else:
# Create a 'class="..."' attribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row and bf_errors:
output.append(error_row % str(bf_errors))
if bf.label:
label = conditional_escape(bf.label)
label = bf.label_tag(label) or ''
else:
label = ''
if field.help_text:
help_text = help_text_html % field.help_text
else:
help_text = ''
output.append(normal_row % {
'errors': bf_errors,
'label': label,
'field': bf,
'help_text': help_text,
'html_class_attr': html_class_attr,
'css_classes': css_classes,
'field_name': bf.html_name,
})
if top_errors:
output.insert(0, error_row % top_errors)
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = ''.join(hidden_fields)
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = (normal_row % {
'errors': '',
'label': '',
'field': '',
'help_text': '',
'html_class_attr': html_class_attr,
'css_classes': '',
'field_name': '',
})
output.append(last_row)
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return mark_safe('\n'.join(output))
def as_table(self):
"Return this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(
normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row='<tr><td colspan="2">%s</td></tr>',
row_ender='</td></tr>',
help_text_html='<br><span class="helptext">%s</span>',
errors_on_separate_row=False,
)
def as_ul(self):
"Return this form rendered as HTML <li>s -- excluding the <ul></ul>."
return self._html_output(
normal_row='<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
error_row='<li>%s</li>',
row_ender='</li>',
help_text_html=' <span class="helptext">%s</span>',
errors_on_separate_row=False,
)
def as_p(self):
"Return this form rendered as HTML <p>s."
return self._html_output(
normal_row='<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
error_row='%s',
row_ender='</p>',
help_text_html=' <span class="helptext">%s</span>',
errors_on_separate_row=True,
)
def non_field_errors(self):
"""
Return an ErrorList of errors that aren't associated with a particular
field -- i.e., from Form.clean(). Return an empty ErrorList if there
are none.
"""
return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield'))
def add_error(self, field, error):
"""
Update the content of `self._errors`.
The `field` argument is the name of the field to which the errors
should be added. If it's None, treat the errors as NON_FIELD_ERRORS.
The `error` argument can be a single error, a list of errors, or a
dictionary that maps field names to lists of errors. An "error" can be
either a simple string or an instance of ValidationError with its
message attribute set and a "list or dictionary" can be an actual
`list` or `dict` or an instance of ValidationError with its
`error_list` or `error_dict` attribute set.
If `error` is a dictionary, the `field` argument *must* be None and
errors will be added to the fields that correspond to the keys of the
dictionary.
"""
if not isinstance(error, ValidationError):
# Normalize to ValidationError and let its constructor
# do the hard work of making sense of the input.
error = ValidationError(error)
if hasattr(error, 'error_dict'):
if field is not None:
raise TypeError(
"The argument `field` must be `None` when the `error` "
"argument contains errors for multiple fields."
)
else:
error = error.error_dict
else:
error = {field or NON_FIELD_ERRORS: error.error_list}
for field, error_list in error.items():
if field not in self.errors:
if field != NON_FIELD_ERRORS and field not in self.fields:
raise ValueError(
"'%s' has no field named '%s'." % (self.__class__.__name__, field))
if field == NON_FIELD_ERRORS:
self._errors[field] = self.error_class(error_class='nonfield')
else:
self._errors[field] = self.error_class()
self._errors[field].extend(error_list)
if field in self.cleaned_data:
del self.cleaned_data[field]
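    # Illustrative sketch (not part of the original module): a typical call
    # site is a custom clean() attaching a cross-field error; the
    # 'start'/'end' fields are hypothetical:
    #
    #   def clean(self):
    #       cleaned_data = super().clean()
    #       start, end = cleaned_data.get('start'), cleaned_data.get('end')
    #       if start and end and start > end:
    #           self.add_error('end', 'End must not precede start.')
    #       return cleaned_data
    #
    # Passing field=None (or a dict of errors) routes the messages to
    # NON_FIELD_ERRORS.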
def has_error(self, field, code=None):
return field in self.errors and (
code is None or
any(error.code == code for error in self.errors.as_data()[field])
)
def full_clean(self):
"""
Clean all of self.data and populate self._errors and self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data has
# changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
def _clean_fields(self):
for name, field in self.fields.items():
# value_from_datadict() gets the data from the data dictionaries.
# Each widget type knows how to retrieve its own data, because some
# widgets split data over several HTML fields.
if field.disabled:
value = self.get_initial_for_field(field, name)
else:
value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
try:
if isinstance(field, FileField):
initial = self.get_initial_for_field(field, name)
value = field.clean(value, initial)
else:
value = field.clean(value)
self.cleaned_data[name] = value
if hasattr(self, 'clean_%s' % name):
value = getattr(self, 'clean_%s' % name)()
self.cleaned_data[name] = value
except ValidationError as e:
self.add_error(name, e)
def _clean_form(self):
try:
cleaned_data = self.clean()
except ValidationError as e:
self.add_error(None, e)
else:
if cleaned_data is not None:
self.cleaned_data = cleaned_data
def _post_clean(self):
"""
An internal hook for performing additional cleaning after form cleaning
is complete. Used for model validation in model forms.
"""
pass
def clean(self):
"""
Hook for doing any extra form-wide cleaning after Field.clean() has been
called on every field. Any ValidationError raised by this method will
not be associated with a particular field; it will have a special-case
association with the field named '__all__'.
"""
return self.cleaned_data
def has_changed(self):
"""Return True if data differs from initial."""
return bool(self.changed_data)
@cached_property
def changed_data(self):
data = []
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if not field.show_hidden_initial:
# Use the BoundField's initial as this is the value passed to
# the widget.
initial_value = self[name].initial
else:
initial_prefixed_name = self.add_initial_prefix(name)
hidden_widget = field.hidden_widget()
try:
initial_value = field.to_python(hidden_widget.value_from_datadict(
self.data, self.files, initial_prefixed_name))
except ValidationError:
# Always assume data has changed if validation fails.
data.append(name)
continue
if field.has_changed(initial_value, data_value):
data.append(name)
return data
@property
def media(self):
"""Return all media required to render the widgets on this form."""
media = Media()
for field in self.fields.values():
media = media + field.widget.media
return media
def is_multipart(self):
"""
Return True if the form needs to be multipart-encoded, i.e. it has
FileInput, or False otherwise.
"""
return any(field.widget.needs_multipart_form for field in self.fields.values())
def hidden_fields(self):
"""
Return a list of all the BoundField objects that are hidden fields.
Useful for manual form layout in templates.
"""
return [field for field in self if field.is_hidden]
def visible_fields(self):
"""
Return a list of BoundField objects that aren't hidden fields.
The opposite of the hidden_fields() method.
"""
return [field for field in self if not field.is_hidden]
def get_initial_for_field(self, field, field_name):
"""
Return initial data for field on form. Use initial data from the form
or the field, in that order. Evaluate callable values.
"""
value = self.initial.get(field_name, field.initial)
if callable(value):
value = value()
return value
class Form(BaseForm, metaclass=DeclarativeFieldsMetaclass):
"A collection of Fields, plus their associated data."
# This is a separate class from BaseForm in order to abstract the way
# self.fields is specified. This class (Form) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a form using declarative syntax.
# BaseForm itself has no way of designating self.fields.
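# Illustrative sketch (not part of the original module): the declarative
# syntax in action, assuming CharField is imported from django.forms.fields
# and settings are configured:
#
#   class ContactForm(Form):
#       subject = CharField(max_length=100)
#
#   form = ContactForm(data={'subject': 'Hello'})
#   form.is_valid()      # True -- runs full_clean() and fills cleaned_data
#   form.cleaned_data    # {'subject': 'Hello'}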
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from collections import namedtuple
from functools import lru_cache
from itertools import chain
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, Expression, F, Value, When
from django.db.models.fields import AutoField
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, InvalidQuery, Q
from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
from django.db.utils import NotSupportedError
from django.utils import timezone
from django.utils.functional import cached_property, partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable:
def __init__(self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
self.chunk_size = chunk_size
class ModelIterable(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
related_populators = get_related_populators(klass_info, select, db)
known_related_objects = [
(field, related_objs, operator.attrgetter(*[
field.attname
if from_field == 'self' else
queryset.model._meta.get_field(from_field).attname
for from_field in field.from_fields
])) for field, related_objs in queryset._known_related_objects.items()
]
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model.
for field, rel_objs, rel_getter in known_related_objects:
# Avoid overwriting objects loaded by, e.g., select_related().
if field.is_cached(obj):
continue
rel_obj_id = rel_getter(obj)
try:
rel_obj = rel_objs[rel_obj_id]
except KeyError:
pass # May happen in qs1 | qs2 scenarios.
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
indexes = range(len(names))
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
yield {names[i]: row[i] for i in indexes}
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if queryset._fields:
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
fields = [*queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields)]
if fields != names:
# Reorder according to fields.
index_map = {name: idx for idx, name in enumerate(names)}
rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
return map(
rowfactory,
compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
)
return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
class NamedValuesListIterable(ValuesListIterable):
"""
Iterable returned by QuerySet.values_list(named=True) that yields a
namedtuple for each row.
"""
@staticmethod
@lru_cache()
def create_namedtuple_class(*names):
# Cache namedtuple() with @lru_cache() since it's too slow to be
# called for every QuerySet evaluation.
return namedtuple('Row', names)
def __iter__(self):
queryset = self.queryset
if queryset._fields:
names = queryset._fields
else:
query = queryset.query
names = [*query.extra_select, *query.values_select, *query.annotation_select]
tuple_class = self.create_namedtuple_class(*names)
new = tuple.__new__
for row in super().__iter__():
yield new(tuple_class, row)
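# Illustrative sketch (not part of the original module), with a hypothetical
# Person model: named=True swaps plain tuples for rows of a cached namedtuple
# class, enabling attribute access:
#
#   row = Person.objects.values_list('first_name', 'last_name', named=True)[0]
#   row.first_name    # instead of row[0]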
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
yield row[0]
class QuerySet:
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
        # Address the circular dependency between `QuerySet` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: get_version()}
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<%s %r>' % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._chain()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values('pk'))
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values('pk'))
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def _iterator(self, use_chunked_fetch, chunk_size):
yield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size)
def iterator(self, chunk_size=2000):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
if chunk_size <= 0:
raise ValueError('Chunk size must be strictly positive.')
use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS')
return self._iterator(use_chunked_fetch, chunk_size)
def aggregate(self, *args, **kwargs):
"""
Return a dictionary containing the calculations (aggregation)
over the current queryset.
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions((*args, *kwargs.values()), method_name='aggregate')
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.chain()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs)
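    # Illustrative sketch (not part of the original module), assuming a
    # hypothetical Book model with a price field: positional expressions get
    # their default alias, keyword arguments choose the key explicitly:
    #
    #   from django.db.models import Avg, Max
    #   Book.objects.aggregate(Avg('price'), highest=Max('price'))
    #   # -> {'price__avg': ..., 'highest': ...}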
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
        cached result set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Perform the query and return a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_rows_from_bulk_insert=True), so
        # you can't insert into the child tables which reference this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
# Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size, ignore_conflicts=ignore_conflicts)
for obj_with_pk in objs_with_pk:
obj_with_pk._state.adding = False
obj_with_pk._state.db = self.db
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
ids = self._batched_insert(objs_without_pk, fields, batch_size, ignore_conflicts=ignore_conflicts)
if connection.features.can_return_rows_from_bulk_insert and not ignore_conflicts:
assert len(ids) == len(objs_without_pk)
for obj_without_pk, pk in zip(objs_without_pk, ids):
obj_without_pk.pk = pk
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
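# A minimal usage sketch (hypothetical ``Entry`` model assumed):
#
#   entries = Entry.objects.bulk_create(
#       [Entry(headline='First'), Entry(headline='Second')],
#       batch_size=100,
#   )
#
# One INSERT is issued per batch; on backends without
# can_return_rows_from_bulk_insert, the returned instances keep pk=None
# when the primary key is an autoincrement field.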
def bulk_update(self, objs, fields, batch_size=None):
"""
Update the given fields in each of the given objects in the database.
"""
if batch_size is not None and batch_size < 0:
raise ValueError('Batch size must be a positive integer.')
if not fields:
raise ValueError('Field names must be given to bulk_update().')
objs = tuple(objs)
if any(obj.pk is None for obj in objs):
raise ValueError('All bulk_update() objects must have a primary key set.')
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError('bulk_update() can only be used with concrete fields.')
if any(f.primary_key for f in fields):
raise ValueError('bulk_update() cannot be used with primary key fields.')
if not objs:
return
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
max_batch_size = connections[self.db].ops.bulk_batch_size(['pk', 'pk'] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connections[self.db].features.requires_casted_case_in_updates
batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not isinstance(attr, Expression):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
self.filter(pk__in=pks).update(**update_kwargs)
bulk_update.alters_data = True
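# A minimal usage sketch (hypothetical ``Entry`` model with an integer
# ``rating`` field assumed):
#
#   entries = list(Entry.objects.all())
#   for entry in entries:
#       entry.rating += 1
#   Entry.objects.bulk_update(entries, ['rating'], batch_size=50)
#
# Each batch is issued as a single UPDATE whose SET clause is the
# CASE WHEN pk=... THEN ... expression built above.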
def get_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
return self._create_object_from_params(kwargs, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
self._for_write = True
with transaction.atomic(using=self.db):
try:
obj = self.select_for_update().get(**kwargs)
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
# Lock the row so that a concurrent update is blocked until
# after update_or_create() has performed its save.
obj, created = self._create_object_from_params(kwargs, params, lock=True)
if created:
return obj, created
for k, v in defaults.items():
setattr(obj, k, v() if callable(v) else v)
obj.save(using=self.db)
return obj, False
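# A minimal usage sketch (hypothetical ``Person`` model assumed):
#
#   from datetime import date
#   obj, created = Person.objects.update_or_create(
#       first_name='John', last_name='Lennon',
#       defaults={'birthday': date(1940, 10, 9)},
#   )
#
# The kwargs select the row; ``defaults`` supplies the values to update
# (or to use at creation time). Callables in ``defaults`` are invoked
# lazily, as in the loop above.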
def _create_object_from_params(self, lookup, params, lock=False):
"""
Try to create an object using passed params. Used by get_or_create()
and update_or_create().
"""
try:
with transaction.atomic(using=self.db):
params = {k: v() if callable(v) else v for k, v in params.items()}
obj = self.create(**params)
return obj, True
except IntegrityError as e:
try:
qs = self.select_for_update() if lock else self
return qs.get(**lookup), False
except self.model.DoesNotExist:
pass
raise e
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `params` for creating a model instance based on the given
kwargs; for use by get_or_create() and update_or_create().
"""
defaults = defaults or {}
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
property_names = self.model._meta._property_names
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
# It's okay to use a model's property if it has a setter.
if not (param in property_names and getattr(self.model, param).fset):
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'." % (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
))
return params
def _earliest(self, *fields):
"""
Return the earliest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if fields:
order_by = fields
else:
order_by = getattr(self.model._meta, 'get_latest_by')
if order_by and not isinstance(order_by, (tuple, list)):
order_by = (order_by,)
if order_by is None:
raise ValueError(
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta."
)
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._chain()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering(*order_by)
return obj.get()
def earliest(self, *fields):
return self._earliest(*fields)
def latest(self, *fields):
return self.reverse()._earliest(*fields)
def first(self):
"""Return the first object of a query or None if no match is found."""
for obj in (self if self.ordered else self.order_by('pk'))[:1]:
return obj
def last(self):
"""Return the last object of a query or None if no match is found."""
for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]:
return obj
def in_bulk(self, id_list=None, *, field_name='pk'):
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if field_name != 'pk' and not self.model._meta.get_field(field_name).unique:
raise ValueError("in_bulk()'s field_name must be a unique field but %r isn't." % field_name)
if id_list is not None:
if not id_list:
return {}
filter_key = '{}__in'.format(field_name)
batch_size = connections[self.db].features.max_query_params
id_list = tuple(id_list)
# If the database has a limit on the number of query parameters
# (e.g. SQLite), retrieve objects in batches if necessary.
if batch_size and batch_size < len(id_list):
qs = ()
for offset in range(0, len(id_list), batch_size):
batch = id_list[offset:offset + batch_size]
qs += tuple(self.filter(**{filter_key: batch}).order_by())
else:
qs = self.filter(**{filter_key: id_list}).order_by()
else:
qs = self._chain()
return {getattr(obj, field_name): obj for obj in qs}
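# A minimal usage sketch (hypothetical ``Blog`` model with a unique
# ``slug`` field assumed):
#
#   Blog.objects.in_bulk([1, 2])          # {1: <Blog: 1>, 2: <Blog: 2>}
#   Blog.objects.in_bulk(['beatles'], field_name='slug')
#                                         # {'beatles': <Blog: Beatles>}
#
# field_name must be unique so the returned mapping is unambiguous.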
def delete(self):
"""Delete the records in the current QuerySet."""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._chain()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Delete objects found from the given queryset in a single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.chain(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
with transaction.mark_for_rollback_on_error(using=self.db):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
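# A minimal usage sketch (hypothetical ``Entry`` model assumed):
#
#   from django.db.models import F
#   Entry.objects.filter(pub_date__year=2010).update(comments_on=False)
#   Entry.objects.update(n_pingbacks=F('n_pingbacks') + 1)
#
# The return value is the number of rows matched by the UPDATE.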
def _update(self, values):
"""
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.chain(sql.UpdateQuery)
query.add_update_fields(values)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def explain(self, *, format=None, **options):
return self.query.explain(using=self.db, format=format, **options)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
qs = RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
return qs
def _values(self, *fields, **expressions):
clone = self._chain()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False, named=False):
if flat and named:
raise TypeError("'flat' and 'named' can't be used together.")
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
field_names = {f for f in fields if not hasattr(f, 'resolve_expression')}
_fields = []
expressions = {}
counter = 1
for field in fields:
if hasattr(field, 'resolve_expression'):
field_id_prefix = getattr(field, 'default_alias', field.__class__.__name__.lower())
while True:
field_id = field_id_prefix + str(counter)
counter += 1
if field_id not in field_names:
break
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = (
NamedValuesListIterable if named
else FlatValuesListIterable if flat
else ValuesListIterable
)
return clone
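# A minimal usage sketch (hypothetical ``Entry`` model assumed):
#
#   Entry.objects.values_list('id', 'headline')   # [(1, 'First'), ...]
#   Entry.objects.values_list('id', flat=True)    # [1, 2, 3, ...]
#   Entry.objects.values_list('id', 'headline', named=True)
#                                                 # [Row(id=1, headline=...)]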
def dates(self, field_name, kind, order='ASC'):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ('year', 'month', 'week', 'day'), \
"'kind' must be one of 'year', 'month', 'week', or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
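# A minimal usage sketch (hypothetical ``Entry`` model assumed):
#
#   Entry.objects.dates('pub_date', 'year')
#   # -> [datetime.date(2005, 1, 1), datetime.date(2006, 1, 1), ...]
#
# datetimes() below is the timezone-aware analogue for DateTimeFields.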
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'), \
"'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._chain()
def filter(self, *args, **kwargs):
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._chain()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._chain()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
# If the query is an EmptyQuerySet, combine all nonempty querysets.
if isinstance(self, EmptyQuerySet):
qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
return qs[0]._combinator_query('union', *qs[1:], all=all) if qs else self
return self._combinator_query('union', *other_qs, all=all)
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query('intersection', *other_qs)
def difference(self, *other_qs):
# If the query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
return self._combinator_query('difference', *other_qs)
def select_for_update(self, nowait=False, skip_locked=False, of=()):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError('The nowait option cannot be used with skip_locked.')
obj = self._chain()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
obj.query.select_for_update_of = of
return obj
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._chain()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
"""
clone = self._chain()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
for lookup in lookups:
if isinstance(lookup, Prefetch):
lookup = lookup.prefetch_to
lookup = lookup.split(LOOKUP_SEP, 1)[0]
if lookup in self.query._filtered_relations:
raise ValueError('prefetch_related() is not supported with FilteredRelation.')
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
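# A minimal usage sketch (hypothetical ``Restaurant``/``Pizza`` models
# assumed):
#
#   Restaurant.objects.prefetch_related('pizzas__toppings')
#   Restaurant.objects.prefetch_related(
#       Prefetch('pizzas',
#                queryset=Pizza.objects.filter(vegetarian=True),
#                to_attr='veggie_pizzas'),
#   )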
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
self._validate_values_are_expressions(args + tuple(kwargs.values()), method_name='annotate')
annotations = {}
for arg in args:
# The default_alias property may raise a TypeError.
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except TypeError:
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._chain()
names = self._fields
if names is None:
names = set(chain.from_iterable(
(field.name, field.attname) if hasattr(field, 'attname') else (field.name,)
for field in self.model._meta.get_fields()
))
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
if isinstance(annotation, FilteredRelation):
clone.query.add_filtered_relation(annotation, alias)
else:
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
def order_by(self, *field_names):
"""Return a new QuerySet instance with the ordering changed."""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._chain()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Return a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._chain()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""Add extra SQL fragments to the query."""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._chain()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
if not self.query.can_filter():
raise TypeError('Cannot reverse a query once a slice has been taken.')
clone = self._chain()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
The only exception to this is if None is passed in as the only
parameter, in which case remove all deferrals.
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._chain()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
for field in fields:
field = field.split(LOOKUP_SEP, 1)[0]
if field in self.query._filtered_relations:
raise ValueError('only() is not supported with FilteredRelation.')
clone = self._chain()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._chain()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model (or is empty).
"""
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None, ignore_conflicts=False):
"""
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False):
"""
Helper method for bulk_create() to insert objs one batch at a time.
"""
if ignore_conflicts and not connections[self.db].features.supports_ignore_conflicts:
raise NotSupportedError('This database backend does not support ignoring conflicts.')
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
inserted_ids = []
bulk_return = connections[self.db].features.can_return_rows_from_bulk_insert
for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
if bulk_return and not ignore_conflicts:
inserted_id = self._insert(
item, fields=fields, using=self.db, return_id=True,
ignore_conflicts=ignore_conflicts,
)
if isinstance(inserted_id, list):
inserted_ids.extend(inserted_id)
else:
inserted_ids.append(inserted_id)
else:
self._insert(item, fields=fields, using=self.db, ignore_conflicts=ignore_conflicts)
return inserted_ids
def _chain(self, **kwargs):
"""
Return a copy of the current QuerySet that's ready for another
operation.
"""
obj = self._clone()
if obj._sticky_filter:
obj.query.filter_is_sticky = True
obj._sticky_filter = False
obj.__dict__.update(kwargs)
return obj
def _clone(self):
"""
Return a copy of the current QuerySet. A lightweight alternative
to deepcopy().
"""
c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints)
c._sticky_filter = self._sticky_filter
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c._iterable_class = self._iterable_class
c._fields = self._fields
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def resolve_expression(self, *args, **kwargs):
if self._fields and len(self._fields) > 1:
# A values() queryset can only be used as a nested query
# if it's set up to select only a single field.
raise TypeError('Cannot use multi-field values as a filter value.')
query = self.query.resolve_expression(*args, **kwargs)
query._db = self._db
return query
resolve_expression.queryset_only = True
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
Check if this QuerySet has any filtering going on. This isn't
equivalent to checking if all objects are present in the results; for
example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(str(arg) for arg in values if not hasattr(arg, 'resolve_expression'))
if invalid_args:
raise TypeError(
'QuerySet.%s() received non-expression(s): %s.' % (
method_name,
', '.join(invalid_args),
)
)
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
Marker class for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
"""
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
self._result_cache = None
self._prefetch_related_lookups = ()
self._prefetch_done = False
def resolve_model_init_order(self):
"""Resolve the init field names and value positions."""
converter = connections[self.db].introspection.identifier_converter
model_init_fields = [f for f in self.model._meta.fields if converter(f.column) in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def prefetch_related(self, *lookups):
"""Same as QuerySet.prefetch_related()"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def _prefetch_related_objects(self):
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def _clone(self):
"""Same as QuerySet._clone()"""
c = self.__class__(
self.raw_query, model=self.model, query=self.query, params=self.params,
translations=self.translations, using=self._db, hints=self._hints
)
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __iter__(self):
self._fetch_all()
return iter(self._result_cache)
def iterator(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
if self.model._meta.pk.attname not in model_init_names:
raise InvalidQuery('Raw query must include the primary key')
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
if converters:
query = compiler.apply_converters(query, converters)
for values in query:
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.chain(using=alias),
params=self.params, translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
# Ignore translations for nonexistent column names
try:
index = columns.index(query_name)
except ValueError:
pass
else:
columns[index] = model_name
return columns
@cached_property
def model_fields(self):
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.identifier_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and not issubclass(queryset._iterable_class, ModelIterable):
raise ValueError('Prefetch querysets cannot use values().')
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
# Prevent the QuerySet from being evaluated
obj_dict['queryset'] = self.queryset._chain(
_result_cache=[],
_prefetch_done=True,
)
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
return isinstance(other, Prefetch) and self.prefetch_to == other.prefetch_to
def __hash__(self):
return hash((self.__class__, self.prefetch_to))
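# A minimal usage sketch (hypothetical ``Question``/``Choice`` models
# assumed):
#
#   voted = Prefetch(
#       'choice_set',
#       queryset=Choice.objects.filter(votes__gt=0),
#       to_attr='voted_choices',
#   )
#   question = Question.objects.prefetch_related(voted).get(pk=1)
#   question.voted_choices  # a plain list stored on the to_attr name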
def normalize_prefetch_lookups(lookups, prefix=None):
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if not model_instances:
return # nothing to do
# We need to be able to dynamically add to the list of prefetch_related
# lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
while all_lookups:
lookup = all_lookups.pop()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if not obj_list:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to the first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(reversed(additional_lookups), prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extend(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
if through_attr in getattr(obj, '_prefetched_objects_cache', ()):
# If related objects have been prefetched, use the
# cache rather than the object's through_attr.
new_obj = list(obj._prefetched_objects_cache.get(through_attr))
else:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
Return a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
is_fetched = to_attr in instance.__dict__
else:
is_fetched = hasattr(instance, to_attr)
else:
is_fetched = through_attr in instance._prefetched_objects_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache or field name to assign to,
# boolean that is True when the previous argument is a cache name vs a field name).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', ())
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to the first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
if as_attr:
# A to_attr has been given for the prefetch.
setattr(obj, to_attr, val)
elif is_descriptor:
# cache_name points to a field name in obj.
# This field is a descriptor for a related object.
setattr(obj, cache_name, val)
else:
# No to_attr has been given for this prefetch operation and the
# cache_name does not point to a descriptor. Store the value of
# the field in the object's field cache.
obj._state.fields_cache[cache_name] = val
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in the populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - local_setter, remote_setter: Methods to set cached values on
# the object being populated and on the remote object. Usually
# these are Field.set_cached_value() methods.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
attname_indexes = {select[idx][0].target.attname: idx for idx in select_fields}
model_init_attnames = (f.attname for f in klass_info['model']._meta.concrete_fields)
self.init_list = [attname for attname in model_init_attnames if attname in attname_indexes]
self.reorder_for_init = operator.itemgetter(*[attname_indexes[attname] for attname in self.init_list])
self.model_cls = klass_info['model']
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
self.local_setter = klass_info['local_setter']
self.remote_setter = klass_info['remote_setter']
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
self.local_setter(from_obj, obj)
if obj is not None:
self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
import copy
import datetime
import inspect
from decimal import Decimal
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import connection
from django.db.models import fields
from django.db.models.query_utils import Q
from django.db.utils import NotSupportedError
from django.utils.deconstruct import deconstructible
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == 'DecimalField':
sql = 'CAST(%s AS NUMERIC)' % sql
except FieldError:
pass
return sql, params
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
BITLEFTSHIFT = '<<'
BITRIGHTSHIFT = '>>'
def _combine(self, other, connector, reversed):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
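# A minimal sketch of how _combine() is reached (hypothetical ``Entry``
# model assumed):
#
#   from django.db.models import F
#   expr = F('n_comments') + F('n_pingbacks')  # CombinedExpression(F, '+', F)
#   Entry.objects.update(n_total=expr)
#
# A plain Python value on either side is wrapped in Value() first (or
# DurationValue() for a timedelta), as above.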
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
@deconstructible
class BaseExpression:
"""Base class for all query expressions."""
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
# Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop('convert_value', None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop else
[self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else (
F(arg) if isinstance(arg, str) else Value(arg)
) for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(expr and expr.contains_aggregate for expr in self.get_source_expressions())
@cached_property
def contains_over_clause(self):
return any(expr and expr.contains_over_clause for expr in self.get_source_expressions())
@cached_property
def contains_column_references(self):
return any(expr and expr.contains_column_references for expr in self.get_source_expressions())
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr else None
for expr in c.get_source_expressions()
])
return c
def _prepare(self, field):
"""Hook used by Lookup.get_prep_lookup() to do custom preparation."""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError('Cannot resolve expression type, unknown output_field')
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression. If the output
fields of all source fields match, then simply infer the same type
here. This isn't always correct, but it makes sense most of the time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field for more complex computations.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
sources_iter = (source for source in self.get_source_fields() if source is not None)
for output_field in sources_iter:
if any(not isinstance(output_field, source.__class__) for source in sources_iter):
raise FieldError('Expression contains mixed types. You must set output_field.')
return output_field
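    # A sketch of the rule above, assuming a hypothetical `Product` model whose
    # `price` (DecimalField) and `qty` (IntegerField) columns have different
    # types: combining them raises FieldError unless output_field is declared.
    #
    #   from django.db.models import ExpressionWrapper, F, FloatField
    #   ExpressionWrapper(F('price') * F('qty'), output_field=FloatField())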
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == 'FloatField':
return lambda value, expression, connection: None if value is None else float(value)
elif internal_type.endswith('IntegerField'):
return lambda value, expression, connection: None if value is None else int(value)
elif internal_type == 'DecimalField':
return lambda value, expression, connection: None if value is None else Decimal(value)
return self._convert_value_noop
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions([
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
])
return clone
def copy(self):
return copy.copy(self)
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
yield from expr.flatten()
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
return isinstance(other, BaseExpression) and other.identity == self.identity
def __hash__(self):
return hash(self.identity)
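# Equality sketch: identity is derived from the constructor arguments, so two
# expressions built the same way compare (and hash) equal.
#
#   from django.db.models import Value
#   Value(1) == Value(1)     # True: same class, same constructor arguments
#   Value(1) == Value('1')   # False: different arguments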
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
pass
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField') or
(rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
if (lhs_output and rhs_output and self.connector == self.SUB and
lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
lhs_output.get_internal_type() == rhs_output.get_internal_type()):
return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
        # Wrap the combined expression in parentheses to preserve operator precedence.
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
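# Arithmetic between expressions builds a CombinedExpression tree; a sketch,
# assuming a hypothetical `Product` model with `price` and `discount` columns:
#
#   from django.db.models import F
#   Product.objects.annotate(net=F('price') - F('discount'))
#   # F('price') - F('discount') is CombinedExpression(F('price'), '-', F('discount'))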
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
if not isinstance(side, DurationValue):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == 'DurationField':
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
        # Wrap the combined expression in parentheses to preserve operator precedence.
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
        # compile() takes only the expression node; the connection is already
        # bound to the compiler (the second positional is select_format).
        lhs = compiler.compile(self.lhs)
        rhs = compiler.compile(self.rhs)
return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)
@deconstructible
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
# Can the expression be used in a WHERE clause?
filterable = True
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(self, query=None, allow_joins=True, reuse=None,
summarize=False, for_save=False, simple_col=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize, simple_col)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
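# Usage sketch for F(), assuming a hypothetical `Entry` model with integer
# fields `n_comments` and `n_pingbacks`: references are resolved against the
# query, so comparisons and updates happen entirely in SQL.
#
#   from django.db.models import F
#   Entry.objects.filter(n_comments__gt=F('n_pingbacks'))
#   Entry.objects.update(n_comments=F('n_comments') + 1)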
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
def as_sql(self, *args, **kwargs):
raise ValueError(
'This queryset contains a reference to an outer query and may '
'only be used in a subquery.'
)
def _prepare(self, output_field=None):
return self
def relabeled_clone(self, relabels):
return self
class OuterRef(F):
def resolve_expression(self, query=None, allow_joins=True, reuse=None,
summarize=False, for_save=False, simple_col=False):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def _prepare(self, output_field=None):
return self
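# OuterRef sketch, assuming hypothetical `Post` and `Comment` models with a
# `Comment.post` foreign key and a `created` timestamp: OuterRef('pk') stays
# unresolved until the inner queryset is embedded via Subquery (defined below).
#
#   from django.db.models import OuterRef, Subquery
#   newest = Comment.objects.filter(post=OuterRef('pk')).order_by('-created')
#   Post.objects.annotate(newest_email=Subquery(newest.values('email')[:1]))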
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)" % (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items()))
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data['function'] = function
else:
data.setdefault('function', self.function)
template = template or data.get('template', self.template)
arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
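# Subclassing sketch: most SQL functions only need `function` (and optionally
# `arity` or a custom `template`). A hypothetical one-argument function, used
# on a hypothetical `Author` model:
#
#   class Reverse(Func):
#       function = 'REVERSE'
#       arity = 1
#
#   Author.objects.annotate(backwards=Reverse('name'))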
class Value(Expression):
"""Represent a wrapped value as a node within an expression."""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.value)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, 'get_placeholder'):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return 'NULL', []
return '%s', [val]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
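# Value sketch: wraps a literal so it participates in expressions and is passed
# as a query parameter. An explicit output_field is needed when the type can't
# be inferred from source expressions; the `Book` model is hypothetical.
#
#   from django.db.models import CharField, Value
#   Book.objects.annotate(source=Value('catalog', output_field=CharField()))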
class DurationValue(Value):
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value), []
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return '(%s)' % self.sql, self.params
def get_group_by_cols(self):
return [self]
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return '*', []
class Random(Expression):
output_field = fields.FloatField()
def __repr__(self):
return "Random()"
def as_sql(self, compiler, connection):
return connection.ops.random_function_sql(), []
class Col(Expression):
contains_column_references = True
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection))
class SimpleCol(Expression):
"""
Represents the SQL of a column name without the table name.
This variant of Col doesn't include the table name (or an alias) to
avoid a syntax error in check constraints.
"""
contains_column_references = True
def __init__(self, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.target = target
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return qn(self.target.column), []
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (
self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection)
)
class Ref(Expression):
"""
Reference to column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like an
ordering clause.
"""
template = '%(expressions)s'
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError('%s requires at least one expression.' % self.__class__.__name__)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
class ExpressionWrapper(Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection):
return self.expression.as_sql(compiler, connection)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
template = 'WHEN %(condition)s THEN %(result)s'
def __init__(self, condition=None, then=None, **lookups):
if lookups and condition is None:
condition, lookups = Q(**lookups), None
if condition is None or not getattr(condition, 'conditional', False) or lookups:
raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, 'resolve_expression'):
c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params['condition'] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params['result'] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
class Case(Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params['cases'] = case_joiner.join(case_parts)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or template_params.get('template', self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
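# Case/When sketch, assuming a hypothetical `Account` model with an integer
# `balance` column; each positional argument must be a When():
#
#   from django.db.models import Case, CharField, Value, When
#   Account.objects.annotate(status=Case(
#       When(balance__gt=0, then=Value('positive')),
#       When(balance__lt=0, then=Value('negative')),
#       default=Value('zero'),
#       output_field=CharField(),
#   ))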
class Subquery(Expression):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = '(%(subquery)s)'
contains_aggregate = False
def __init__(self, queryset, output_field=None, **extra):
self.queryset = queryset
self.extra = extra
super().__init__(output_field)
def _resolve_output_field(self):
if len(self.queryset.query.select) == 1:
return self.queryset.query.select[0].field
return super()._resolve_output_field()
def copy(self):
clone = super().copy()
clone.queryset = clone.queryset.all()
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
clone = self.copy()
clone.is_summary = summarize
clone.queryset.query.bump_prefix(query)
# Need to recursively resolve these.
def resolve_all(child):
if hasattr(child, 'children'):
[resolve_all(_child) for _child in child.children]
if hasattr(child, 'rhs'):
child.rhs = resolve(child.rhs)
def resolve(child):
if hasattr(child, 'resolve_expression'):
resolved = child.resolve_expression(
query=query, allow_joins=allow_joins, reuse=reuse,
summarize=summarize, for_save=for_save,
)
# Add table alias to the parent query's aliases to prevent
# quoting.
if hasattr(resolved, 'alias') and resolved.alias != resolved.target.model._meta.db_table:
clone.queryset.query.external_aliases.add(resolved.alias)
return resolved
return child
resolve_all(clone.queryset.query.where)
for key, value in clone.queryset.query.annotations.items():
if isinstance(value, Subquery):
clone.queryset.query.annotations[key] = resolve(value)
return clone
def get_source_expressions(self):
return [
x for x in [
getattr(expr, 'lhs', None)
for expr in self.queryset.query.where.children
] if x
]
def relabeled_clone(self, change_map):
clone = self.copy()
clone.queryset.query = clone.queryset.query.relabeled_clone(change_map)
clone.queryset.query.external_aliases.update(
alias for alias in change_map.values()
if alias not in clone.queryset.query.alias_map
)
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
template_params['subquery'], sql_params = self.queryset.query.get_compiler(connection=connection).as_sql()
template = template or template_params.get('template', self.template)
sql = template % template_params
return sql, sql_params
def _prepare(self, output_field):
# This method will only be called if this instance is the "rhs" in an
# expression: the wrapping () must be removed (as the expression that
# contains this will provide them). SQLite evaluates ((subquery))
# differently than the other databases.
if self.template == '(%(subquery)s)':
clone = self.copy()
clone.template = '%(subquery)s'
return clone
return self
class Exists(Subquery):
template = 'EXISTS(%(subquery)s)'
output_field = fields.BooleanField()
def __init__(self, *args, negated=False, **kwargs):
self.negated = negated
super().__init__(*args, **kwargs)
def __invert__(self):
return type(self)(self.queryset, negated=(not self.negated), **self.extra)
def resolve_expression(self, query=None, *args, **kwargs):
# As a performance optimization, remove ordering since EXISTS doesn't
# care about it, just whether or not a row matches.
self.queryset = self.queryset.order_by()
return super().resolve_expression(query, *args, **kwargs)
def as_sql(self, compiler, connection, template=None, **extra_context):
sql, params = super().as_sql(compiler, connection, template, **extra_context)
if self.negated:
sql = 'NOT {}'.format(sql)
return sql, params
def as_oracle(self, compiler, connection, template=None, **extra_context):
# Oracle doesn't allow EXISTS() in the SELECT list, so wrap it with a
# CASE WHEN expression. Change the template since the When expression
# requires a left hand side (column) to compare against.
sql, params = self.as_sql(compiler, connection, template, **extra_context)
sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql)
return sql, params
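# Exists sketch, reusing the hypothetical Post/Comment models: annotate with
# EXISTS and filter on the annotation (direct use of Exists() inside filter()
# is not assumed to be supported here).
#
#   from django.db.models import Exists, OuterRef
#   has_comments = Exists(Comment.objects.filter(post=OuterRef('pk')))
#   Post.objects.annotate(commented=has_comments).filter(commented=True)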
class OrderBy(BaseExpression):
template = '%(expression)s %(ordering)s'
def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False):
if nulls_first and nulls_last:
raise ValueError('nulls_first and nulls_last are mutually exclusive')
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, 'resolve_expression'):
raise ValueError('expression must be an expression type')
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
if not template:
if self.nulls_last:
template = '%s NULLS LAST' % self.template
elif self.nulls_first:
template = '%s NULLS FIRST' % self.template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
'expression': expression_sql,
'ordering': 'DESC' if self.descending else 'ASC',
**extra_context,
}
template = template or self.template
params *= template.count('%(expression)s')
return (template % placeholders).rstrip(), params
def as_sqlite(self, compiler, connection):
template = None
if self.nulls_last:
template = '%(expression)s IS NULL, %(expression)s %(ordering)s'
elif self.nulls_first:
template = '%(expression)s IS NOT NULL, %(expression)s %(ordering)s'
return self.as_sql(compiler, connection, template=template)
def as_mysql(self, compiler, connection):
template = None
if self.nulls_last:
template = 'IF(ISNULL(%(expression)s),1,0), %(expression)s %(ordering)s '
elif self.nulls_first:
template = 'IF(ISNULL(%(expression)s),0,1), %(expression)s %(ordering)s '
return self.as_sql(compiler, connection, template=template)
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first or self.nulls_last:
self.nulls_first = not self.nulls_first
self.nulls_last = not self.nulls_last
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
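# Ordering sketch: asc()/desc() on any expression return an OrderBy node, and
# nulls_first/nulls_last select the backend-specific template above. The
# `Entry` model and its `rating` column are hypothetical.
#
#   from django.db.models import F
#   Entry.objects.order_by(F('rating').desc(nulls_last=True))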
class Window(Expression):
template = '%(expression)s OVER (%(window)s)'
    # Although the main expression may be an aggregate or contain an
    # aggregate function, the window expression must not introduce the
    # GROUP BY clause that an aggregate normally would.
contains_aggregate = False
contains_over_clause = True
filterable = False
def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, 'window_compatible', False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses." %
expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = ExpressionList(*self.order_by)
elif not isinstance(self.order_by, BaseExpression):
raise ValueError(
'order_by must be either an Expression or a sequence of '
'expressions.'
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
if not connection.features.supports_over_clause:
raise NotSupportedError('This backend does not support window expressions.')
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], []
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler, connection=connection,
template='PARTITION BY %(expressions)s',
)
            window_sql.append(sql_expr)
window_params.extend(sql_params)
if self.order_by is not None:
window_sql.append(' ORDER BY ')
order_sql, order_params = compiler.compile(self.order_by)
            window_sql.append(order_sql)
window_params.extend(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.append(' ' + frame_sql)
window_params.extend(frame_params)
params.extend(window_params)
template = template or self.template
return template % {
'expression': expr_sql,
'window': ''.join(window_sql).strip()
}, params
def __str__(self):
return '{} OVER ({}{}{})'.format(
str(self.source_expression),
'PARTITION BY ' + str(self.partition_by) if self.partition_by else '',
'ORDER BY ' + str(self.order_by) if self.order_by else '',
str(self.frame or ''),
)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
def get_group_by_cols(self):
return []
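# Window sketch, assuming a hypothetical `Employee` model with `salary`,
# `department`, and `hired` columns (and a backend whose features report
# supports_over_clause):
#
#   from django.db.models import Avg, F, Window
#   Employee.objects.annotate(dept_avg=Window(
#       expression=Avg('salary'),
#       partition_by=F('department'),
#       order_by=F('hired').asc(),
#   ))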
class WindowFrame(Expression):
"""
    Model the frame clause in window expressions. There are two types of
    frame clauses, implemented as the subclasses below; all processing and
    validation (by no means intended to be complete) is done here. Providing
    an end for a frame is optional (the default is UNBOUNDED FOLLOWING,
    which is the last row in the frame).
"""
template = '%(frame_type)s BETWEEN %(start)s AND %(end)s'
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(connection, self.start.value, self.end.value)
return self.template % {
'frame_type': self.frame_type,
'start': start,
'end': end,
}, []
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
def get_group_by_cols(self):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = '%d %s' % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = '%d %s' % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
'frame_type': self.frame_type,
'start': start,
'end': end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError('Subclasses must implement window_frame_start_end().')
class RowRange(WindowFrame):
frame_type = 'ROWS'
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = 'RANGE'
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
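# Frame sketch: RowRange/ValueRange bound the window relative to the current
# row; a negative start means PRECEDING, 0 means CURRENT ROW, and None means
# UNBOUNDED. Continuing the hypothetical Employee example:
#
#   from django.db.models import Avg, F, RowRange, Window
#   Window(expression=Avg('salary'), order_by=F('hired').asc(),
#          frame=RowRange(start=-2, end=0))  # current row and two before it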
3d457f5a1a2e6fdd6904185512fa0964ac7e0af3d4e805d4283a5578e2f4b6f3 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import difflib
import functools
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import (
EmptyResultSet, FieldDoesNotExist, FieldError,
)
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, F, Ref, SimpleCol
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
'JoinInfo',
('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function')
)
def _get_col(target, field, alias, simple_col):
if simple_col:
return SimpleCol(target, field)
return target.get_col(alias, field)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
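# RawQuery backs Manager.raw(); a usage sketch with a hypothetical `Person`
# model and table (the selected columns must include the primary key):
#
#   Person.objects.raw(
#       'SELECT * FROM myapp_person WHERE last_name = %s', ['Doe'])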
class Query:
"""A single SQL query."""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
self.external_aliases = set()
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.subquery = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = ()
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = ()
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = ()
self.select_for_update = False
self.select_for_update_nowait = False
self.select_for_update_skip_locked = False
self.select_for_update_of = ()
self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = ()
# SQL annotation-related attributes
self.annotations = {} # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# Set combination attributes
self.combinator = None
self.combinator_all = False
self.combined_queries = ()
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (frozenset(), True)
self._filtered_relations = {}
self.explain_query = False
self.explain_format = None
self.explain_options = {}
@property
def has_select_fields(self):
return bool(self.select or self.annotation_select_mask or self.extra_select_mask)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def _prepare(self, field):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self):
"""
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is None:
obj.annotation_select_mask = None
else:
obj.annotation_select_mask = self.annotation_select_mask.copy()
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property
try:
del obj.base_table
except AttributeError:
pass
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
                # details).
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
elif isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
# Reference to column. Make sure the referenced column
# is selected.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_exprs.append(Ref(col_alias, expr))
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
has_limit = self.low_mark != 0 or self.high_mark is not None
has_existing_annotations = any(
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
)
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (isinstance(self.group_by, tuple) or has_limit or has_existing_annotations or
self.distinct or self.combinator):
from django.db.models.sql.subqueries import AggregateQuery
outer_query = AggregateQuery(self.model)
inner_query = self.clone()
inner_query.select_for_update = False
inner_query.select_related = False
if not has_limit and not self.distinct_fields:
# Queries with distinct_fields need ordering and when a limit
# is applied we must take the slice from the ordered query.
# Otherwise no need for ordering.
inner_query.clear_ordering(True)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
if inner_query.default_cols and has_existing_annotations:
inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.alias_map}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
            # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
try:
outer_query.add_subquery(inner_query, using)
except EmptyResultSet:
return {
alias: None
for alias in outer_query.annotation_select
}
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
outer_query.clear_ordering(True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using)
result = compiler.execute_sql(SINGLE)
if result is None:
result = [None] * len(outer_query.annotation_select)
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
number = obj.get_aggregation(using, ['__count'])['__count']
if number is None:
number = 0
return number
def has_filters(self):
return self.where
def has_results(self, using):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
q.set_group_by()
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
q.explain_query = True
q.explain_format = format
q.explain_options = options
compiler = q.get_compiler(using=using)
return '\n'.join(compiler.explain_query())
def combine(self, rhs, connector):
"""
        Merge the 'rhs' query into the current one (with any 'rhs' effects
        being applied *after*, that is, "to the right of", anything in the
        current query). 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
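    # combine() backs the queryset | and & operators; a sketch with a
    # hypothetical `Product` model (here, each side's filters are relabeled
    # and merged with OR):
    #
    #   cheap_or_new = (Product.objects.filter(price__lt=10) |
    #                   Product.objects.filter(is_new=True))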
def deferred_to_data(self, target, callback):
"""
Convert the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
if name in self._filtered_relations:
name = self._filtered_relations[name].relation_name
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.local_fields:
if field not in values:
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
seen.setdefault(model, set())
for model, values in seen.items():
callback(target, model, values)
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = filtered_relation.alias if filtered_relation is not None else table_name
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
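    # Illustrative behavior (assuming the default alias_prefix 'T'): the
    # first occurrence of a table is aliased by its own name; forcing a
    # second alias for the same table with create=True hands out a generated
    # name such as 'T3', numbered from the current size of alias_map.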
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Promote recursively the join type of given aliases and its children to
        an outer join. A join is only promoted if it is nullable or the
        parent join is an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map
if self.alias_map[join].parent_alias == alias and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map) for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {change_map.get(alias, alias)
for alias in self.external_aliases}
def bump_prefix(self, outer_query):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
local_recursion_limit = 127 # explicitly avoid infinite loop
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RuntimeError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
self.change_aliases({
alias: '%s%d' % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
})
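    def _example_prefix_sequence(self):
        # Hypothetical sketch (not part of Django's API): reproduce the first
        # few values prefix_gen() would yield when alias_prefix is 'T'. Note
        # that the bumped single letter is yielded twice; the caller only
        # checks membership in subq_aliases, so duplicates are harmless.
        from itertools import count, product
        from string import ascii_uppercase
        def gen(current):
            prefix = chr(ord(current) + 1)
            yield prefix
            for n in count(1):
                seq = ascii_uppercase[ascii_uppercase.index(prefix):] if prefix else ascii_uppercase
                for s in product(seq, repeat=n):
                    yield ''.join(s)
                prefix = None
        it = gen('T')
        assert [next(it) for _ in range(4)] == ['U', 'U', 'V', 'W']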
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
else:
alias = self.join(BaseTable(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None, reuse_with_filtered_relation=False):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a
sql.datastructures.BaseTable or Join.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
The 'reuse_with_filtered_relation' parameter is used when computing
FilteredRelation instances.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
if reuse_with_filtered_relation and reuse:
reuse_aliases = [
a for a, j in self.alias_map.items()
if a in reuse and j.equals(join, with_filtered_relation=False)
]
else:
reuse_aliases = [
a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
        Make sure the given 'model' is joined in the query. If 'model' isn't
        a parent of 'opts' or if it is None, this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in the base chain with no parents;
            # assign the new options object and skip to the next base in
            # that case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False):
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
self.append_annotation_mask([alias])
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
# It's safe to drop ordering if the queryset isn't using slicing,
# distinct(*fields) or select_for_update().
if (self.low_mark == 0 and self.high_mark is None and
not self.distinct_fields and
not self.select_for_update):
clone.clear_ordering(True)
return clone
def as_sql(self, compiler, connection):
return self.get_compiler(connection=connection).as_sql()
def resolve_lookup_value(self, value, can_reuse, allow_joins, simple_col):
if hasattr(value, 'resolve_expression'):
kwargs = {'reuse': can_reuse, 'allow_joins': allow_joins}
if isinstance(value, F):
kwargs['simple_col'] = simple_col
value = value.resolve_expression(self, **kwargs)
        elif isinstance(value, (list, tuple)):
            # The items of the iterable may be expressions and therefore need
            # to be resolved independently. Rebuild the container since
            # resolve_expression() returns a new object rather than mutating
            # in place.
            resolved_values = []
            for sub_value in value:
                if hasattr(sub_value, 'resolve_expression'):
                    if isinstance(sub_value, F):
                        resolved_values.append(sub_value.resolve_expression(
                            self, reuse=can_reuse, allow_joins=allow_joins,
                            simple_col=simple_col,
                        ))
                    else:
                        resolved_values.append(sub_value.resolve_expression(
                            self, reuse=can_reuse, allow_joins=allow_joins,
                        ))
                else:
                    resolved_values.append(sub_value)
            value = type(value)(resolved_values)
return value
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
                'Invalid lookup "%s" for model %s.' %
(lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
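    # Illustrative return values (hypothetical model with a ForeignKey
    # 'author' and an annotation named 'num_entries'):
    #     solve_lookup_type('author__name__icontains')
    #         -> (['icontains'], ['author', 'name'], False)
    #     solve_lookup_type('num_entries__gt')
    #         -> roughly (('gt',), (), <the resolved annotation expression>)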
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (isinstance(value, Query) and not value.has_select_fields and
not check_rel_lookup_compatibility(value.model, opts, field)):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts, field)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts, field)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ['exact']
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
if lhs.field.is_relation:
raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = 'exact'
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup('isnull')(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookup_name == 'exact' and lookup.rhs == ''):
return lhs.get_lookup('isnull')(lhs, True)
return lookup
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups())
if suggested_lookups:
suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups)
else:
suggestion = '.'
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, allow_joins=True, split_subq=True,
reuse_with_filtered_relation=False, simple_col=False):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself, that is done
        higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
will be reused.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if not getattr(reffed_expression, 'filterable', True):
raise NotSupportedError(
reffed_expression.__class__.__name__ + ' is disallowed in '
'the filter clause.'
)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins, simple_col)
used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)}
clause = self.where_class()
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
clause.add(condition, AND)
return clause, []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
if len(targets) == 1:
col = _get_col(targets[0], join_info.final_field, alias, simple_col)
else:
col = MultiColSource(alias, targets, join_info.targets, join_info.final_field)
else:
col = _get_col(targets[0], join_info.final_field, alias, simple_col)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause.add(condition, AND)
require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated
if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None:
require_outer = True
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
lookup_class = targets[0].get_lookup('isnull')
col = _get_col(targets[0], join_info.targets[0], alias, simple_col)
clause.add(lookup_class(col, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_clause):
self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
        # (Consider the case where rel_a is LOUTER and rel_a__col=1 is added:
        # if rel_a doesn't produce any rows, then the whole condition must
        # fail. So, demotion is OK.)
existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def build_where(self, q_object):
return self._add_q(q_object, used_aliases=set(), allow_joins=False, simple_col=True)[0]
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True,
simple_col=False):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause, needed_inner = self._add_q(
child, used_aliases, branch_negated,
current_negated, allow_joins, split_subq)
joinpromoter.add_votes(needed_inner)
else:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, allow_joins=allow_joins,
split_subq=split_subq, simple_col=simple_col,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child, reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child, can_reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True, split_subq=False,
reuse_with_filtered_relation=True,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
for lookup in chain((filtered_relation.relation_name,), lookups):
lookup_parts, field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
if len(field_parts) > (shift + len(lookup_parts)):
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations (got %r)." % lookup
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
        Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
filtered_relation = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted([
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
])
raise FieldError("Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info(filtered_relation)
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True,
reuse_with_filtered_relation=False):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
        The 'reuse_with_filtered_relation' flag restricts join reuse to the
        aliases in 'can_reuse', which is needed when resolving
        FilteredRelation instances.
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot], opts, allow_many, fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(transform, name=name, previous=final_transformer)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = Join(
opts.db_table, alias, table_alias, INNER, join.join_field,
nullable, filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
alias = self.join(
connection, reuse=reuse,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
        The 'targets' parameter contains the final fields being joined to,
        'joins' is the full list of join aliases. The 'path' contains the
        PathInfos used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False, simple_col=False):
if not allow_joins and LOOKUP_SEP in name:
raise FieldError("Joined field references are not permitted in this query")
if name in self.annotations:
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
return Ref(name, self.annotation_select[name])
else:
return self.annotations[name]
else:
field_list = name.split(LOOKUP_SEP)
join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse)
targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if not allow_joins and len(join_list) > 1:
raise FieldError('Joined field references are not permitted in this query')
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
col = _get_col(targets[0], join_info.targets[0], join_list[-1], simple_col)
return col
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
For example, if the origin filter is ~Q(child__name='foo'), filter_expr
is ('child__name', 'foo') and can_reuse is a set of joins usable for
filters in the original query.
We will turn this into equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
        It might be worth considering using WHERE NOT EXISTS as that has
saner null handling, and is easier for the backend's optimizer to
handle.
"""
# Generate the inner query.
query = Query(self.model)
query.add_filter(filter_expr)
query.clear_ordering(True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
# Add extra check to make sure the selected field will not be null
# since we are adding an IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
        # nothing.
col = query.select[0]
select_field = col.target
alias = col.alias
if self.is_nullable(select_field):
lookup_class = select_field.get_lookup('isnull')
lookup = lookup_class(select_field.get_col(alias), False)
query.where.add(lookup, AND)
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases.add(alias)
condition, needed_inner = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
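    def _example_set_limits(self):
        # Hypothetical helper (not part of Django's API): successive calls
        # compose relative to the current window, clamped to the high mark.
        self.clear_limits()
        self.set_limits(low=1, high=10)  # window [1, 10)
        self.set_limits(low=2, high=5)   # [3, 6) relative to the old window
        assert (self.low_mark, self.high_mark) == (3, 6)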
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
# Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use an outer join.
join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted([
*get_field_names_from_opts(opts), *self.extra,
*self.annotation_select, *self._filtered_relations
])
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):
errors.append(item)
if getattr(item, 'contains_aggregate', False):
raise FieldError(
'Using an aggregate in order_by() without also including '
'it in annotate() is not allowed: %s' % item
)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Remove any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = ()
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
group_by = list(self.select)
if self.annotation_select:
for annotation in self.annotation_select.values():
for col in annotation.get_group_by_cols():
group_by.append(col)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
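    # Example of the placeholder pairing above (illustrative): given
    #     select={'val': 'colA + %s', 'pct': "'100%%s'"}, select_params=[5]
    # self.extra becomes
    #     {'val': ('colA + %s', [5]), 'pct': ("'100%%s'", [])}
    # since the escaped '%%s' is not treated as a parameter placeholder.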
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
        # splitting and handling when computing the SQL column names (as part
        # of deferred_to_data()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
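    # Illustrative walk-through of self.deferred_loading, a (names, defer)
    # pair that starts at (frozenset(), True):
    #     qs.defer('body')               -> ({'body'}, True): all but body
    #     qs.defer('body').defer('rank') -> ({'body', 'rank'}, True)
    #     qs.only('headline')            -> ({'headline'}, False): only
    #                                       headline (plus the primary key)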
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, return a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""Callback used by get_deferred_field_names()."""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if self.group_by is True:
self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
self.set_group_by()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
        Return the dictionary of annotations that are not masked and should
        be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items()
if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
        Trim joins from the start of the join path, up to (but not including)
        the first m2m join in the names_with_path structure.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map
if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a Join instead of a BaseTable reference.
# But the first entry in the query's FROM clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
return (
connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
field.empty_strings_allowed
) or field.null
def get_order_dir(field, default='ASC'):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
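def _demo_get_order_dir():
    # Minimal sketch (not part of Django's public API): the '-' prefix flips
    # the default direction, whatever that default is.
    assert get_order_dir('created') == ('created', 'ASC')
    assert get_order_dir('-created') == ('created', 'DESC')
    assert get_order_dir('created', 'DESC') == ('created', 'DESC')
    assert get_order_dir('-created', 'DESC') == ('created', 'ASC')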
def add_to_dict(data, key, value):
"""
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
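def _demo_add_to_dict():
    # Minimal sketch: values accumulate into per-key sets.
    data = {}
    add_to_dict(data, 'a', 1)
    add_to_dict(data, 'a', 2)
    add_to_dict(data, 'b', 3)
    assert data == {'a': {1, 2}, 'b': {3}}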
def is_reverse_o2o(field):
"""
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def add_votes(self, votes):
"""
        Add a single vote per item to self.votes. The parameter can be any
        iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (for example, a
            # reverse foreign key with no rows, or a null direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
            # (rel_a__col__icontains='Alex' | rel_a__col__icontains='Russell')
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
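# Illustrative sketch of the voting rule above (exercises only the
# Counter-based bookkeeping; the table names are hypothetical):
def _demo_join_promoter_votes():
    promoter = JoinPromoter(OR, 3, negated=False)
    promoter.add_votes(['rel_a', 'rel_b'])
    promoter.add_votes(['rel_a'])
    promoter.add_votes(['rel_a'])
    # rel_a: 3 votes == num_children -> safe to demote to INNER;
    # rel_b: 1 vote < num_children   -> must be promoted to LOUTER.
    assert promoter.votes['rel_a'] == 3
    assert promoter.votes['rel_b'] == 1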
|
a2919140806be3049c5d65a09eb816fcef0b83ee70737f71fb1a999490f1d5fd | from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.utils import InterfaceError
class DatabaseFeatures(BaseDatabaseFeatures):
interprets_empty_strings_as_nulls = True
has_select_for_update = True
has_select_for_update_nowait = True
has_select_for_update_skip_locked = True
has_select_for_update_of = True
select_for_update_of_column = True
can_return_columns_from_insert = True
can_introspect_autofield = True
supports_subqueries_in_group_by = False
supports_transactions = True
supports_timezones = False
has_native_duration_field = True
can_defer_constraint_checks = True
supports_partially_nullable_unique_constraints = False
truncates_names = True
supports_tablespaces = True
supports_sequence_reset = False
can_introspect_materialized_views = True
can_introspect_time_field = False
atomic_transactions = False
supports_combined_alters = False
nulls_order_largest = True
requires_literal_defaults = True
closed_cursor_error_class = InterfaceError
bare_select_suffix = " FROM DUAL"
# select for update with limit can be achieved on Oracle, but not with the current backend.
supports_select_for_update_with_limit = False
supports_temporal_subtraction = True
    # Oracle itself doesn't ignore the case of quoted identifiers, but the
    # current backend does, by uppercasing all identifiers.
ignores_table_name_case = True
supports_index_on_text_field = False
has_case_insensitive_like = False
create_test_procedure_without_params_sql = """
CREATE PROCEDURE "TEST_PROCEDURE" AS
V_I INTEGER;
BEGIN
V_I := 1;
END;
"""
create_test_procedure_with_int_param_sql = """
CREATE PROCEDURE "TEST_PROCEDURE" (P_I INTEGER) AS
V_I INTEGER;
BEGIN
V_I := P_I;
END;
"""
supports_callproc_kwargs = True
supports_over_clause = True
supports_frame_range_fixed_distance = True
supports_ignore_conflicts = False
max_query_params = 2**16 - 1
supports_partial_indexes = False
supports_slicing_ordering_in_compound = True
|
fd5dac9067b685daa4260866e71dd58ed95f5a505c40ddd0984d5d2d1163e2e8 | from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures:
gis_enabled = False
allows_group_by_pk = False
allows_group_by_selected_pks = False
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate NULL rows in a nullable
# unique field? All core backends implement this correctly, but other
# databases such as SQL Server do not.
supports_nullable_unique_constraints = True
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists and some fields are nullable but not all of them?
supports_partially_nullable_unique_constraints = True
can_use_chunked_reads = True
can_return_columns_from_insert = False
can_return_rows_from_bulk_insert = False
has_bulk_insert = True
uses_savepoints = True
can_release_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries_with_in = True
has_select_for_update = False
has_select_for_update_nowait = False
has_select_for_update_skip_locked = False
has_select_for_update_of = False
# Does the database's SELECT FOR UPDATE OF syntax require a column rather
# than a table?
select_for_update_of_column = False
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of the transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does the backend truncate names properly when they are too long?
truncates_names = False
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
# Is there a true datatype for uuid?
has_native_uuid_field = False
# Is there a true datatype for timedeltas?
has_native_duration_field = False
# Does the database driver support subtraction of same-type temporal data
# by returning the type used to store the duration field?
supports_temporal_subtraction = False
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# The database's limit on the number of query parameters.
max_query_params = None
# Can an object have an autoincrement primary key of 0? MySQL says No.
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred?
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Can the backend introspect the default value of a column?
can_introspect_default = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Can the backend introspect an AutoField, instead of an IntegerField?
can_introspect_autofield = False
# Can the backend introspect a BigIntegerField, instead of an IntegerField?
can_introspect_big_integer_field = True
# Can the backend introspect a BinaryField, instead of a TextField?
can_introspect_binary_field = True
# Can the backend introspect a DecimalField, instead of a FloatField?
can_introspect_decimal_field = True
# Can the backend introspect a DurationField, instead of a BigIntegerField?
can_introspect_duration_field = True
# Can the backend introspect an IPAddressField, instead of a CharField?
can_introspect_ip_address_field = False
# Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
can_introspect_positive_integer_field = False
# Can the backend introspect a SmallIntegerField, instead of an IntegerField?
can_introspect_small_integer_field = False
# Can the backend introspect a TimeField, instead of a DateTimeField?
can_introspect_time_field = True
# Some backends may not be able to differentiate BigAutoField from other
# fields such as AutoField.
introspected_big_auto_field_type = 'BigAutoField'
# Some backends may not be able to differentiate BooleanField from other
# fields such as IntegerField.
introspected_boolean_field_type = 'BooleanField'
# Can the backend introspect the column order (ASC/DESC) for indexes?
supports_index_column_ordering = True
# Does the backend support introspection of materialized views?
can_introspect_materialized_views = False
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Does it support operations requiring references rename in a transaction?
supports_atomic_references_rename = True
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Can it create foreign key constraints inline when adding columns?
can_create_inline_fk = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
supports_table_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note that this can be provided by the backend even if not
# supported by the Python driver.
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterized ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = True
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ''
# Whether NULL is implied on columns without needing to be explicitly specified.
implied_column_null = False
# Does the backend support "select for update" queries with limit (and offset)?
supports_select_for_update_with_limit = True
# Does the backend ignore null expressions in GREATEST and LEAST queries unless
# every expression is null?
greatest_least_ignores_nulls = False
# Can the backend clone databases for parallel test execution?
# Defaults to False to allow third-party backends to opt in.
can_clone_databases = False
# Does the backend consider table names with different casing to
# be equal?
ignores_table_name_case = False
# Place FOR UPDATE right after FROM clause. Used on MSSQL.
for_update_after_from = False
# Combinatorial flags
supports_select_union = True
supports_select_intersection = True
supports_select_difference = True
supports_slicing_ordering_in_compound = False
supports_parentheses_in_compound = True
# Does the database support SQL 2003 FILTER (WHERE ...) in aggregate
# expressions?
supports_aggregate_filter_clause = False
# Does the backend support indexing a TextField?
supports_index_on_text_field = True
# Does the backend support window expressions (expression OVER (...))?
supports_over_clause = False
supports_frame_range_fixed_distance = False
# Does the backend support CAST with precision?
supports_cast_with_precision = True
# How many fractional-second digits does the database return when casting a
# value to a type with a time component?
time_cast_precision = 6
# SQL to create a procedure for use by the Django test suite. The
# functionality of the procedure isn't important.
create_test_procedure_without_params_sql = None
create_test_procedure_with_int_param_sql = None
# Does the backend support keyword parameters for cursor.callproc()?
supports_callproc_kwargs = False
# Convert CharField results from bytes to str in database functions.
db_functions_convert_bytes_to_str = False
# What formats does the backend EXPLAIN syntax support?
supported_explain_formats = set()
# Does DatabaseOperations.explain_query_prefix() raise ValueError if
# unknown kwargs are passed to QuerySet.explain()?
validates_explain_options = True
# Does the backend support the default parameter in lead() and lag()?
supports_default_in_lead_lag = True
# Does the backend support ignoring constraint or uniqueness errors during
# INSERT?
supports_ignore_conflicts = True
# Does this backend require casting the results of CASE expressions used
# in UPDATE statements to ensure the expression has the correct type?
requires_casted_case_in_updates = False
# Does the backend support partial indexes (CREATE INDEX ... WHERE ...)?
supports_partial_indexes = True
supports_functions_in_partial_indexes = True
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_explaining_query_execution(self):
"""Does this backend support explaining query execution?"""
return self.connection.ops.explain_prefix is not None
@cached_property
def supports_transactions(self):
"""Confirm support for transactions."""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.set_autocommit(False)
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection.rollback()
self.connection.set_autocommit(True)
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
return count == 0
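# Illustrative sketch (hypothetical backend): third-party backends describe
# their capabilities by subclassing and overriding these class attributes.
class ExampleBackendFeatures(BaseDatabaseFeatures):
    has_select_for_update = True
    supports_sequence_reset = False
    max_query_params = 999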
|
90c109458c9ab09a85d469b0d52e3f2cebf80b95d2c2e3eb12c2ff8f7352b3a6 | import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import timezone
from django.utils.duration import duration_microseconds
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
# MySQL stores positive fields as UNSIGNED ints.
integer_field_ranges = {
**BaseDatabaseOperations.integer_field_ranges,
'PositiveSmallIntegerField': (0, 65535),
'PositiveIntegerField': (0, 4294967295),
}
cast_data_types = {
'AutoField': 'signed integer',
'BigAutoField': 'signed integer',
'CharField': 'char(%(max_length)s)',
'DecimalField': 'decimal(%(max_digits)s, %(decimal_places)s)',
'TextField': 'char',
'IntegerField': 'signed integer',
'BigIntegerField': 'signed integer',
'SmallIntegerField': 'signed integer',
'PositiveIntegerField': 'unsigned integer',
'PositiveSmallIntegerField': 'unsigned integer',
}
cast_char_field_without_max_length = 'char'
explain_prefix = 'EXPLAIN'
def date_extract_sql(self, lookup_type, field_name):
# https://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
elif lookup_type == 'week':
# Override the value of default_week_format for consistency with
# other database backends.
# Mode 3: Monday, 1-53, with 4 or more days this year.
return "WEEK(%s, 3)" % field_name
elif lookup_type == 'iso_year':
# Get the year part from the YEARWEEK function, which returns a
# number as year * 100 + week.
return "TRUNCATE(YEARWEEK(%s, 3), -2) / 100" % field_name
else:
# EXTRACT returns 1-53 based on ISO-8601 for the week number.
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = {
'year': '%%Y-01-01',
'month': '%%Y-%%m-01',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str)
elif lookup_type == 'quarter':
return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % (
field_name, field_name
)
elif lookup_type == 'week':
return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % (
field_name, field_name
)
else:
return "DATE(%s)" % (field_name)
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', '%s')" % (field_name, tzname)
return field_name
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "DATE(%s)" % field_name
def datetime_cast_time_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "TIME(%s)" % field_name
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return self.date_extract_sql(lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
if lookup_type == 'quarter':
return (
"CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + "
"INTERVAL QUARTER({field_name}) QUARTER - " +
"INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)"
).format(field_name=field_name)
if lookup_type == 'week':
return (
"CAST(DATE_FORMAT(DATE_SUB({field_name}, "
"INTERVAL WEEKDAY({field_name}) DAY), "
"'%%Y-%%m-%%d 00:00:00') AS DATETIME)"
).format(field_name=field_name)
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join(format[:i] + format_def[i:])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
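# Worked example (illustrative): for lookup_type 'minute', i == 5 and
# format_str is '%%Y-%%m-%%d %%H:%%i:00'; the doubled percents are unescaped
# during parameter substitution, yielding
# CAST(DATE_FORMAT(col, '%Y-%m-%d %H:%i:00') AS DATETIME).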
def time_trunc_sql(self, lookup_type, field_name):
fields = {
'hour': '%%H:00:00',
'minute': '%%H:%%i:00',
'second': '%%H:%%i:%%s',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str)
else:
return "TIME(%s)" % (field_name)
def date_interval_sql(self, timedelta):
return 'INTERVAL %s MICROSECOND' % duration_microseconds(timedelta)
def format_for_duration_arithmetic(self, sql):
return 'INTERVAL %s MICROSECOND' % sql
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return [(None, ("NULL", [], False))]
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
query = getattr(cursor, '_executed', None)
if query is not None:
query = query.decode(errors='replace')
return query
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(self.quote_name(table)),
))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
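# Illustrative output for tables ['foo', 'bar'] with no sequences:
#     SET FOREIGN_KEY_CHECKS = 0;
#     TRUNCATE `foo`;
#     TRUNCATE `bar`;
#     SET FOREIGN_KEY_CHECKS = 1;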
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
return str(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware times.")
return str(value)
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def combine_expression(self, connector, sub_expressions):
if connector == '^':
return 'POW(%s)' % ','.join(sub_expressions)
# Convert the result to a signed integer since MySQL's binary operators
# return an unsigned integer.
elif connector in ('&', '|', '<<'):
return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions)
elif connector == '>>':
lhs, rhs = sub_expressions
return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
return super().combine_expression(connector, sub_expressions)
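# Illustrative translations for sub_expressions ['a', 'b']: '^' yields
# 'POW(a,b)', '&' yields 'CONVERT(a&b, SIGNED)', and '>>' yields
# 'FLOOR(a / POW(2, b))'.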
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
if settings.USE_TZ:
converters.append(self.convert_datetimefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
def binary_placeholder_sql(self, value):
return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s'
def subtract_temporals(self, internal_type, lhs, rhs):
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
if internal_type == 'TimeField':
if self.connection.mysql_is_mariadb:
# MariaDB includes the microsecond component in TIME_TO_SEC as
# a decimal. MySQL returns an integer without microseconds.
return 'CAST((TIME_TO_SEC(%(lhs)s) - TIME_TO_SEC(%(rhs)s)) * 1000000 AS SIGNED)' % {
'lhs': lhs_sql, 'rhs': rhs_sql
}, lhs_params + rhs_params
return (
"((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -"
" (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))"
) % {'lhs': lhs_sql, 'rhs': rhs_sql}, lhs_params * 2 + rhs_params * 2
else:
return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), rhs_params + lhs_params
def explain_query_prefix(self, format=None, **options):
# Alias MySQL's TRADITIONAL to TEXT for consistency with other backends.
if format and format.upper() == 'TEXT':
format = 'TRADITIONAL'
prefix = super().explain_query_prefix(format, **options)
if format:
prefix += ' FORMAT=%s' % format
if self.connection.features.needs_explain_extended and format is None:
# EXTENDED and FORMAT are mutually exclusive options.
prefix += ' EXTENDED'
return prefix
def regex_lookup(self, lookup_type):
# REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE
# doesn't exist in MySQL 5.6 or in MariaDB.
if self.connection.mysql_version < (8, 0, 0) or self.connection.mysql_is_mariadb:
if lookup_type == 'regex':
return '%s REGEXP BINARY %s'
return '%s REGEXP %s'
match_option = 'c' if lookup_type == 'regex' else 'i'
return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option
def insert_statement(self, ignore_conflicts=False):
return 'INSERT IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts)
|
760dc0aa485ca50fc27a548c568ba7beab9e795c253ed1830f3101226a7d6eaf | import time
from importlib import import_module
from django.apps import apps
from django.core.checks import Tags, run_checks
from django.core.management.base import (
BaseCommand, CommandError, no_translations,
)
from django.core.management.sql import (
emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ModelState, ProjectState
from django.utils.module_loading import module_has_submodule
from django.utils.text import Truncator
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
def add_arguments(self, parser):
parser.add_argument(
'app_label', nargs='?',
help='App label of an application to synchronize the state.',
)
parser.add_argument(
'migration_name', nargs='?',
help='Database state will be brought to the state after that '
'migration. Use the name "zero" to unapply all migrations.',
)
parser.add_argument(
'--noinput', '--no-input', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.',
)
parser.add_argument(
'--fake', action='store_true',
help='Mark migrations as run without actually running them.',
)
parser.add_argument(
'--fake-initial', action='store_true',
help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
'that the current database schema matches your initial migration before using this '
'flag. Django will only check for an existing table name.',
)
parser.add_argument(
'--plan', action='store_true',
help='Shows a list of the migration actions that will be performed.',
)
parser.add_argument(
'--run-syncdb', action='store_true',
help='Creates tables for apps without migrations.',
)
def _run_checks(self, **kwargs):
issues = run_checks(tags=[Tags.database])
issues.extend(super()._run_checks(**kwargs))
return issues
@no_translations
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.interactive = options['interactive']
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options['database']
connection = connections[db]
# Hook for backends needing any database preparation
connection.prepare_database()
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Raise an error if any migrations are applied before their dependencies.
executor.loader.check_consistent_history(connection)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
run_syncdb = options['run_syncdb']
target_app_labels_only = True
if options['app_label']:
# Validate app_label.
app_label = options['app_label']
try:
apps.get_app_config(app_label)
except LookupError as err:
raise CommandError(str(err))
if run_syncdb:
if app_label in executor.loader.migrated_apps:
raise CommandError("Can't use run_syncdb with app '%s' as it has migrations." % app_label)
elif app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations." % app_label)
if options['app_label'] and options['migration_name']:
migration_name = options['migration_name']
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." %
(migration_name, app_label)
)
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
migration_name, app_label))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif options['app_label']:
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
plan = executor.migration_plan(targets)
if options['plan']:
self.stdout.write('Planned operations:', self.style.MIGRATE_LABEL)
if not plan:
self.stdout.write(' No planned migration operations.')
for migration, backwards in plan:
self.stdout.write(str(migration), self.style.MIGRATE_HEADING)
for operation in migration.operations:
message, is_error = self.describe_operation(operation, backwards)
style = self.style.WARNING if is_error else None
self.stdout.write(' ' + message, style)
return
# At this point, ignore run_syncdb if there aren't any apps to sync.
run_syncdb = options['run_syncdb'] and executor.loader.unmigrated_apps
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
if options['app_label']:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated app: %s" % app_label)
)
else:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") +
(", ".join(sorted(executor.loader.unmigrated_apps)))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ") +
(", ".join(sorted({a for a, n in targets})) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(
" Unapply all migrations: ") + "%s" % (targets[0][0],)
)
else:
self.stdout.write(self.style.MIGRATE_LABEL(
" Target specific migration: ") + "%s, from %s"
% (targets[0][1], targets[0][0])
)
pre_migrate_state = executor._create_project_state(with_applied_migrations=True)
pre_migrate_apps = pre_migrate_state.apps
emit_pre_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=pre_migrate_apps, plan=plan,
)
# Run the syncdb phase.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
if options['app_label']:
self.sync_apps(connection, [app_label])
else:
self.sync_apps(connection, executor.loader.unmigrated_apps)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
# If there are changes that aren't in migrations yet, tell the user how to fix it.
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(self.style.NOTICE(
" Your models have changes that are not yet reflected "
"in a migration, and so won't be applied."
))
self.stdout.write(self.style.NOTICE(
" Run 'manage.py makemigrations' to make new "
"migrations, and then re-run 'manage.py migrate' to "
"apply them."
))
fake = False
fake_initial = False
else:
fake = options['fake']
fake_initial = options['fake_initial']
post_migrate_state = executor.migrate(
targets, plan=plan, state=pre_migrate_state.clone(), fake=fake,
fake_initial=fake_initial,
)
# post_migrate signals have access to all models. Ensure that all models
# are reloaded in case any are delayed.
post_migrate_state.clear_delayed_apps_cache()
post_migrate_apps = post_migrate_state.apps
# Re-render models of real apps to include relationships now that
# we've got a final state. This wouldn't be necessary if real apps
# models were rendered with relationships in the first place.
with post_migrate_apps.bulk_update():
model_keys = []
for model_state in post_migrate_apps.real_models:
model_key = model_state.app_label, model_state.name_lower
model_keys.append(model_key)
post_migrate_apps.unregister_model(*model_key)
post_migrate_apps.render_multiple([
ModelState.from_model(apps.get_model(*model)) for model in model_keys
])
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=post_migrate_apps, plan=plan,
)
def migration_progress_callback(self, action, migration=None, fake=False):
if self.verbosity >= 1:
compute_time = self.verbosity > 1
if action == "apply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "unapply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "render_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Rendering model states...", ending="")
self.stdout.flush()
elif action == "render_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))
def sync_apps(self, connection, app_labels):
"""Run the old syncdb-style operation on a list of app_labels."""
with connection.cursor() as cursor:
tables = connection.introspection.table_names(cursor)
# Build the manifest of apps and models that are to be synchronized.
all_models = [
(
app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=False),
)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.identifier_converter
return not (
(converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
)
manifest = {
app_name: list(filter(model_installed, model_list))
for app_name, model_list in all_models
}
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...\n")
with connection.schema_editor() as editor:
for app_name, model_list in manifest.items():
for model in model_list:
# Never install unmanaged models, etc.
if not model._meta.can_migrate(connection):
continue
if self.verbosity >= 3:
self.stdout.write(
" Processing %s.%s model\n" % (app_name, model._meta.object_name)
)
if self.verbosity >= 1:
self.stdout.write(" Creating table %s\n" % model._meta.db_table)
editor.create_model(model)
# Deferred SQL is executed when exiting the editor's context.
if self.verbosity >= 1:
self.stdout.write(" Running deferred SQL...\n")
@staticmethod
def describe_operation(operation, backwards):
"""Return a string that describes a migration operation for --plan."""
prefix = ''
if hasattr(operation, 'code'):
code = operation.reverse_code if backwards else operation.code
action = code.__doc__ if code else ''
elif hasattr(operation, 'sql'):
action = operation.reverse_sql if backwards else operation.sql
else:
action = ''
if backwards:
prefix = 'Undo '
if action is None:
action = 'IRREVERSIBLE'
is_error = True
else:
action = str(action).replace('\n', '')
is_error = False
if action:
action = ' -> ' + action
truncated = Truncator(action)
return prefix + operation.describe() + truncated.chars(40), is_error
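# Illustrative programmatic invocation (assumes a configured Django project;
# 'myapp' and '0002' are hypothetical labels):
def _example_invoke_migrate():
    from django.core.management import call_command
    call_command('migrate', 'myapp', '0002', interactive=False)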
|
3134c62c08b927b15f749d8f473f56dab6be66be026ce1bfa79ee13bdff51b17 | import functools
import glob
import gzip
import os
import sys
import warnings
import zipfile
from itertools import product
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.utils import parse_apps_and_model_labels
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils.functional import cached_property
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
READ_STDIN = '-'
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
missing_args_message = (
"No database fixture specified. Please provide the path of at least "
"one fixture in the command line."
)
def add_arguments(self, parser):
parser.add_argument('args', metavar='fixture', nargs='+', help='Fixture labels.')
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a specific database to load fixtures into. Defaults to the "default" database.',
)
parser.add_argument(
'--app', dest='app_label',
help='Only look for fixtures in the specified app.',
)
parser.add_argument(
'--ignorenonexistent', '-i', action='store_true', dest='ignore',
help='Ignores entries in the serialized data for fields that do not '
'currently exist on the model.',
)
parser.add_argument(
'-e', '--exclude', action='append', default=[],
help='An app_label or app_label.ModelName to exclude. Can be used multiple times.',
)
parser.add_argument(
'--format',
help='Format of serialized data when reading from stdin.',
)
def handle(self, *fixture_labels, **options):
self.ignore = options['ignore']
self.using = options['database']
self.app_label = options['app_label']
self.verbosity = options['verbosity']
self.excluded_models, self.excluded_apps = parse_apps_and_model_labels(options['exclude'])
self.format = options['format']
with transaction.atomic(using=self.using):
self.loaddata(fixture_labels)
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def loaddata(self, fixture_labels):
connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
self.loaded_object_count = 0
self.fixture_object_count = 0
self.models = set()
self.serialization_formats = serializers.get_public_serializer_formats()
# Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
self.compression_formats = {
None: (open, 'rb'),
'gz': (gzip.GzipFile, 'rb'),
'zip': (SingleZipReader, 'r'),
'stdin': (lambda *args: sys.stdin, None),
}
if has_bz2:
self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
# Django's test suite repeatedly tries to load initial_data fixtures
# from apps that don't have any fixtures. Because disabling constraint
# checks can be expensive on some databases (especially MSSQL), bail
# out early if no fixtures are found.
for fixture_label in fixture_labels:
if self.find_fixtures(fixture_label):
break
else:
return
with connection.constraint_checks_disabled():
self.objs_with_deferred_fields = []
for fixture_label in fixture_labels:
self.load_label(fixture_label)
for obj in self.objs_with_deferred_fields:
obj.save_deferred_fields(using=self.using)
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in self.models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
# database sequences.
if self.loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
if sequence_sql:
if self.verbosity >= 2:
self.stdout.write("Resetting sequences\n")
with connection.cursor() as cursor:
for line in sequence_sql:
cursor.execute(line)
if self.verbosity >= 1:
if self.fixture_object_count == self.loaded_object_count:
self.stdout.write(
"Installed %d object(s) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_count)
)
else:
self.stdout.write(
"Installed %d object(s) (of %d) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_object_count, self.fixture_count)
)
def load_label(self, fixture_label):
"""Load fixtures files for a given label."""
show_progress = self.verbosity >= 3
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
try:
self.fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if self.verbosity >= 2:
self.stdout.write(
"Installing %s fixture '%s' from %s."
% (ser_fmt, fixture_name, humanize(fixture_dir))
)
objects = serializers.deserialize(
ser_fmt, fixture, using=self.using, ignorenonexistent=self.ignore,
handle_forward_references=True,
)
for obj in objects:
objects_in_fixture += 1
if (obj.object._meta.app_config in self.excluded_apps or
type(obj.object) in self.excluded_models):
continue
if router.allow_migrate_model(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
obj.save(using=self.using)
if show_progress:
self.stdout.write(
'\rProcessed %i object(s).' % loaded_objects_in_fixture,
ending=''
)
# psycopg2 raises ValueError if data contains NUL chars.
except (DatabaseError, IntegrityError, ValueError) as e:
e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': e,
},)
raise
if obj.deferred_fields:
self.objs_with_deferred_fields.append(obj)
if objects and show_progress:
self.stdout.write('') # add a newline after progress indicator
self.loaded_object_count += loaded_objects_in_fixture
self.fixture_object_count += objects_in_fixture
except Exception as e:
if not isinstance(e, CommandError):
e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
raise
finally:
fixture.close()
# Warn if the fixture we loaded contains 0 objects.
if objects_in_fixture == 0:
warnings.warn(
"No fixture data found for '%s'. (File format may be "
"invalid.)" % fixture_name,
RuntimeWarning
)
@functools.lru_cache(maxsize=None)
def find_fixtures(self, fixture_label):
"""Find fixture files for a given label."""
if fixture_label == READ_STDIN:
return [(READ_STDIN, None, READ_STDIN)]
fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
databases = [self.using, None]
cmp_fmts = list(self.compression_formats) if cmp_fmt is None else [cmp_fmt]
ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
if self.verbosity >= 2:
self.stdout.write("Loading '%s' fixtures..." % fixture_name)
if os.path.isabs(fixture_name):
fixture_dirs = [os.path.dirname(fixture_name)]
fixture_name = os.path.basename(fixture_name)
else:
fixture_dirs = self.fixture_dirs
if os.path.sep in os.path.normpath(fixture_name):
fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
for dir_ in fixture_dirs]
fixture_name = os.path.basename(fixture_name)
suffixes = (
'.'.join(ext for ext in combo if ext)
for combo in product(databases, ser_fmts, cmp_fmts)
)
targets = {'.'.join((fixture_name, suffix)) for suffix in suffixes}
fixture_files = []
for fixture_dir in fixture_dirs:
if self.verbosity >= 2:
self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
fixture_files_in_dir = []
path = os.path.join(fixture_dir, fixture_name)
for candidate in glob.iglob(glob.escape(path) + '*'):
if os.path.basename(candidate) in targets:
# Save the fixture_dir and fixture_name for future error messages.
fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
if self.verbosity >= 2 and not fixture_files_in_dir:
self.stdout.write("No fixture '%s' in %s." %
(fixture_name, humanize(fixture_dir)))
# Check kept for backwards-compatibility; it isn't clear why
# duplicates are only allowed in different directories.
if len(fixture_files_in_dir) > 1:
raise CommandError(
"Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
fixture_files.extend(fixture_files_in_dir)
if not fixture_files:
raise CommandError("No fixture named '%s' found." % fixture_name)
return fixture_files
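# Illustrative target set: for fixture_label 'users.json' with self.using ==
# 'default', targets include 'users.json' and 'users.default.json', plus
# compressed variants such as 'users.json.gz' for each known compression
# format.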
@cached_property
def fixture_dirs(self):
"""
Return a list of fixture directories.
The list contains the 'fixtures' subdirectory of each installed
application, if it exists, the directories in FIXTURE_DIRS, and the
current directory.
"""
dirs = []
fixture_dirs = settings.FIXTURE_DIRS
if len(fixture_dirs) != len(set(fixture_dirs)):
raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
for app_config in apps.get_app_configs():
app_label = app_config.label
app_dir = os.path.join(app_config.path, 'fixtures')
if app_dir in fixture_dirs:
raise ImproperlyConfigured(
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
)
if self.app_label and app_label != self.app_label:
continue
if os.path.isdir(app_dir):
dirs.append(app_dir)
dirs.extend(fixture_dirs)
dirs.append('')
dirs = [os.path.abspath(os.path.realpath(d)) for d in dirs]
return dirs
def parse_name(self, fixture_name):
"""
Split a fixture name into (name, serialization format, compression format).
"""
if fixture_name == READ_STDIN:
if not self.format:
raise CommandError('--format must be specified when reading from stdin.')
return READ_STDIN, self.format, 'stdin'
parts = fixture_name.rsplit('.', 2)
if len(parts) > 1 and parts[-1] in self.compression_formats:
cmp_fmt = parts[-1]
parts = parts[:-1]
else:
cmp_fmt = None
if len(parts) > 1:
if parts[-1] in self.serialization_formats:
ser_fmt = parts[-1]
parts = parts[:-1]
else:
raise CommandError(
"Problem installing fixture '%s': %s is not a known "
"serialization format." % ('.'.join(parts[:-1]), parts[-1]))
else:
ser_fmt = None
name = '.'.join(parts)
return name, ser_fmt, cmp_fmt
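# Illustrative results (assuming the default serializers are registered):
#     parse_name('users')         -> ('users', None, None)
#     parse_name('users.json')    -> ('users', 'json', None)
#     parse_name('users.json.gz') -> ('users', 'json', 'gz')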
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if len(self.namelist()) != 1:
raise ValueError("Zip-compressed fixtures must contain one file.")
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
def humanize(dirname):
return "'%s'" % dirname if dirname else 'absolute path'
|
a0bb0ded71e42445f0a8ca7b2ccfe6717618fadf10d9d4a02dfa43ce4d587e97 | import sys
from django.apps import apps
from django.core.management.base import BaseCommand
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
class Command(BaseCommand):
help = "Shows all available migrations for the current project"
def add_arguments(self, parser):
parser.add_argument(
'app_label', nargs='*',
help='App labels of applications to limit the output to.',
)
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.',
)
formats = parser.add_mutually_exclusive_group()
formats.add_argument(
'--list', '-l', action='store_const', dest='format', const='list',
help='Shows a list of all migrations and which are applied.',
)
formats.add_argument(
'--plan', '-p', action='store_const', dest='format', const='plan',
help=(
'Shows all migrations in the order they will be applied. '
'With a verbosity level of 2 or above all direct migration dependencies '
'and reverse dependencies (run_before) will be included.'
)
)
parser.set_defaults(format='list')
def handle(self, *args, **options):
self.verbosity = options['verbosity']
# Get the database we're operating from
db = options['database']
connection = connections[db]
if options['format'] == "plan":
return self.show_plan(connection, options['app_label'])
else:
return self.show_list(connection, options['app_label'])
def _validate_app_names(self, loader, app_names):
has_bad_names = False
for app_name in app_names:
try:
apps.get_app_config(app_name)
except LookupError as err:
self.stderr.write(str(err))
has_bad_names = True
if has_bad_names:
sys.exit(2)
def show_list(self, connection, app_names=None):
"""
Show a list of all migrations on the system, or only those of
some named apps.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection, ignore_no_migrations=True)
graph = loader.graph
# If we were passed a list of apps, validate it
if app_names:
self._validate_app_names(loader, app_names)
# Otherwise, show all apps in alphabetic order
else:
app_names = sorted(loader.migrated_apps)
# For each app, print its migrations in order from oldest (roots) to
# newest (leaves).
for app_name in app_names:
self.stdout.write(app_name, self.style.MIGRATE_LABEL)
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
# Mark it as applied/unapplied
if plan_node in loader.applied_migrations:
self.stdout.write(" [X] %s" % title)
else:
self.stdout.write(" [ ] %s" % title)
shown.add(plan_node)
# If we didn't print anything, then a small message
if not shown:
self.stdout.write(" (no migrations)", self.style.ERROR)
def show_plan(self, connection, app_names=None):
"""
Show all known migrations (or only those of the specified app_names)
in the order they will be applied.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection)
graph = loader.graph
if app_names:
self._validate_app_names(loader, app_names)
targets = [key for key in graph.leaf_nodes() if key[0] in app_names]
else:
targets = graph.leaf_nodes()
plan = []
seen = set()
# Generate the plan
for target in targets:
for migration in graph.forwards_plan(target):
if migration not in seen:
node = graph.node_map[migration]
plan.append(node)
seen.add(migration)
# Output
def print_deps(node):
out = []
for parent in sorted(node.parents):
out.append("%s.%s" % parent.key)
if out:
return " ... (%s)" % ", ".join(out)
return ""
for node in plan:
deps = ""
if self.verbosity >= 2:
deps = print_deps(node)
if node.key in loader.applied_migrations:
self.stdout.write("[X] %s.%s%s" % (node.key[0], node.key[1], deps))
else:
self.stdout.write("[ ] %s.%s%s" % (node.key[0], node.key[1], deps))
if not plan:
self.stdout.write('(no migrations)', self.style.ERROR)
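# Illustrative usage ('myapp' is a hypothetical label); with --plan and
# verbosity >= 2, each line also lists the migration's dependencies:
#
#     $ python manage.py showmigrations --plan -v 2
#     [X] contenttypes.0001_initial
#     [ ] myapp.0001_initial ... (contenttypes.0001_initial)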
|
2daebc74657e2e3c816cddece99af295a1c74476e08187eb65de4f6d3cf19c9b | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils.encoding import force_str
# LayerMapping exceptions.
class LayerMapError(Exception):
pass
class InvalidString(LayerMapError):
pass
class InvalidDecimal(LayerMapError):
pass
class InvalidInteger(LayerMapError):
pass
class MissingForeignKey(LayerMapError):
pass
class LayerMapping:
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {
1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.BigAutoField: OFTInteger64,
models.BooleanField: (OFTInteger, OFTReal, OFTString),
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.UUIDField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding='utf-8',
transaction_mode='commit_on_success',
transform=True, unique=None, using=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, str):
self.ds = DataSource(data, encoding=encoding)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using if using is not None else router.db_for_write(model)
self.spatial_backend = connections[self.using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
# things don't check out beforehand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if connections[self.using].features.supports_transform:
self.geo_field = self.geometry_field()
else:
transform = False
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
self.transaction_mode = transaction_mode
if transaction_mode == 'autocommit':
self.transaction_decorator = None
elif transaction_mode == 'commit_on_success':
self.transaction_decorator = transaction.atomic
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"Check the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
Check the Layer metadata and ensure that it's compatible with the
mapping information and model. Unlike previous revisions, there is no
need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except GDALException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.remote_field.model
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_model._meta.get_field(rel_name)
except FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if model_field.__class__ not in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Check the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, str)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Check the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, str):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# #### Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, return a dictionary of keyword arguments for
constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except GDALException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`), construct
and return the uniqueness keyword arguments -- a subset of the feature
kwargs.
"""
if isinstance(self.unique, str):
return {self.unique: kwargs[self.unique]}
else:
return {fld: kwargs[fld] for fld in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verify if the OGR Field contents are acceptable to the model field. If
they are, return the verified value, otherwise raise an exception.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding and ogr_field.value is not None:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_str(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and val is not None and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
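            # For example, Decimal('123.45').as_tuple() gives digits
            # (1, 2, 3, 4, 5) and exponent -2, so n_prec = len(digits[:-2]) = 3;
            # Decimal('12E+2') gives digits (1, 2) and exponent 2, so
            # n_prec = 2 + 2 = 4.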
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal(
'A DecimalField with max_digits %d, decimal_places %d must '
'round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec)
)
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
        retrieve the related model instance for the ForeignKey mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey(
'No ForeignKey %s model found with keyword arguments: %s' %
(rel_model.__name__, fk_kwargs)
)
def verify_geom(self, geom, model_field):
"""
Verify the geometry -- construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
# #### Other model methods ####
def coord_transform(self):
"Return the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as exc:
raise LayerMapError(
'Could not translate between the data source and model geometry.'
) from exc
def geometry_field(self):
"Return the GeometryField instance associated with the geographic column."
# Use `get_field()` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
return opts.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Save the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
           May be set with a slice or tuple of (begin, end) feature IDs to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature, and so on.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed;
           however, this default may be overridden by setting this keyword
           with an integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
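         Example:
           A minimal sketch; `City` and `city_mapping` stand in for an actual
           model and its mapping dictionary:
             lm = LayerMapping(City, '/path/to/cities.shp', city_mapping)
             lm.save(strict=True, verbose=True)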
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError as msg:
# Something borked the validation
if strict:
raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
                            # geometries, and updating the attribute with the
                            # just-combined geometry WKT.
geom_value = getattr(m, self.geom_field)
if geom_value is None:
geom = OGRGeometry(kwargs[self.geom_field])
else:
geom = geom_value.ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new:
geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose:
stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
except Exception as msg:
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write(
'Failed to save the feature (id: %s) into the '
'model with the keyword arguments:\n' % feat.fid
)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
if self.transaction_decorator is not None:
_save = self.transaction_decorator(_save)
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
                # special (e.g., [100:] instead of [90:100]).
if i + 1 == n_i:
step_slice = slice(beg, None)
else:
step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except Exception: # Deliberately catch everything
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
|
71d0e5ba1d3b8dd2f4615c39aed9508781ba4c13055b307ad6eec77977367387 | from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import (
BaseSpatialOperations,
)
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geos.geometry import GEOSGeometryBase
from django.contrib.gis.geos.prototypes.io import wkb_r
from django.contrib.gis.measure import Distance
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
mysql = True
name = 'mysql'
geom_func_prefix = 'ST_'
Adapter = WKTAdapter
@cached_property
def select(self):
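        # Geometry columns are selected as WKB, e.g. ST_AsBinary(`geom`), so
        # that get_geometry_converter() below can rebuild GEOS objects from
        # the raw bytes.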
return self.geom_func_prefix + 'AsBinary(%s)'
@cached_property
def from_text(self):
return self.geom_func_prefix + 'GeomFromText'
@cached_property
def gis_operators(self):
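        # Maps ORM lookup names onto MySQL spatial functions; e.g. a filter of
        # point__within=geom renders as ST_Within(`point`, geom) in SQL.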
return {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # ...
'contained': SpatialOperator(func='MBRWithin'), # ...
'contains': SpatialOperator(func='ST_Contains'),
'crosses': SpatialOperator(func='ST_Crosses'),
'disjoint': SpatialOperator(func='ST_Disjoint'),
'equals': SpatialOperator(func='ST_Equals'),
'exact': SpatialOperator(func='ST_Equals'),
'intersects': SpatialOperator(func='ST_Intersects'),
'overlaps': SpatialOperator(func='ST_Overlaps'),
'same_as': SpatialOperator(func='ST_Equals'),
'touches': SpatialOperator(func='ST_Touches'),
'within': SpatialOperator(func='ST_Within'),
}
disallowed_aggregates = (
aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
aggregates.MakeLine, aggregates.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGML', 'AsKML', 'AsSVG', 'Azimuth', 'BoundingCircle',
'ForcePolygonCW', 'LineLocatePoint', 'MakeValid', 'MemSize',
'Perimeter', 'PointOnSurface', 'Reverse', 'Scale', 'SnapToGrid',
'Transform', 'Translate',
}
if self.connection.mysql_is_mariadb:
unsupported.update({'GeoHash', 'IsValid'})
if self.connection.mysql_version < (10, 2, 4):
unsupported.add('AsGeoJSON')
elif self.connection.mysql_version < (5, 7, 5):
unsupported.update({'AsGeoJSON', 'GeoHash', 'IsValid'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_distance(self, f, value, lookup_type):
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError(
'Only numeric values of degree units are allowed on '
'geodetic distance queries.'
)
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geometry_converter(self, expression):
read = wkb_r().read
srid = expression.output_field.srid
if srid == -1:
srid = None
geom_class = expression.output_field.geom_class
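        # The closure below runs once per result row: it parses the WKB
        # produced by ST_AsBinary() into a GEOS geometry of the field's
        # specific class and tags it with the column's SRID.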
def converter(value, expression, connection):
if value is not None:
geom = GEOSGeometryBase(read(memoryview(value)), geom_class)
if srid:
geom.srid = srid
return geom
return converter
|
f7487761b0ee4d77672caccc4d3d0f07c31e6873b38f8d3791a8d6033dcf31eb | import datetime
import importlib
import io
import os
import sys
from unittest import mock
from django.apps import apps
from django.core.management import CommandError, call_command
from django.db import (
ConnectionHandler, DatabaseError, connection, connections, models,
)
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.utils import truncate_name
from django.db.migrations.exceptions import InconsistentMigrationHistory
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, override_settings
from .models import UnicodeModel, UnserializableModel
from .routers import TestRouter
from .test_base import MigrationTestBase
class MigrateTests(MigrationTestBase):
"""
Tests running the migrate command.
"""
databases = {'default', 'other'}
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate(self):
"""
Tests basic usage of the migrate command.
"""
# No tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run the migrations to 0001 only
stdout = io.StringIO()
call_command('migrate', 'migrations', '0001', verbosity=1, stdout=stdout, no_color=True)
stdout = stdout.getvalue()
self.assertIn('Target specific migration: 0001_initial, from migrations', stdout)
self.assertIn('Applying migrations.0001_initial... OK', stdout)
# The correct tables exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run migrations all the way
call_command("migrate", verbosity=0)
# The correct tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Unmigrate everything
stdout = io.StringIO()
call_command('migrate', 'migrations', 'zero', verbosity=1, stdout=stdout, no_color=True)
stdout = stdout.getvalue()
self.assertIn('Unapply all migrations: migrations', stdout)
self.assertIn('Unapplying migrations.0002_second... OK', stdout)
# Tables are gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'migrations.migrations_test_apps.migrated_app',
])
def test_migrate_with_system_checks(self):
out = io.StringIO()
call_command('migrate', skip_checks=False, no_color=True, stdout=out)
self.assertIn('Apply all migrations: migrated_app', out.getvalue())
@override_settings(INSTALLED_APPS=['migrations', 'migrations.migrations_test_apps.unmigrated_app_syncdb'])
def test_app_without_migrations(self):
msg = "App 'unmigrated_app_syncdb' does not have migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command('migrate', app_label='unmigrated_app_syncdb')
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_clashing_prefix'})
    def test_ambiguous_prefix(self):
msg = (
"More than one migration matches 'a' in app 'migrations'. Please "
"be more specific."
)
with self.assertRaisesMessage(CommandError, msg):
call_command('migrate', app_label='migrations', migration_name='a')
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'})
def test_unknown_prefix(self):
msg = "Cannot find a migration matching 'nonexistent' from app 'migrations'."
with self.assertRaisesMessage(CommandError, msg):
call_command('migrate', app_label='migrations', migration_name='nonexistent')
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_initial_false"})
def test_migrate_initial_false(self):
"""
`Migration.initial = False` skips fake-initial detection.
"""
# Make sure no tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
# Fake rollback
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
# Make sure fake-initial detection does not run
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0)
call_command("migrate", "migrations", "0001", fake=True, verbosity=0)
# Real rollback
call_command("migrate", "migrations", "zero", verbosity=0)
# Make sure it's all gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations"},
DATABASE_ROUTERS=['migrations.routers.TestRouter'],
)
def test_migrate_fake_initial(self):
"""
--fake-initial only works if all tables created in the initial
        migration of an app exist. Database routers must be obeyed when doing
that check.
"""
# Make sure no tables are created
for db in connections:
self.assertTableNotExists("migrations_author", using=db)
self.assertTableNotExists("migrations_tribble", using=db)
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
call_command("migrate", "migrations", "0001", verbosity=0, database="other")
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Also check the "other" database
self.assertTableNotExists("migrations_author", using="other")
self.assertTableExists("migrations_tribble", using="other")
# Fake a roll-back
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0, database="other")
# Make sure the tables still exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble", using="other")
# Try to run initial migration
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", "0001", verbosity=0)
# Run initial migration with an explicit --fake-initial
out = io.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: False):
call_command("migrate", "migrations", "0001", fake_initial=True, stdout=out, verbosity=1)
call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0, database="other")
self.assertIn(
"migrations.0001_initial... faked",
out.getvalue().lower()
)
# Run migrations all the way
call_command("migrate", verbosity=0)
call_command("migrate", verbosity=0, database="other")
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
self.assertTableNotExists("migrations_author", using="other")
self.assertTableNotExists("migrations_tribble", using="other")
self.assertTableNotExists("migrations_book", using="other")
# Fake a roll-back
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0, database="other")
# Make sure the tables still exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Try to run initial migration
with self.assertRaises(DatabaseError):
call_command("migrate", "migrations", verbosity=0)
# Run initial migration with an explicit --fake-initial
with self.assertRaises(DatabaseError):
# Fails because "migrations_tribble" does not exist but needs to in
# order to make --fake-initial work.
call_command("migrate", "migrations", fake_initial=True, verbosity=0)
# Fake an apply
call_command("migrate", "migrations", fake=True, verbosity=0)
call_command("migrate", "migrations", fake=True, verbosity=0, database="other")
# Unmigrate everything
call_command("migrate", "migrations", "zero", verbosity=0)
call_command("migrate", "migrations", "zero", verbosity=0, database="other")
# Make sure it's all gone
for db in connections:
self.assertTableNotExists("migrations_author", using=db)
self.assertTableNotExists("migrations_tribble", using=db)
self.assertTableNotExists("migrations_book", using=db)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_fake_split_initial"})
def test_migrate_fake_split_initial(self):
"""
Split initial migrations can be faked with --fake-initial.
"""
call_command("migrate", "migrations", "0002", verbosity=0)
call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
out = io.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: False):
call_command("migrate", "migrations", "0002", fake_initial=True, stdout=out, verbosity=1)
value = out.getvalue().lower()
self.assertIn("migrations.0001_initial... faked", value)
self.assertIn("migrations.0002_second... faked", value)
# Fake an apply
call_command("migrate", "migrations", fake=True, verbosity=0)
# Unmigrate everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_migrate_conflict_exit(self):
"""
migrate exits if it detects a conflict.
"""
with self.assertRaisesMessage(CommandError, "Conflicting migrations detected"):
call_command("migrate", "migrations")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_showmigrations_list(self):
"""
showmigrations --list displays migrations and whether or not they're
applied.
"""
out = io.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: True):
call_command("showmigrations", format='list', stdout=out, verbosity=0, no_color=False)
self.assertEqual(
'\x1b[1mmigrations\n\x1b[0m'
' [ ] 0001_initial\n'
' [ ] 0002_second\n',
out.getvalue().lower()
)
call_command("migrate", "migrations", "0001", verbosity=0)
out = io.StringIO()
        # Giving an explicit app_label tests the selective `show_list` behavior of the command.
call_command("showmigrations", "migrations", format='list', stdout=out, verbosity=0, no_color=True)
self.assertEqual(
'migrations\n'
' [x] 0001_initial\n'
' [ ] 0002_second\n',
out.getvalue().lower()
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_showmigrations_plan(self):
"""
Tests --plan output of showmigrations command
"""
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third\n"
"[ ] migrations.0002_second\n",
out.getvalue().lower()
)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third ... (migrations.0001_initial)\n"
"[ ] migrations.0002_second ... (migrations.0001_initial, migrations.0003_third)\n",
out.getvalue().lower()
)
call_command("migrate", "migrations", "0003", verbosity=0)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third\n"
"[ ] migrations.0002_second\n",
out.getvalue().lower()
)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third ... (migrations.0001_initial)\n"
"[ ] migrations.0002_second ... (migrations.0001_initial, migrations.0003_third)\n",
out.getvalue().lower()
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_plan'})
def test_migrate_plan(self):
"""Tests migrate --plan output."""
out = io.StringIO()
# Show the plan up to the third migration.
call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
'migrations.0001_initial\n'
' Create model Salamander\n'
' Raw Python operation -> Grow salamander tail.\n'
'migrations.0002_second\n'
' Create model Book\n'
" Raw SQL operation -> ['SELECT * FROM migrations_book']\n"
'migrations.0003_third\n'
' Create model Author\n'
" Raw SQL operation -> ['SELECT * FROM migrations_author']\n",
out.getvalue()
)
# Migrate to the third migration.
call_command('migrate', 'migrations', '0003', verbosity=0)
out = io.StringIO()
# Show the plan for when there is nothing to apply.
call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
' No planned migration operations.\n',
out.getvalue()
)
out = io.StringIO()
# Show the plan for reverse migration back to 0001.
call_command('migrate', 'migrations', '0001', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
'migrations.0003_third\n'
' Undo Create model Author\n'
" Raw SQL operation -> ['SELECT * FROM migrations_book']\n"
'migrations.0002_second\n'
' Undo Create model Book\n'
" Raw SQL operation -> ['SELECT * FROM migrations_salamand…\n",
out.getvalue()
)
out = io.StringIO()
# Show the migration plan to fourth, with truncated details.
call_command('migrate', 'migrations', '0004', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
'migrations.0004_fourth\n'
' Raw SQL operation -> SELECT * FROM migrations_author WHE…\n',
out.getvalue()
)
# Show the plan when an operation is irreversible.
# Migrate to the fourth migration.
call_command('migrate', 'migrations', '0004', verbosity=0)
out = io.StringIO()
call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True)
self.assertEqual(
'Planned operations:\n'
'migrations.0004_fourth\n'
' Raw SQL operation -> IRREVERSIBLE\n',
out.getvalue()
)
# Cleanup by unmigrating everything: fake the irreversible, then
# migrate all to zero.
call_command('migrate', 'migrations', '0003', fake=True, verbosity=0)
call_command('migrate', 'migrations', 'zero', verbosity=0)
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_empty'})
def test_showmigrations_no_migrations(self):
out = io.StringIO()
call_command('showmigrations', stdout=out, no_color=True)
self.assertEqual('migrations\n (no migrations)\n', out.getvalue().lower())
@override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app'])
def test_showmigrations_unmigrated_app(self):
out = io.StringIO()
call_command('showmigrations', 'unmigrated_app', stdout=out, no_color=True)
self.assertEqual('unmigrated_app\n (no migrations)\n', out.getvalue().lower())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"})
def test_showmigrations_plan_no_migrations(self):
"""
Tests --plan output of showmigrations command without migrations
"""
out = io.StringIO()
call_command('showmigrations', format='plan', stdout=out, no_color=True)
self.assertEqual('(no migrations)\n', out.getvalue().lower())
out = io.StringIO()
call_command('showmigrations', format='plan', stdout=out, verbosity=2, no_color=True)
self.assertEqual('(no migrations)\n', out.getvalue().lower())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_showmigrations_plan_squashed(self):
"""
Tests --plan output of showmigrations command with squashed migrations.
"""
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto\n"
"[ ] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower()
)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto ... (migrations.1_auto)\n"
"[ ] migrations.3_squashed_5 ... (migrations.2_auto)\n"
"[ ] migrations.6_auto ... (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto ... (migrations.6_auto)\n",
out.getvalue().lower()
)
call_command("migrate", "migrations", "3_squashed_5", verbosity=0)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto\n"
"[x] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower()
)
out = io.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto ... (migrations.1_auto)\n"
"[x] migrations.3_squashed_5 ... (migrations.2_auto)\n"
"[ ] migrations.6_auto ... (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto ... (migrations.6_auto)\n",
out.getvalue().lower()
)
@override_settings(INSTALLED_APPS=[
'migrations.migrations_test_apps.mutate_state_b',
'migrations.migrations_test_apps.alter_fk.author_app',
'migrations.migrations_test_apps.alter_fk.book_app',
])
def test_showmigrations_plan_single_app_label(self):
"""
`showmigrations --plan app_label` output with a single app_label.
"""
# Single app with no dependencies on other apps.
out = io.StringIO()
call_command('showmigrations', 'mutate_state_b', format='plan', stdout=out)
self.assertEqual(
'[ ] mutate_state_b.0001_initial\n'
'[ ] mutate_state_b.0002_add_field\n',
out.getvalue()
)
# Single app with dependencies.
out = io.StringIO()
call_command('showmigrations', 'author_app', format='plan', stdout=out)
self.assertEqual(
'[ ] author_app.0001_initial\n'
'[ ] book_app.0001_initial\n'
'[ ] author_app.0002_alter_id\n',
out.getvalue()
)
# Some migrations already applied.
call_command('migrate', 'author_app', '0001', verbosity=0)
out = io.StringIO()
call_command('showmigrations', 'author_app', format='plan', stdout=out)
self.assertEqual(
'[X] author_app.0001_initial\n'
'[ ] book_app.0001_initial\n'
'[ ] author_app.0002_alter_id\n',
out.getvalue()
)
# Cleanup by unmigrating author_app.
call_command('migrate', 'author_app', 'zero', verbosity=0)
@override_settings(INSTALLED_APPS=[
'migrations.migrations_test_apps.mutate_state_b',
'migrations.migrations_test_apps.alter_fk.author_app',
'migrations.migrations_test_apps.alter_fk.book_app',
])
def test_showmigrations_plan_multiple_app_labels(self):
"""
`showmigrations --plan app_label` output with multiple app_labels.
"""
# Multiple apps: author_app depends on book_app; mutate_state_b doesn't
# depend on other apps.
out = io.StringIO()
call_command('showmigrations', 'mutate_state_b', 'author_app', format='plan', stdout=out)
self.assertEqual(
'[ ] author_app.0001_initial\n'
'[ ] book_app.0001_initial\n'
'[ ] author_app.0002_alter_id\n'
'[ ] mutate_state_b.0001_initial\n'
'[ ] mutate_state_b.0002_add_field\n',
out.getvalue()
)
# Multiple apps: args order shouldn't matter (the same result is
# expected as above).
out = io.StringIO()
call_command('showmigrations', 'author_app', 'mutate_state_b', format='plan', stdout=out)
self.assertEqual(
'[ ] author_app.0001_initial\n'
'[ ] book_app.0001_initial\n'
'[ ] author_app.0002_alter_id\n'
'[ ] mutate_state_b.0001_initial\n'
'[ ] mutate_state_b.0002_add_field\n',
out.getvalue()
)
@override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app'])
def test_showmigrations_plan_app_label_no_migrations(self):
out = io.StringIO()
call_command('showmigrations', 'unmigrated_app', format='plan', stdout=out, no_color=True)
self.assertEqual('(no migrations)\n', out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_forwards(self):
"""
        sqlmigrate outputs forward-looking SQL.
"""
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
output = out.getvalue().lower()
index_tx_start = output.find(connection.ops.start_transaction_sql().lower())
index_op_desc_author = output.find('-- create model author')
index_create_table = output.find('create table')
index_op_desc_tribble = output.find('-- create model tribble')
index_op_desc_unique_together = output.find('-- alter unique_together')
index_tx_end = output.find(connection.ops.end_transaction_sql().lower())
self.assertGreater(index_tx_start, -1, "Transaction start not found")
self.assertGreater(
index_op_desc_author, index_tx_start,
"Operation description (author) not found or found before transaction start"
)
self.assertGreater(
index_create_table, index_op_desc_author,
"CREATE TABLE not found or found before operation description (author)"
)
self.assertGreater(
index_op_desc_tribble, index_create_table,
"Operation description (tribble) not found or found before CREATE TABLE (author)"
)
self.assertGreater(
index_op_desc_unique_together, index_op_desc_tribble,
"Operation description (unique_together) not found or found before operation description (tribble)"
)
self.assertGreater(
index_tx_end, index_op_desc_unique_together,
"Transaction end not found or found before operation description (unique_together)"
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate_backwards(self):
"""
        sqlmigrate outputs reverse-looking SQL.
"""
# Cannot generate the reverse SQL unless we've applied the migration.
call_command("migrate", "migrations", verbosity=0)
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True)
output = out.getvalue().lower()
index_tx_start = output.find(connection.ops.start_transaction_sql().lower())
index_op_desc_unique_together = output.find('-- alter unique_together')
index_op_desc_tribble = output.find('-- create model tribble')
index_op_desc_author = output.find('-- create model author')
index_drop_table = output.rfind('drop table')
index_tx_end = output.find(connection.ops.end_transaction_sql().lower())
self.assertGreater(index_tx_start, -1, "Transaction start not found")
self.assertGreater(
index_op_desc_unique_together, index_tx_start,
"Operation description (unique_together) not found or found before transaction start"
)
self.assertGreater(
index_op_desc_tribble, index_op_desc_unique_together,
"Operation description (tribble) not found or found before operation description (unique_together)"
)
self.assertGreater(
index_op_desc_author, index_op_desc_tribble,
"Operation description (author) not found or found before operation description (tribble)"
)
self.assertGreater(
index_drop_table, index_op_desc_author,
"DROP TABLE not found or found before operation description (author)"
)
self.assertGreater(
            index_tx_end, index_drop_table,
"Transaction end not found or found before DROP TABLE"
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"})
def test_sqlmigrate_for_non_atomic_migration(self):
"""
Transaction wrappers aren't shown for non-atomic migrations.
"""
out = io.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
output = out.getvalue().lower()
queries = [q.strip() for q in output.splitlines()]
if connection.ops.start_transaction_sql():
self.assertNotIn(connection.ops.start_transaction_sql().lower(), queries)
self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.migrated_unapplied_app",
"migrations.migrations_test_apps.unmigrated_app",
],
)
def test_regression_22823_unmigrated_fk_to_migrated_model(self):
"""
Assuming you have 3 apps, `A`, `B`, and `C`, such that:
* `A` has migrations
* `B` has a migration we want to apply
* `C` has no migrations, but has an FK to `A`
When we try to migrate "B", an exception occurs because the
"B" was not included in the ProjectState that is used to detect
soft-applied migrations (#22823).
"""
call_command("migrate", "migrated_unapplied_app", stdout=io.StringIO())
# unmigrated_app.SillyModel has a foreign key to 'migrations.Tribble',
# but that model is only defined in a migration, so the global app
# registry never sees it and the reference is left dangling. Remove it
# to avoid problems in subsequent tests.
del apps._pending_operations[('migrations', 'tribble')]
@override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app_syncdb'])
def test_migrate_syncdb_deferred_sql_executed_with_schemaeditor(self):
"""
For an app without migrations, editor.execute() is used for executing
the syncdb deferred SQL.
"""
stdout = io.StringIO()
with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute:
call_command('migrate', run_syncdb=True, verbosity=1, stdout=stdout, no_color=True)
create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)])
self.assertEqual(create_table_count, 2)
# There's at least one deferred SQL for creating the foreign key
# index.
self.assertGreater(len(execute.mock_calls), 2)
stdout = stdout.getvalue()
self.assertIn('Synchronize unmigrated apps: unmigrated_app_syncdb', stdout)
self.assertIn('Creating tables...', stdout)
table_name = truncate_name('unmigrated_app_syncdb_classroom', connection.ops.max_name_length())
self.assertIn('Creating table %s' % table_name, stdout)
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'})
def test_migrate_syncdb_app_with_migrations(self):
msg = "Can't use run_syncdb with app 'migrations' as it has migrations."
with self.assertRaisesMessage(CommandError, msg):
call_command('migrate', 'migrations', run_syncdb=True, verbosity=0)
@override_settings(INSTALLED_APPS=[
'migrations.migrations_test_apps.unmigrated_app_syncdb',
'migrations.migrations_test_apps.unmigrated_app_simple',
])
def test_migrate_syncdb_app_label(self):
"""
Running migrate --run-syncdb with an app_label only creates tables for
the specified app.
"""
stdout = io.StringIO()
with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute:
call_command('migrate', 'unmigrated_app_syncdb', run_syncdb=True, stdout=stdout)
create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)])
self.assertEqual(create_table_count, 2)
self.assertGreater(len(execute.mock_calls), 2)
self.assertIn('Synchronize unmigrated app: unmigrated_app_syncdb', stdout.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_migrate_record_replaced(self):
"""
Running a single squashed migration should record all of the original
replaced migrations as run.
"""
recorder = MigrationRecorder(connection)
out = io.StringIO()
call_command("migrate", "migrations", verbosity=0)
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
'migrations\n'
' [x] 0001_squashed_0002 (2 squashed migrations)\n',
out.getvalue().lower()
)
applied_migrations = recorder.applied_migrations()
self.assertIn(("migrations", "0001_initial"), applied_migrations)
self.assertIn(("migrations", "0002_second"), applied_migrations)
self.assertIn(("migrations", "0001_squashed_0002"), applied_migrations)
# Rollback changes
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_migrate_record_squashed(self):
"""
        Running migrate for a squashed migration should record it as run
if all of the replaced migrations have been run (#25231).
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0001_initial")
recorder.record_applied("migrations", "0002_second")
out = io.StringIO()
call_command("migrate", "migrations", verbosity=0)
call_command("showmigrations", "migrations", stdout=out, no_color=True)
self.assertEqual(
'migrations\n'
' [x] 0001_squashed_0002 (2 squashed migrations)\n',
out.getvalue().lower()
)
self.assertIn(
("migrations", "0001_squashed_0002"),
recorder.applied_migrations()
)
# No changes were actually applied so there is nothing to rollback
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'})
def test_migrate_inconsistent_history(self):
"""
Running migrate with some migrations applied before their dependencies
should not be allowed.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied("migrations", "0002_second")
msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial"
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
call_command("migrate")
applied_migrations = recorder.applied_migrations()
self.assertNotIn(("migrations", "0001_initial"), applied_migrations)
class MakeMigrationsTests(MigrationTestBase):
"""
Tests running the makemigrations command.
"""
def setUp(self):
super().setUp()
self._old_models = apps.app_configs['migrations'].models.copy()
def tearDown(self):
apps.app_configs['migrations'].models = self._old_models
apps.all_models['migrations'] = self._old_models
apps.clear_cache()
super().tearDown()
def test_files_content(self):
self.assertTableNotExists("migrations_unicodemodel")
apps.register_model('migrations', UnicodeModel)
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
# Check for empty __init__.py file in migrations folder
init_file = os.path.join(migration_dir, "__init__.py")
self.assertTrue(os.path.exists(init_file))
with open(init_file) as fp:
content = fp.read()
self.assertEqual(content, '')
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with open(initial_file, encoding='utf-8') as fp:
content = fp.read()
self.assertIn('migrations.CreateModel', content)
self.assertIn('initial = True', content)
self.assertIn('úñí©óðé µóðéø', content) # Meta.verbose_name
self.assertIn('úñí©óðé µóðéøß', content) # Meta.verbose_name_plural
self.assertIn('ÚÑÍ¢ÓÐÉ', content) # title.verbose_name
self.assertIn('“Ðjáñgó”', content) # title.default
def test_makemigrations_order(self):
"""
makemigrations should recognize number-only migrations (0001.py).
"""
module = 'migrations.test_migrations_order'
with self.temporary_migration_module(module=module) as migration_dir:
if hasattr(importlib, 'invalidate_caches'):
# importlib caches os.listdir() on some platforms like macOS
# (#23850).
importlib.invalidate_caches()
call_command('makemigrations', 'migrations', '--empty', '-n', 'a', '-v', '0')
self.assertTrue(os.path.exists(os.path.join(migration_dir, '0002_a.py')))
def test_makemigrations_empty_connections(self):
empty_connections = ConnectionHandler({'default': {}})
with mock.patch('django.core.management.commands.makemigrations.connections', new=empty_connections):
# with no apps
out = io.StringIO()
call_command('makemigrations', stdout=out)
self.assertIn('No changes detected', out.getvalue())
# with an app
with self.temporary_migration_module() as migration_dir:
call_command('makemigrations', 'migrations', verbosity=0)
init_file = os.path.join(migration_dir, '__init__.py')
self.assertTrue(os.path.exists(init_file))
@override_settings(INSTALLED_APPS=['migrations', 'migrations2'])
def test_makemigrations_consistency_checks_respect_routers(self):
"""
The history consistency checks in makemigrations respect
settings.DATABASE_ROUTERS.
"""
def patched_has_table(migration_recorder):
if migration_recorder.connection is connections['other']:
raise Exception('Other connection')
else:
return mock.DEFAULT
self.assertTableNotExists('migrations_unicodemodel')
apps.register_model('migrations', UnicodeModel)
with mock.patch.object(
MigrationRecorder, 'has_table',
autospec=True, side_effect=patched_has_table) as has_table:
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
self.assertEqual(has_table.call_count, 1) # 'default' is checked
# Router says not to migrate 'other' so consistency shouldn't
# be checked.
with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']):
call_command('makemigrations', 'migrations', verbosity=0)
self.assertEqual(has_table.call_count, 2) # 'default' again
# With a router that doesn't prohibit migrating 'other',
# consistency is checked.
with self.settings(DATABASE_ROUTERS=['migrations.routers.EmptyRouter']):
with self.assertRaisesMessage(Exception, 'Other connection'):
call_command('makemigrations', 'migrations', verbosity=0)
self.assertEqual(has_table.call_count, 4) # 'default' and 'other'
# With a router that doesn't allow migrating on any database,
# no consistency checks are made.
with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']):
with mock.patch.object(TestRouter, 'allow_migrate', return_value=False) as allow_migrate:
call_command('makemigrations', 'migrations', verbosity=0)
allow_migrate.assert_any_call('other', 'migrations', model_name='UnicodeModel')
# allow_migrate() is called with the correct arguments.
self.assertGreater(len(allow_migrate.mock_calls), 0)
for mock_call in allow_migrate.mock_calls:
_, call_args, call_kwargs = mock_call
connection_alias, app_name = call_args
self.assertIn(connection_alias, ['default', 'other'])
# Raises an error if invalid app_name/model_name occurs.
apps.get_app_config(app_name).get_model(call_kwargs['model_name'])
self.assertEqual(has_table.call_count, 4)
def test_failing_migration(self):
# If a migration fails to serialize, it shouldn't generate an empty file. #21280
apps.register_model('migrations', UnserializableModel)
with self.temporary_migration_module() as migration_dir:
with self.assertRaisesMessage(ValueError, 'Cannot serialize'):
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertFalse(os.path.exists(initial_file))
def test_makemigrations_conflict_exit(self):
"""
makemigrations exits if it detects a conflict.
"""
with self.temporary_migration_module(module="migrations.test_migrations_conflict"):
with self.assertRaises(CommandError) as context:
call_command("makemigrations")
exception_message = str(context.exception)
self.assertIn(
'Conflicting migrations detected; multiple leaf nodes '
'in the migration graph:',
exception_message
)
self.assertIn('0002_second', exception_message)
self.assertIn('0002_conflicting_second', exception_message)
self.assertIn('in migrations', exception_message)
self.assertIn("To fix them run 'python manage.py makemigrations --merge'", exception_message)
def test_makemigrations_merge_no_conflict(self):
"""
makemigrations exits if in merge mode with no conflicts.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("makemigrations", merge=True, stdout=out)
self.assertIn("No conflicts detected to merge.", out.getvalue())
def test_makemigrations_empty_no_app_specified(self):
"""
makemigrations exits if no app is specified with 'empty' mode.
"""
msg = 'You must supply at least one app label when using --empty.'
with self.assertRaisesMessage(CommandError, msg):
call_command("makemigrations", empty=True)
def test_makemigrations_empty_migration(self):
"""
makemigrations properly constructs an empty migration.
"""
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", empty=True, verbosity=0)
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with open(initial_file, encoding='utf-8') as fp:
content = fp.read()
# Remove all whitespace to check for empty dependencies and operations
content = content.replace(' ', '')
self.assertIn('dependencies=[\n]', content)
self.assertIn('operations=[\n]', content)
@override_settings(MIGRATION_MODULES={"migrations": None})
def test_makemigrations_disabled_migrations_for_app(self):
"""
makemigrations raises a nice error when migrations are disabled for an
app.
"""
msg = (
"Django can't create migrations for app 'migrations' because migrations "
"have been disabled via the MIGRATION_MODULES setting."
)
with self.assertRaisesMessage(ValueError, msg):
call_command("makemigrations", "migrations", empty=True, verbosity=0)
def test_makemigrations_no_changes_no_apps(self):
"""
makemigrations exits when there are no changes and no apps are specified.
"""
out = io.StringIO()
call_command("makemigrations", stdout=out)
self.assertIn("No changes detected", out.getvalue())
def test_makemigrations_no_changes(self):
"""
makemigrations exits when there are no changes to an app.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("No changes detected in app 'migrations'", out.getvalue())
def test_makemigrations_no_apps_initial(self):
"""
        makemigrations should detect that an initial migration is needed on
        empty migration modules if no app is provided.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_empty"):
call_command("makemigrations", stdout=out)
self.assertIn("0001_initial.py", out.getvalue())
def test_makemigrations_no_init(self):
"""Migration directories without an __init__.py file are allowed."""
out = io.StringIO()
with self.temporary_migration_module(module='migrations.test_migrations_no_init'):
call_command('makemigrations', stdout=out)
self.assertIn('0001_initial.py', out.getvalue())
def test_makemigrations_migrations_announce(self):
"""
makemigrations announces the migration at the default verbosity level.
"""
out = io.StringIO()
with self.temporary_migration_module():
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("Migrations for 'migrations'", out.getvalue())
def test_makemigrations_no_common_ancestor(self):
"""
makemigrations fails to merge migrations with no common ancestor.
"""
with self.assertRaises(ValueError) as context:
with self.temporary_migration_module(module="migrations.test_migrations_no_ancestor"):
call_command("makemigrations", "migrations", merge=True)
exception_message = str(context.exception)
self.assertIn("Could not find common ancestor of", exception_message)
self.assertIn("0002_second", exception_message)
self.assertIn("0002_conflicting_second", exception_message)
def test_makemigrations_interactive_reject(self):
"""
makemigrations enters and exits interactive mode properly.
"""
# Monkeypatch interactive questioner to auto reject
with mock.patch('builtins.input', mock.Mock(return_value='N')):
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, verbosity=0)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
def test_makemigrations_interactive_accept(self):
"""
makemigrations enters interactive mode and merges properly.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch('builtins.input', mock.Mock(return_value='y')):
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
self.assertIn("Created new merge migration", out.getvalue())
@mock.patch('django.db.migrations.utils.datetime')
def test_makemigrations_default_merge_name(self, mock_datetime):
mock_datetime.datetime.now.return_value = datetime.datetime(2016, 1, 2, 3, 4)
with mock.patch('builtins.input', mock.Mock(return_value='y')):
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge_20160102_0304.py')
self.assertTrue(os.path.exists(merge_file))
self.assertIn("Created new merge migration", out.getvalue())
def test_makemigrations_non_interactive_not_null_addition(self):
"""
Non-interactive makemigrations fails when a default is missing on a
new not-null field.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_int = models.IntegerField()
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.assertRaises(SystemExit):
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
def test_makemigrations_non_interactive_not_null_alteration(self):
"""
Non-interactive makemigrations fails when a default is missing on a
field changed to not-null.
"""
class Author(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField()
age = models.IntegerField(default=0)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Alter field slug on author", out.getvalue())
def test_makemigrations_non_interactive_no_model_rename(self):
"""
makemigrations adds and removes a possible model rename in
non-interactive mode.
"""
class RenamedModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Delete model SillyModel", out.getvalue())
self.assertIn("Create model RenamedModel", out.getvalue())
def test_makemigrations_non_interactive_no_field_rename(self):
"""
makemigrations adds and removes a possible field rename in
non-interactive mode.
"""
class SillyModel(models.Model):
silly_rename = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
self.assertIn("Remove field silly_field from sillymodel", out.getvalue())
self.assertIn("Add field silly_rename to sillymodel", out.getvalue())
def test_makemigrations_handle_merge(self):
"""
makemigrations properly merges the conflicting migrations with --noinput.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", name="merge", merge=True, interactive=False, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertIn("Created new merge migration", output)
def test_makemigration_merge_dry_run(self):
"""
makemigrations respects the --dry-run option when fixing migration
conflicts (#24427).
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command(
"makemigrations", "migrations", name="merge", dry_run=True,
merge=True, interactive=False, stdout=out,
)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
def test_makemigration_merge_dry_run_verbosity_3(self):
"""
`makemigrations --merge --dry-run` writes the merge migration file to
stdout with `verbosity == 3` (#24427).
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command(
"makemigrations", "migrations", name="merge", dry_run=True,
merge=True, interactive=False, stdout=out, verbosity=3,
)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
output = out.getvalue()
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
# Additional output caused by verbosity 3
# The complete merge migration file that would be written
self.assertIn("class Migration(migrations.Migration):", output)
self.assertIn("dependencies = [", output)
self.assertIn("('migrations', '0002_second')", output)
self.assertIn("('migrations', '0002_conflicting_second')", output)
self.assertIn("operations = [", output)
self.assertIn("]", output)
def test_makemigrations_dry_run(self):
"""
`makemigrations --dry-run` should not ask for defaults.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_date = models.DateField() # Added field without a default
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", dry_run=True, stdout=out)
# Output the expected changes directly, without asking for defaults
self.assertIn("Add field silly_date to sillymodel", out.getvalue())
def test_makemigrations_dry_run_verbosity_3(self):
"""
Allow `makemigrations --dry-run` to output the migrations file to
stdout (with verbosity == 3).
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_char = models.CharField(default="")
class Meta:
app_label = "migrations"
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3)
# Normal --dry-run output
self.assertIn("- Add field silly_char to sillymodel", out.getvalue())
# Additional output caused by verbosity 3
# The complete migrations file that would be written
self.assertIn("class Migration(migrations.Migration):", out.getvalue())
self.assertIn("dependencies = [", out.getvalue())
self.assertIn("('migrations', '0001_initial'),", out.getvalue())
self.assertIn("migrations.AddField(", out.getvalue())
self.assertIn("model_name='sillymodel',", out.getvalue())
self.assertIn("name='silly_char',", out.getvalue())
def test_makemigrations_migrations_modules_path_not_exist(self):
"""
makemigrations creates migrations when specifying a custom location
for migration files using MIGRATION_MODULES if the custom path
doesn't already exist.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = io.StringIO()
migration_module = "migrations.test_migrations_path_doesnt_exist.foo.bar"
with self.temporary_migration_module(module=migration_module) as migration_dir:
call_command("makemigrations", "migrations", stdout=out)
# Migrations file is actually created in the expected path.
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
# Command output indicates the migration is created.
self.assertIn(" - Create model SillyModel", out.getvalue())
@override_settings(MIGRATION_MODULES={'migrations': 'some.nonexistent.path'})
def test_makemigrations_migrations_modules_nonexistent_toplevel_package(self):
msg = (
'Could not locate an appropriate location to create migrations '
'package some.nonexistent.path. Make sure the toplevel package '
'exists and can be imported.'
)
with self.assertRaisesMessage(ValueError, msg):
call_command('makemigrations', 'migrations', empty=True, verbosity=0)
def test_makemigrations_interactive_by_default(self):
"""
The user is prompted to merge by default if there are conflicts and
merge=True. Answering 'N' here distinguishes the interactive default
from the --noinput behavior.
"""
# Monkeypatch interactive questioner to auto reject
out = io.StringIO()
with mock.patch('builtins.input', mock.Mock(return_value='N')):
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", name="merge", merge=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
# This will fail if interactive is False by default
self.assertFalse(os.path.exists(merge_file))
self.assertNotIn("Created new merge migration", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_no_merge(self):
"""
makemigrations does not raise a CommandError when an unspecified app
has conflicting migrations.
"""
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
call_command("makemigrations", "migrations", merge=False, verbosity=0)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_merge(self):
"""
makemigrations does not create a merge for an unspecified app even if
it has conflicting migrations.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch('builtins.input', mock.Mock(return_value='y')):
out = io.StringIO()
with self.temporary_migration_module(app_label="migrated_app") as migration_dir:
call_command("makemigrations", "migrated_app", name="merge", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
self.assertIn("No conflicts detected to merge.", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.conflicting_app_with_dependencies"])
def test_makemigrations_merge_dont_output_dependency_operations(self):
"""
makemigrations --merge doesn't output operations from apps other than
the one being merged.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch('builtins.input', mock.Mock(return_value='N')):
out = io.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: False):
call_command(
"makemigrations", "conflicting_app_with_dependencies",
merge=True, interactive=True, stdout=out
)
val = out.getvalue().lower()
self.assertIn('merging conflicting_app_with_dependencies\n', val)
self.assertIn(
' branch 0002_conflicting_second\n'
' - create model something\n',
val
)
self.assertIn(
' branch 0002_second\n'
' - delete model tribble\n'
' - remove field silly_field from author\n'
' - add field rating to author\n'
' - create model book\n',
val
)
def test_makemigrations_with_custom_name(self):
"""
makemigrations --name generates a custom migration name.
"""
with self.temporary_migration_module() as migration_dir:
def cmd(migration_count, migration_name, *args):
call_command("makemigrations", "migrations", "--verbosity", "0", "--name", migration_name, *args)
migration_file = os.path.join(migration_dir, "%s_%s.py" % (migration_count, migration_name))
# Check for existing migration file in migration folder
self.assertTrue(os.path.exists(migration_file))
with open(migration_file, encoding='utf-8') as fp:
content = fp.read()
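# Strip all spaces so the assertions below don't depend on formatting.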
content = content.replace(" ", "")
return content
# generate an initial migration
migration_name_0001 = "my_initial_migration"
content = cmd("0001", migration_name_0001)
self.assertIn("dependencies=[\n]", content)
# importlib caches os.listdir() on some platforms like macOS
# (#23850).
if hasattr(importlib, 'invalidate_caches'):
importlib.invalidate_caches()
# generate an empty migration
migration_name_0002 = "my_custom_migration"
content = cmd("0002", migration_name_0002, "--empty")
self.assertIn("dependencies=[\n('migrations','0001_%s'),\n]" % migration_name_0001, content)
self.assertIn("operations=[\n]", content)
def test_makemigrations_with_invalid_custom_name(self):
msg = 'The migration name must be a valid Python identifier.'
with self.assertRaisesMessage(CommandError, msg):
call_command('makemigrations', 'migrations', '--name', 'invalid name', '--empty')
def test_makemigrations_check(self):
"""
makemigrations --check should exit with a non-zero status when
there are changes to an app requiring migrations.
"""
with self.temporary_migration_module():
with self.assertRaises(SystemExit):
call_command("makemigrations", "--check", "migrations", verbosity=0)
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
call_command("makemigrations", "--check", "migrations", verbosity=0)
def test_makemigrations_migration_path_output(self):
"""
makemigrations should print the relative paths to the migrations unless
they are outside of the current tree, in which case the absolute path
should be shown.
"""
out = io.StringIO()
apps.register_model('migrations', UnicodeModel)
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", stdout=out)
self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue())
def test_makemigrations_migration_path_output_valueerror(self):
"""
makemigrations prints the absolute path if os.path.relpath() raises a
ValueError when it's impossible to obtain a relative path, e.g. on
Windows if Django is installed on a different drive than where the
migration files are created.
"""
out = io.StringIO()
with self.temporary_migration_module() as migration_dir:
with mock.patch('os.path.relpath', side_effect=ValueError):
call_command('makemigrations', 'migrations', stdout=out)
self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue())
def test_makemigrations_inconsistent_history(self):
"""
makemigrations should raise an InconsistentMigrationHistory exception
if any migrations are applied before their dependencies.
"""
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0002_second')
msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial"
with self.temporary_migration_module(module="migrations.test_migrations"):
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
call_command("makemigrations")
@mock.patch('builtins.input', return_value='1')
@mock.patch('django.db.migrations.questioner.sys.stdin', mock.MagicMock(encoding=sys.getdefaultencoding()))
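# input() returning '1' selects the first prompt option (provide a one-off default).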
def test_makemigrations_auto_now_add_interactive(self, *args):
"""
makemigrations prompts the user when adding auto_now_add to an existing
model.
"""
class Entry(models.Model):
title = models.CharField(max_length=255)
creation_date = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = 'migrations'
# Monkeypatch interactive questioner to auto accept
with mock.patch('django.db.migrations.questioner.sys.stdout', new_callable=io.StringIO) as prompt_stdout:
out = io.StringIO()
with self.temporary_migration_module(module='migrations.test_auto_now_add'):
call_command('makemigrations', 'migrations', interactive=True, stdout=out)
output = out.getvalue()
prompt_output = prompt_stdout.getvalue()
self.assertIn("You can accept the default 'timezone.now' by pressing 'Enter'", prompt_output)
self.assertIn("Add field creation_date to entry", output)
class SquashMigrationsTests(MigrationTestBase):
"""
Tests running the squashmigrations command.
"""
def test_squashmigrations_squashes(self):
"""
squashmigrations squashes migrations.
"""
with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir:
call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0)
squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py")
self.assertTrue(os.path.exists(squashed_migration_file))
def test_squashmigrations_initial_attribute(self):
with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir:
call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0)
squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py")
with open(squashed_migration_file, encoding='utf-8') as fp:
content = fp.read()
self.assertIn("initial = True", content)
def test_squashmigrations_optimizes(self):
"""
squashmigrations optimizes operations.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, stdout=out)
self.assertIn("Optimized from 8 operations to 2 operations.", out.getvalue())
def test_ticket_23799_squashmigrations_no_optimize(self):
"""
squashmigrations --no-optimize doesn't optimize operations.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("squashmigrations", "migrations", "0002",
interactive=False, verbosity=1, no_optimize=True, stdout=out)
self.assertIn("Skipping optimization", out.getvalue())
def test_squashmigrations_valid_start(self):
"""
squashmigrations accepts a starting migration.
"""
out = io.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_changes") as migration_dir:
call_command("squashmigrations", "migrations", "0002", "0003",
interactive=False, verbosity=1, stdout=out)
squashed_migration_file = os.path.join(migration_dir, "0002_second_squashed_0003_third.py")
with open(squashed_migration_file, encoding='utf-8') as fp:
content = fp.read()
self.assertIn(" ('migrations', '0001_initial')", content)
self.assertNotIn("initial = True", content)
out = out.getvalue()
self.assertNotIn(" - 0001_initial", out)
self.assertIn(" - 0002_second", out)
self.assertIn(" - 0003_third", out)
def test_squashmigrations_invalid_start(self):
"""
squashmigrations doesn't accept a starting migration after the ending migration.
"""
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
msg = (
"The migration 'migrations.0003_third' cannot be found. Maybe "
"it comes after the migration 'migrations.0002_second'"
)
with self.assertRaisesMessage(CommandError, msg):
call_command("squashmigrations", "migrations", "0003", "0002", interactive=False, verbosity=0)
def test_squashed_name_with_start_migration_name(self):
"""--squashed-name specifies the new migration's name."""
squashed_name = 'squashed_name'
with self.temporary_migration_module(module='migrations.test_migrations') as migration_dir:
call_command(
'squashmigrations', 'migrations', '0001', '0002',
squashed_name=squashed_name, interactive=False, verbosity=0,
)
squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name)
self.assertTrue(os.path.exists(squashed_migration_file))
def test_squashed_name_without_start_migration_name(self):
"""--squashed-name also works if a start migration is omitted."""
squashed_name = 'squashed_name'
with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir:
call_command(
'squashmigrations', 'migrations', '0001',
squashed_name=squashed_name, interactive=False, verbosity=0,
)
squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name)
self.assertTrue(os.path.exists(squashed_migration_file))
class AppLabelErrorTests(TestCase):
"""
This class inherits TestCase because MigrationTestBase uses
`available_apps = ['migrations']`, which makes 'migrations' the only
installed app. 'django.contrib.auth' must be in INSTALLED_APPS for some
of these tests.
"""
nonexistent_app_error = "No installed app with label 'nonexistent_app'."
did_you_mean_auth_error = (
"No installed app with label 'django.contrib.auth'. Did you mean "
"'auth'?"
)
def test_makemigrations_nonexistent_app_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command('makemigrations', 'nonexistent_app', stderr=err)
self.assertIn(self.nonexistent_app_error, err.getvalue())
def test_makemigrations_app_name_specified_as_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command('makemigrations', 'django.contrib.auth', stderr=err)
self.assertIn(self.did_you_mean_auth_error, err.getvalue())
def test_migrate_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command('migrate', 'nonexistent_app')
def test_migrate_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command('migrate', 'django.contrib.auth')
def test_showmigrations_nonexistent_app_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command('showmigrations', 'nonexistent_app', stderr=err)
self.assertIn(self.nonexistent_app_error, err.getvalue())
def test_showmigrations_app_name_specified_as_label(self):
err = io.StringIO()
with self.assertRaises(SystemExit):
call_command('showmigrations', 'django.contrib.auth', stderr=err)
self.assertIn(self.did_you_mean_auth_error, err.getvalue())
def test_sqlmigrate_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command('sqlmigrate', 'nonexistent_app', '0002')
def test_sqlmigrate_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command('sqlmigrate', 'django.contrib.auth', '0002')
def test_squashmigrations_nonexistent_app_label(self):
with self.assertRaisesMessage(CommandError, self.nonexistent_app_error):
call_command('squashmigrations', 'nonexistent_app', '0002')
def test_squashmigrations_app_name_specified_as_label(self):
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error):
call_command('squashmigrations', 'django.contrib.auth', '0002')
|
a53749101c4d5b1223a90b08220b9068ea6c2a9ccf4f32da4c0e9d286efab597 | import uuid
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.urls import Resolver404, path, resolve, reverse
from .converters import DynamicConverter
from .views import empty_view
included_kwargs = {'base': b'hello', 'value': b'world'}
converter_test_data = (
# ('url', ('url_name', 'app_name', {kwargs})),
# aGVsbG8= is 'hello' encoded in base64.
('/base64/aGVsbG8=/', ('base64', '', {'value': b'hello'})),
('/base64/aGVsbG8=/subpatterns/d29ybGQ=/', ('subpattern-base64', '', included_kwargs)),
('/base64/aGVsbG8=/namespaced/d29ybGQ=/', ('subpattern-base64', 'namespaced-base64', included_kwargs)),
)
@override_settings(ROOT_URLCONF='urlpatterns.path_urls')
class SimplifiedURLTests(SimpleTestCase):
def test_path_lookup_without_parameters(self):
match = resolve('/articles/2003/')
self.assertEqual(match.url_name, 'articles-2003')
self.assertEqual(match.args, ())
self.assertEqual(match.kwargs, {})
self.assertEqual(match.route, 'articles/2003/')
def test_path_lookup_with_typed_parameters(self):
match = resolve('/articles/2015/')
self.assertEqual(match.url_name, 'articles-year')
self.assertEqual(match.args, ())
self.assertEqual(match.kwargs, {'year': 2015})
self.assertEqual(match.route, 'articles/<int:year>/')
def test_path_lookup_with_multiple_parameters(self):
match = resolve('/articles/2015/04/12/')
self.assertEqual(match.url_name, 'articles-year-month-day')
self.assertEqual(match.args, ())
self.assertEqual(match.kwargs, {'year': 2015, 'month': 4, 'day': 12})
self.assertEqual(match.route, 'articles/<int:year>/<int:month>/<int:day>/')
def test_two_variable_at_start_of_path_pattern(self):
match = resolve('/en/foo/')
self.assertEqual(match.url_name, 'lang-and-path')
self.assertEqual(match.kwargs, {'lang': 'en', 'url': 'foo'})
self.assertEqual(match.route, '<lang>/<path:url>/')
def test_re_path(self):
match = resolve('/regex/1/')
self.assertEqual(match.url_name, 'regex')
self.assertEqual(match.kwargs, {'pk': '1'})
self.assertEqual(match.route, '^regex/(?P<pk>[0-9]+)/$')
def test_path_lookup_with_inclusion(self):
match = resolve('/included_urls/extra/something/')
self.assertEqual(match.url_name, 'inner-extra')
self.assertEqual(match.route, 'included_urls/extra/<extra>/')
def test_path_lookup_with_empty_string_inclusion(self):
match = resolve('/more/99/')
self.assertEqual(match.url_name, 'inner-more')
self.assertEqual(match.route, r'^more/(?P<extra>\w+)/$')
def test_path_lookup_with_double_inclusion(self):
match = resolve('/included_urls/more/some_value/')
self.assertEqual(match.url_name, 'inner-more')
self.assertEqual(match.route, r'included_urls/more/(?P<extra>\w+)/$')
def test_path_reverse_without_parameter(self):
url = reverse('articles-2003')
self.assertEqual(url, '/articles/2003/')
def test_path_reverse_with_parameter(self):
url = reverse('articles-year-month-day', kwargs={'year': 2015, 'month': 4, 'day': 12})
self.assertEqual(url, '/articles/2015/4/12/')
@override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls')
def test_converter_resolve(self):
for url, (url_name, app_name, kwargs) in converter_test_data:
with self.subTest(url=url):
match = resolve(url)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.kwargs, kwargs)
@override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls')
def test_converter_reverse(self):
for expected, (url_name, app_name, kwargs) in converter_test_data:
if app_name:
url_name = '%s:%s' % (app_name, url_name)
with self.subTest(url=url_name):
url = reverse(url_name, kwargs=kwargs)
self.assertEqual(url, expected)
@override_settings(ROOT_URLCONF='urlpatterns.path_base64_urls')
def test_converter_reverse_with_second_layer_instance_namespace(self):
kwargs = included_kwargs.copy()
kwargs['last_value'] = b'world'
url = reverse('instance-ns-base64:subsubpattern-base64', kwargs=kwargs)
self.assertEqual(url, '/base64/aGVsbG8=/subpatterns/d29ybGQ=/d29ybGQ=/')
def test_path_inclusion_is_matchable(self):
match = resolve('/included_urls/extra/something/')
self.assertEqual(match.url_name, 'inner-extra')
self.assertEqual(match.kwargs, {'extra': 'something'})
def test_path_inclusion_is_reversible(self):
url = reverse('inner-extra', kwargs={'extra': 'something'})
self.assertEqual(url, '/included_urls/extra/something/')
def test_invalid_converter(self):
msg = "URL route 'foo/<nonexistent:var>/' uses invalid converter 'nonexistent'."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
path('foo/<nonexistent:var>/', empty_view)
@override_settings(ROOT_URLCONF='urlpatterns.converter_urls')
class ConverterTests(SimpleTestCase):
def test_matching_urls(self):
def no_converter(x):
return x
test_data = (
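# (converter name, URL suffixes to resolve, callable mapping a suffix to the expected kwarg value)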
('int', {'0', '1', '01', 1234567890}, int),
('str', {'abcxyz'}, no_converter),
('path', {'allows.ANY*characters'}, no_converter),
('slug', {'abcxyz-ABCXYZ_01234567890'}, no_converter),
('uuid', {'39da9369-838e-4750-91a5-f7805cd82839'}, uuid.UUID),
)
for url_name, url_suffixes, converter in test_data:
for url_suffix in url_suffixes:
url = '/%s/%s/' % (url_name, url_suffix)
with self.subTest(url=url):
match = resolve(url)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.kwargs, {url_name: converter(url_suffix)})
# reverse() works with string parameters.
string_kwargs = {url_name: url_suffix}
self.assertEqual(reverse(url_name, kwargs=string_kwargs), url)
# reverse() also works with native types (int, UUID, etc.).
if converter is not no_converter:
# The converted value might be different for int (a
# leading zero is lost in the conversion).
converted_value = match.kwargs[url_name]
converted_url = '/%s/%s/' % (url_name, converted_value)
self.assertEqual(reverse(url_name, kwargs={url_name: converted_value}), converted_url)
def test_nonmatching_urls(self):
test_data = (
('int', {'-1', 'letters'}),
('str', {'', '/'}),
('path', {''}),
('slug', {'', 'stars*notallowed'}),
('uuid', {
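# Each non-empty candidate is one character short in a different group.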
'',
'9da9369-838e-4750-91a5-f7805cd82839',
'39da9369-838-4750-91a5-f7805cd82839',
'39da9369-838e-475-91a5-f7805cd82839',
'39da9369-838e-4750-91a-f7805cd82839',
'39da9369-838e-4750-91a5-f7805cd8283',
}),
)
for url_name, url_suffixes in test_data:
for url_suffix in url_suffixes:
url = '/%s/%s/' % (url_name, url_suffix)
with self.subTest(url=url), self.assertRaises(Resolver404):
resolve(url)
class ParameterRestrictionTests(SimpleTestCase):
def test_non_identifier_parameter_name_causes_exception(self):
msg = (
"URL route 'hello/<int:1>/' uses parameter name '1' which isn't "
"a valid Python identifier."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
path(r'hello/<int:1>/', lambda r: None)
def test_allows_non_ascii_but_valid_identifiers(self):
# \u0394 is "GREEK CAPITAL LETTER DELTA", a valid identifier.
p = path('hello/<str:\u0394>/', lambda r: None)
match = p.resolve('hello/1/')
self.assertEqual(match.kwargs, {'\u0394': '1'})
@override_settings(ROOT_URLCONF='urlpatterns.path_dynamic_urls')
class ConversionExceptionTests(SimpleTestCase):
"""How are errors in Converter.to_python() and to_url() handled?"""
def test_resolve_value_error_means_no_match(self):
@DynamicConverter.register_to_python
def raises_value_error(value):
raise ValueError()
with self.assertRaises(Resolver404):
resolve('/dynamic/abc/')
def test_resolve_type_error_propagates(self):
@DynamicConverter.register_to_python
def raises_type_error(value):
raise TypeError('This type error propagates.')
with self.assertRaisesMessage(TypeError, 'This type error propagates.'):
resolve('/dynamic/abc/')
def test_reverse_value_error_propagates(self):
@DynamicConverter.register_to_url
def raises_value_error(value):
raise ValueError('This value error propagates.')
with self.assertRaisesMessage(ValueError, 'This value error propagates.'):
reverse('dynamic', kwargs={'value': object()})
|
db23652cb3bf2efbefea1dc245b29c2b5a02a3c8391a86fdc1b56b822ddfc33d | import os
import unittest
import warnings
from io import StringIO
from unittest import mock
from django.conf import settings
from django.contrib.staticfiles.finders import get_finder, get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage
from django.db import connection, connections, models, router
from django.forms import EmailField, IntegerField
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.html import HTMLParseError, parse_html
from django.test.utils import (
CaptureQueriesContext, TestContextDecorator, isolate_apps,
override_settings, setup_test_environment,
)
from django.urls import NoReverseMatch, path, reverse, reverse_lazy
from .models import Car, Person, PossessedCar
from .views import empty_response
class SkippingTestCase(SimpleTestCase):
def _assert_skipping(self, func, expected_exc, msg=None):
try:
if msg is not None:
self.assertRaisesMessage(expected_exc, msg, func)
else:
self.assertRaises(expected_exc, func)
except unittest.SkipTest:
self.fail('%s should not result in a skipped test.' % func.__name__)
def test_skip_unless_db_feature(self):
"""
Testing the django.test.skipUnlessDBFeature decorator.
"""
# Total hack, but it works, just want an attribute that's always true.
@skipUnlessDBFeature("__class__")
def test_func():
raise ValueError
@skipUnlessDBFeature("notprovided")
def test_func2():
raise ValueError
@skipUnlessDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipUnlessDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
self._assert_skipping(test_func, ValueError)
self._assert_skipping(test_func2, unittest.SkipTest)
self._assert_skipping(test_func3, ValueError)
self._assert_skipping(test_func4, unittest.SkipTest)
class SkipTestCase(SimpleTestCase):
@skipUnlessDBFeature('missing')
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase('test_foo').test_foo,
ValueError,
"skipUnlessDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase) "
"as SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database."
)
def test_skip_if_db_feature(self):
"""
Testing the django.test.skipIfDBFeature decorator.
"""
@skipIfDBFeature("__class__")
def test_func():
raise ValueError
@skipIfDBFeature("notprovided")
def test_func2():
raise ValueError
@skipIfDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipIfDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
@skipIfDBFeature("notprovided", "notprovided")
def test_func5():
raise ValueError
self._assert_skipping(test_func, unittest.SkipTest)
self._assert_skipping(test_func2, ValueError)
self._assert_skipping(test_func3, unittest.SkipTest)
self._assert_skipping(test_func4, unittest.SkipTest)
self._assert_skipping(test_func5, ValueError)
class SkipTestCase(SimpleTestCase):
@skipIfDBFeature('missing')
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase('test_foo').test_foo,
ValueError,
"skipIfDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase) "
"as SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database."
)
class SkippingClassTestCase(TestCase):
def test_skip_class_unless_db_feature(self):
@skipUnlessDBFeature("__class__")
class NotSkippedTests(TestCase):
def test_dummy(self):
return
@skipUnlessDBFeature("missing")
@skipIfDBFeature("__class__")
class SkippedTests(TestCase):
def test_will_be_skipped(self):
self.fail("We should never arrive here.")
@skipIfDBFeature("__dict__")
class SkippedTestsSubclass(SkippedTests):
pass
test_suite = unittest.TestSuite()
test_suite.addTest(NotSkippedTests('test_dummy'))
try:
test_suite.addTest(SkippedTests('test_will_be_skipped'))
test_suite.addTest(SkippedTestsSubclass('test_will_be_skipped'))
except unittest.SkipTest:
self.fail('SkipTest should not be raised here.')
result = unittest.TextTestRunner(stream=StringIO()).run(test_suite)
self.assertEqual(result.testsRun, 3)
self.assertEqual(len(result.skipped), 2)
self.assertEqual(result.skipped[0][1], 'Database has feature(s) __class__')
self.assertEqual(result.skipped[1][1], 'Database has feature(s) __class__')
def test_missing_default_databases(self):
@skipIfDBFeature('missing')
class MissingDatabases(SimpleTestCase):
def test_assertion_error(self):
pass
suite = unittest.TestSuite()
try:
suite.addTest(MissingDatabases('test_assertion_error'))
except unittest.SkipTest:
self.fail("SkipTest should not be raised at this stage")
runner = unittest.TextTestRunner(stream=StringIO())
msg = (
"skipIfDBFeature cannot be used on <class 'test_utils.tests."
"SkippingClassTestCase.test_missing_default_databases.<locals>."
"MissingDatabases'> as it doesn't allow queries against the "
"'default' database."
)
with self.assertRaisesMessage(ValueError, msg):
runner.run(suite)
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertNumQueriesTests(TestCase):
def test_assert_num_queries(self):
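# An exception raised inside the measured callable propagates out of assertNumQueries().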
def test_func():
raise ValueError
with self.assertRaises(ValueError):
self.assertNumQueries(2, test_func)
def test_assert_num_queries_with_client(self):
person = Person.objects.create(name='test')
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
def test_func():
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.assertNumQueries(2, test_func)
@unittest.skipUnless(
connection.vendor != 'sqlite' or not connection.is_in_memory_db(),
'For SQLite in-memory tests, closing the connection destroys the database.'
)
class AssertNumQueriesUponConnectionTests(TransactionTestCase):
available_apps = []
def test_ignores_connection_configuration_queries(self):
real_ensure_connection = connection.ensure_connection
connection.close()
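# Start from a closed connection so the first query has to reconnect.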
def make_configuration_query():
is_opening_connection = connection.connection is None
real_ensure_connection()
if is_opening_connection:
# Avoid infinite recursion. Creating a cursor calls
# ensure_connection() which is currently mocked by this method.
connection.cursor().execute('SELECT 1' + connection.features.bare_select_suffix)
ensure_connection = 'django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection'
with mock.patch(ensure_connection, side_effect=make_configuration_query):
with self.assertNumQueries(1):
list(Car.objects.all())
class AssertQuerysetEqualTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.p1 = Person.objects.create(name='p1')
cls.p2 = Person.objects.create(name='p2')
def test_ordered(self):
self.assertQuerysetEqual(
Person.objects.all().order_by('name'),
[repr(self.p1), repr(self.p2)]
)
def test_unordered(self):
self.assertQuerysetEqual(
Person.objects.all().order_by('name'),
[repr(self.p2), repr(self.p1)],
ordered=False
)
def test_transform(self):
self.assertQuerysetEqual(
Person.objects.all().order_by('name'),
[self.p1.pk, self.p2.pk],
transform=lambda x: x.pk
)
def test_undefined_order(self):
# Using an unordered queryset with more than one ordered value
# is an error.
msg = 'Trying to compare non-ordered queryset against more than one ordered values'
with self.assertRaisesMessage(ValueError, msg):
self.assertQuerysetEqual(
Person.objects.all(),
[repr(self.p1), repr(self.p2)]
)
# No error for one value.
self.assertQuerysetEqual(
Person.objects.filter(name='p1'),
[repr(self.p1)]
)
def test_repeated_values(self):
"""
assertQuerysetEqual checks the number of appearances of each item
when used with ordered=False.
"""
batmobile = Car.objects.create(name='Batmobile')
k2000 = Car.objects.create(name='K 2000')
PossessedCar.objects.bulk_create([
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
])
with self.assertRaises(AssertionError):
self.assertQuerysetEqual(
self.p1.cars.all(),
[repr(batmobile), repr(k2000)],
ordered=False
)
self.assertQuerysetEqual(
self.p1.cars.all(),
[repr(batmobile)] * 2 + [repr(k2000)] * 4,
ordered=False
)
@override_settings(ROOT_URLCONF='test_utils.urls')
class CaptureQueriesContextManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.person_pk = str(Person.objects.create(name='test').pk)
def test_simple(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
with CaptureQueriesContext(connection) as captured_queries:
pass
self.assertEqual(0, len(captured_queries))
def test_within(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
def test_nested(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.count()
with CaptureQueriesContext(connection) as nested_captured_queries:
Person.objects.count()
self.assertEqual(1, len(nested_captured_queries))
self.assertEqual(2, len(captured_queries))
def test_failure(self):
with self.assertRaises(TypeError):
with CaptureQueriesContext(connection):
raise TypeError
def test_with_client(self):
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 2)
self.assertIn(self.person_pk, captured_queries[0]['sql'])
self.assertIn(self.person_pk, captured_queries[1]['sql'])
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertNumQueriesContextManagerTests(TestCase):
def test_simple(self):
with self.assertNumQueries(0):
pass
with self.assertNumQueries(1):
Person.objects.count()
with self.assertNumQueries(2):
Person.objects.count()
Person.objects.count()
def test_failure(self):
with self.assertRaises(AssertionError) as exc_info:
with self.assertNumQueries(2):
Person.objects.count()
exc_lines = str(exc_info.exception).split('\n')
self.assertEqual(exc_lines[0], '1 != 2 : 1 queries executed, 2 expected')
self.assertEqual(exc_lines[1], 'Captured queries were:')
self.assertTrue(exc_lines[2].startswith('1.')) # queries are numbered
with self.assertRaises(TypeError):
with self.assertNumQueries(4000):
raise TypeError
def test_with_client(self):
person = Person.objects.create(name="test")
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(2):
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertTemplateUsedContextManagerTests(SimpleTestCase):
def test_usage(self):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/base.html')
with self.assertTemplateUsed(template_name='template_used/base.html'):
render_to_string('template_used/base.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/include.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/extends.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/base.html')
render_to_string('template_used/base.html')
def test_nested_usage(self):
with self.assertTemplateUsed('template_used/base.html'):
with self.assertTemplateUsed('template_used/include.html'):
render_to_string('template_used/include.html')
with self.assertTemplateUsed('template_used/extends.html'):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/extends.html')
with self.assertTemplateUsed('template_used/base.html'):
with self.assertTemplateUsed('template_used/alternative.html'):
render_to_string('template_used/alternative.html')
render_to_string('template_used/base.html')
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/extends.html')
with self.assertTemplateNotUsed('template_used/base.html'):
render_to_string('template_used/alternative.html')
render_to_string('template_used/base.html')
def test_not_used(self):
with self.assertTemplateNotUsed('template_used/base.html'):
pass
with self.assertTemplateNotUsed('template_used/alternative.html'):
pass
def test_error_message(self):
msg = 'template_used/base.html was not rendered. No template was rendered.'
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed('template_used/base.html'):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name='template_used/base.html'):
pass
msg2 = (
'template_used/base.html was not rendered. Following templates '
'were rendered: template_used/alternative.html'
)
with self.assertRaisesMessage(AssertionError, msg2):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/alternative.html')
with self.assertRaisesMessage(AssertionError, 'No templates used to render the response'):
response = self.client.get('/test_utils/no_template_used/')
self.assertTemplateUsed(response, 'template_used/base.html')
def test_failure(self):
msg = 'response and/or template_name argument must be provided'
with self.assertRaisesMessage(TypeError, msg):
with self.assertTemplateUsed():
pass
msg = 'No templates used to render the response'
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(''):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(''):
render_to_string('template_used/base.html')
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name=''):
pass
msg = (
'template_used/base.html was not rendered. Following '
'templates were rendered: template_used/alternative.html'
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed('template_used/base.html'):
render_to_string('template_used/alternative.html')
def test_assert_used_on_http_response(self):
response = HttpResponse()
error_msg = (
'assertTemplateUsed() and assertTemplateNotUsed() are only '
'usable on responses fetched using the Django test Client.'
)
with self.assertRaisesMessage(ValueError, error_msg):
self.assertTemplateUsed(response, 'template.html')
with self.assertRaisesMessage(ValueError, error_msg):
self.assertTemplateNotUsed(response, 'template.html')
class HTMLEqualTests(SimpleTestCase):
def test_html_parser(self):
element = parse_html('<div><p>Hello</p></div>')
self.assertEqual(len(element.children), 1)
self.assertEqual(element.children[0].name, 'p')
self.assertEqual(element.children[0].children[0], 'Hello')
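# Unclosed tags and valueless attributes parse without raising.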
parse_html('<p>')
parse_html('<p attr>')
dom = parse_html('<p>foo')
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.name, 'p')
self.assertEqual(dom[0], 'foo')
def test_parse_html_in_script(self):
parse_html('<script>var a = "<p" + ">";</script>')
parse_html('''
<script>
var js_sha_link='<p>***</p>';
</script>
''')
# Script content is parsed as text.
dom = parse_html('''
<script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
''')
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")
def test_self_closing_tags(self):
self_closing_tags = (
'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame',
'base', 'col',
)
for tag in self_closing_tags:
dom = parse_html('<p>Hello <%s> world</p>' % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], 'Hello')
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], 'world')
dom = parse_html('<p>Hello <%s /> world</p>' % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], 'Hello')
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], 'world')
def test_simple_equal_html(self):
self.assertHTMLEqual('', '')
self.assertHTMLEqual('<p></p>', '<p></p>')
self.assertHTMLEqual('<p></p>', ' <p> </p> ')
self.assertHTMLEqual(
'<div><p>Hello</p></div>',
'<div><p>Hello</p></div>')
self.assertHTMLEqual(
'<div><p>Hello</p></div>',
'<div> <p>Hello</p> </div>')
self.assertHTMLEqual(
'<div>\n<p>Hello</p></div>',
'<div><p>Hello</p></div>\n')
self.assertHTMLEqual(
'<div><p>Hello\nWorld !</p></div>',
'<div><p>Hello World\n!</p></div>')
self.assertHTMLEqual(
'<div><p>Hello\nWorld !</p></div>',
'<div><p>Hello World\n!</p></div>')
self.assertHTMLEqual(
'<p>Hello World !</p>',
'<p>Hello World\n\n!</p>')
self.assertHTMLEqual('<p> </p>', '<p></p>')
self.assertHTMLEqual('<p/>', '<p></p>')
self.assertHTMLEqual('<p />', '<p></p>')
self.assertHTMLEqual('<input checked>', '<input checked="checked">')
self.assertHTMLEqual('<p>Hello', '<p> Hello')
self.assertHTMLEqual('<p>Hello</p>World', '<p>Hello</p> World')
def test_ignore_comments(self):
self.assertHTMLEqual(
'<div>Hello<!-- this is a comment --> World!</div>',
'<div>Hello World!</div>')
def test_unequal_html(self):
self.assertHTMLNotEqual('<p>Hello</p>', '<p>Hello!</p>')
self.assertHTMLNotEqual('<p>foo&#20;bar</p>', '<p>foo bar</p>')
self.assertHTMLNotEqual('<p>foo bar</p>', '<p>foo&nbsp;bar</p>')
self.assertHTMLNotEqual('<p>foo nbsp</p>', '<p>foo&nbsp;</p>')
self.assertHTMLNotEqual('<p>foo #20</p>', '<p>foo&#20;</p>')
self.assertHTMLNotEqual(
'<p><span>Hello</span><span>World</span></p>',
'<p><span>Hello</span>World</p>')
self.assertHTMLNotEqual(
'<p><span>Hello</span>World</p>',
'<p><span>Hello</span><span>World</span></p>')
def test_attributes(self):
self.assertHTMLEqual(
'<input type="text" id="id_name" />',
'<input id="id_name" type="text" />')
self.assertHTMLEqual(
'''<input type='text' id="id_name" />''',
'<input id="id_name" type="text" />')
self.assertHTMLNotEqual(
'<input type="text" id="id_name" />',
'<input type="password" id="id_name" />')
def test_complex_examples(self):
self.assertHTMLEqual(
"""<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""",
"""
<tr><th>
<label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" value="John" id="id_first_name" />
</td></tr>
<tr><th>
<label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
</td></tr>
<tr><th>
<label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
</td></tr>
""")
self.assertHTMLEqual(
"""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p>
This is a valid paragraph
<div> this is a div AFTER the p</div>
</body>
</html>""", """
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p> This is a valid paragraph
<!-- browsers would close the p tag here -->
<div> this is a div AFTER the p</div>
</p> <!-- this is invalid HTML parsing, but it should make no
difference in most cases -->
</body>
</html>""")
def test_html_contain(self):
# Equal HTML fragments contain each other.
dom1 = parse_html('<p>foo')
dom2 = parse_html('<p>foo</p>')
self.assertIn(dom1, dom2)
self.assertIn(dom2, dom1)
dom2 = parse_html('<div><p>foo</p></div>')
self.assertIn(dom1, dom2)
self.assertNotIn(dom2, dom1)
self.assertNotIn('<p>foo</p>', dom2)
self.assertIn('foo', dom2)
# Containment also works when the parsed HTML has multiple root elements.
dom1 = parse_html('<p>foo</p><p>bar</p>')
dom2 = parse_html('<p>foo</p><p>bar</p>')
self.assertIn(dom1, dom2)
dom1 = parse_html('<p>foo</p>')
self.assertIn(dom1, dom2)
dom1 = parse_html('<p>bar</p>')
self.assertIn(dom1, dom2)
dom1 = parse_html('<div><p>foo</p><p>bar</p></div>')
self.assertIn(dom2, dom1)
def test_count(self):
# Equal HTML fragments contain each other exactly once.
dom1 = parse_html('<p>foo')
dom2 = parse_html('<p>foo</p>')
self.assertEqual(dom1.count(dom2), 1)
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo</p><p>bar</p>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo foo</p><p>foo</p>')
self.assertEqual(dom2.count('foo'), 3)
dom2 = parse_html('<p class="bar">foo</p>')
self.assertEqual(dom2.count('bar'), 0)
self.assertEqual(dom2.count('class'), 0)
self.assertEqual(dom2.count('p'), 0)
self.assertEqual(dom2.count('o'), 2)
dom2 = parse_html('<p>foo</p><p>foo</p>')
self.assertEqual(dom2.count(dom1), 2)
dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<div><div><p>foo</p></div></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo<p>foo</p></p>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html('<p>foo<p>bar</p></p>')
self.assertEqual(dom2.count(dom1), 0)
# html with a root element contains the same html with no root element
dom1 = parse_html('<p>foo</p><p>bar</p>')
dom2 = parse_html('<div><p>foo</p><p>bar</p></div>')
self.assertEqual(dom2.count(dom1), 1)
def test_parsing_errors(self):
with self.assertRaises(AssertionError):
self.assertHTMLEqual('<p>', '')
with self.assertRaises(AssertionError):
self.assertHTMLEqual('', '<p>')
error_msg = (
"First argument is not valid HTML:\n"
"('Unexpected end tag `div` (Line 1, Column 6)', (1, 6))"
)
with self.assertRaisesMessage(AssertionError, error_msg):
self.assertHTMLEqual('< div></ div>', '<div></div>')
with self.assertRaises(HTMLParseError):
parse_html('</p>')
def test_contains_html(self):
response = HttpResponse('''<body>
This is a form: <form method="get">
<input type="text" name="Hello" />
</form></body>''')
self.assertNotContains(response, "<input name='Hello' type='text'>")
self.assertContains(response, '<form method="get">')
self.assertContains(response, "<input name='Hello' type='text'>", html=True)
self.assertNotContains(response, '<form method="get">', html=True)
invalid_response = HttpResponse('''<body <bad>>''')
with self.assertRaises(AssertionError):
self.assertContains(invalid_response, '<p></p>')
with self.assertRaises(AssertionError):
self.assertContains(response, '<p "whats" that>')
def test_unicode_handling(self):
response = HttpResponse('<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>')
self.assertContains(
response,
'<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>',
html=True
)
class JSONEqualTests(SimpleTestCase):
def test_simple_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_unordered(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz", "attr1": "foo"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(json1, json2)
def test_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONEqual(valid_json, invalid_json)
def test_simple_not_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
self.assertJSONNotEqual(json1, json2)
def test_simple_not_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(json1, json2)
def test_not_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(valid_json, invalid_json)
class XMLEqualTests(SimpleTestCase):
def test_simple_equal(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_unordered(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raise(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raises_message(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
msg = '''{xml1} != {xml2}
- <elem attr1='a' />
+ <elem attr2='b' attr1='a' />
? ++++++++++
'''.format(xml1=repr(xml1), xml2=repr(xml2))
with self.assertRaisesMessage(AssertionError, msg):
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal(self):
xml1 = "<elem attr1='a' attr2='c' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLNotEqual(xml1, xml2)
def test_simple_not_equal_raise(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml1, xml2)
def test_parsing_errors(self):
xml_invalid = "<elem attr1='a attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml_invalid, xml2)
def test_comment_root(self):
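# Comments before the root element are ignored when comparing.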
xml1 = "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />"
xml2 = "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_with_leading_or_trailing_whitespace(self):
xml1 = "<elem>foo</elem> \t\n"
xml2 = " \t\n<elem>foo</elem>"
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal_with_whitespace_in_the_middle(self):
xml1 = "<elem>foo</elem><elem>bar</elem>"
xml2 = "<elem>foo</elem> <elem>bar</elem>"
self.assertXMLNotEqual(xml1, xml2)
class SkippingExtraTests(TestCase):
fixtures = ['should_not_be_loaded.json']
# HACK: This depends on internals of our TestCase subclasses
def __call__(self, result=None):
# Detect fixture loading by counting SQL queries, should be zero
with self.assertNumQueries(0):
super().__call__(result)
@unittest.skip("Fixture loading should not be performed for skipped tests.")
def test_fixtures_are_skipped(self):
pass
class AssertRaisesMsgTest(SimpleTestCase):
def test_assert_raises_message(self):
msg = "'Expected message' not found in 'Unexpected message'"
# context manager form of assertRaisesMessage()
with self.assertRaisesMessage(AssertionError, msg):
with self.assertRaisesMessage(ValueError, "Expected message"):
raise ValueError("Unexpected message")
# callable form
def func():
raise ValueError("Unexpected message")
with self.assertRaisesMessage(AssertionError, msg):
self.assertRaisesMessage(ValueError, "Expected message", func)
def test_special_re_chars(self):
"""assertRaisesMessage shouldn't interpret RE special chars."""
def func1():
raise ValueError("[.*x+]y?")
with self.assertRaisesMessage(ValueError, "[.*x+]y?"):
func1()
class AssertWarnsMessageTests(SimpleTestCase):
def test_context_manager(self):
with self.assertWarnsMessage(UserWarning, 'Expected message'):
warnings.warn('Expected message', UserWarning)
def test_context_manager_failure(self):
msg = "Expected message' not found in 'Unexpected message'"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertWarnsMessage(UserWarning, 'Expected message'):
warnings.warn('Unexpected message', UserWarning)
def test_callable(self):
def func():
warnings.warn('Expected message', UserWarning)
self.assertWarnsMessage(UserWarning, 'Expected message', func)
def test_special_re_chars(self):
def func1():
warnings.warn('[.*x+]y?', UserWarning)
with self.assertWarnsMessage(UserWarning, '[.*x+]y?'):
func1()
class AssertFieldOutputTests(SimpleTestCase):
def test_assert_field_output(self):
error_invalid = ['Enter a valid email address.']
self.assertFieldOutput(EmailField, {'[email protected]': '[email protected]'}, {'aaa': error_invalid})
with self.assertRaises(AssertionError):
self.assertFieldOutput(EmailField, {'[email protected]': '[email protected]'}, {'aaa': error_invalid + ['Another error']})
with self.assertRaises(AssertionError):
self.assertFieldOutput(EmailField, {'[email protected]': 'Wrong output'}, {'aaa': error_invalid})
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField, {'[email protected]': '[email protected]'}, {'aaa': ['Come on, gimme some well formatted data, dude.']}
)
def test_custom_required_message(self):
class MyCustomField(IntegerField):
default_error_messages = {
'required': 'This is really required.',
}
self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)
@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertURLEqualTests(SimpleTestCase):
def test_equal(self):
valid_tests = (
('http://example.com/?', 'http://example.com/'),
('http://example.com/?x=1&', 'http://example.com/?x=1'),
('http://example.com/?x=1&y=2', 'http://example.com/?y=2&x=1'),
('http://example.com/?x=1&y=2', 'http://example.com/?y=2&x=1'),
('http://example.com/?x=1&y=2&a=1&a=2', 'http://example.com/?a=1&a=2&y=2&x=1'),
('/path/to/?x=1&y=2&z=3', '/path/to/?z=3&y=2&x=1'),
('?x=1&y=2&z=3', '?z=3&y=2&x=1'),
('/test_utils/no_template_used/', reverse_lazy('no_template_used')),
)
for url1, url2 in valid_tests:
with self.subTest(url=url1):
self.assertURLEqual(url1, url2)
def test_not_equal(self):
invalid_tests = (
# Protocol must be the same.
('http://example.com/', 'https://example.com/'),
('http://example.com/?x=1&x=2', 'https://example.com/?x=2&x=1'),
('http://example.com/?x=1&y=bar&x=2', 'https://example.com/?y=bar&x=2&x=1'),
# Parameters of the same name must be in the same order.
            ('/path/to/?a=1&a=2', '/path/to/?a=2&a=1')
)
for url1, url2 in invalid_tests:
with self.subTest(url=url1), self.assertRaises(AssertionError):
self.assertURLEqual(url1, url2)
def test_message(self):
msg = (
"Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual('http://example.com/?x=1&x=2', 'https://example.com/?x=2&x=1')
def test_msg_prefix(self):
msg = (
"Prefix: Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
'http://example.com/?x=1&x=2', 'https://example.com/?x=2&x=1',
msg_prefix='Prefix: ',
)
class FirstUrls:
urlpatterns = [path('first/', empty_response, name='first')]
class SecondUrls:
urlpatterns = [path('second/', empty_response, name='second')]
class SetupTestEnvironmentTests(SimpleTestCase):
def test_setup_test_environment_calling_more_than_once(self):
with self.assertRaisesMessage(RuntimeError, "setup_test_environment() was already called"):
setup_test_environment()
def test_allowed_hosts(self):
for type_ in (list, tuple):
with self.subTest(type_=type_):
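                # Iterating a one-character string yields its characters, so
                # list('*') == ['*'] and tuple('*') == ('*',) build the
                # ALLOWED_HOSTS container for each type under test.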
allowed_hosts = type_('*')
with mock.patch('django.test.utils._TestState') as x:
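                    # setup_test_environment() refuses to run twice while
                    # _TestState.saved_data exists; deleting it here makes the
                    # call below behave like a first invocation.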
del x.saved_data
with self.settings(ALLOWED_HOSTS=allowed_hosts):
setup_test_environment()
self.assertEqual(settings.ALLOWED_HOSTS, ['*', 'testserver'])
class OverrideSettingsTests(SimpleTestCase):
# #21518 -- If neither override_settings nor a setting_changed receiver
# clears the URL cache between tests, then one of test_first or
# test_second will fail.
@override_settings(ROOT_URLCONF=FirstUrls)
def test_urlconf_first(self):
reverse('first')
@override_settings(ROOT_URLCONF=SecondUrls)
def test_urlconf_second(self):
reverse('second')
def test_urlconf_cache(self):
with self.assertRaises(NoReverseMatch):
reverse('first')
with self.assertRaises(NoReverseMatch):
reverse('second')
with override_settings(ROOT_URLCONF=FirstUrls):
self.client.get(reverse('first'))
with self.assertRaises(NoReverseMatch):
reverse('second')
with override_settings(ROOT_URLCONF=SecondUrls):
with self.assertRaises(NoReverseMatch):
reverse('first')
self.client.get(reverse('second'))
self.client.get(reverse('first'))
with self.assertRaises(NoReverseMatch):
reverse('second')
with self.assertRaises(NoReverseMatch):
reverse('first')
with self.assertRaises(NoReverseMatch):
reverse('second')
def test_override_media_root(self):
"""
Overriding the MEDIA_ROOT setting should be reflected in the
base_location attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, '')
with self.settings(MEDIA_ROOT='test_value'):
self.assertEqual(default_storage.base_location, 'test_value')
def test_override_media_url(self):
"""
Overriding the MEDIA_URL setting should be reflected in the
base_url attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, '')
with self.settings(MEDIA_URL='/test_value/'):
self.assertEqual(default_storage.base_url, '/test_value/')
def test_override_file_upload_permissions(self):
"""
Overriding the FILE_UPLOAD_PERMISSIONS setting should be reflected in
the file_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.file_permissions_mode, 0o644)
with self.settings(FILE_UPLOAD_PERMISSIONS=0o777):
self.assertEqual(default_storage.file_permissions_mode, 0o777)
def test_override_file_upload_directory_permissions(self):
"""
Overriding the FILE_UPLOAD_DIRECTORY_PERMISSIONS setting should be
reflected in the directory_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertIsNone(default_storage.directory_permissions_mode)
with self.settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777):
self.assertEqual(default_storage.directory_permissions_mode, 0o777)
def test_override_database_routers(self):
"""
Overriding DATABASE_ROUTERS should update the master router.
"""
test_routers = [object()]
with self.settings(DATABASE_ROUTERS=test_routers):
self.assertEqual(router.routers, test_routers)
def test_override_static_url(self):
"""
Overriding the STATIC_URL setting should be reflected in the
base_url attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_URL='/test/'):
self.assertEqual(staticfiles_storage.base_url, '/test/')
def test_override_static_root(self):
"""
Overriding the STATIC_ROOT setting should be reflected in the
location attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_ROOT='/tmp/test'):
self.assertEqual(staticfiles_storage.location, os.path.abspath('/tmp/test'))
def test_override_staticfiles_storage(self):
"""
Overriding the STATICFILES_STORAGE setting should be reflected in
the value of django.contrib.staticfiles.storage.staticfiles_storage.
"""
new_class = 'ManifestStaticFilesStorage'
new_storage = 'django.contrib.staticfiles.storage.' + new_class
with self.settings(STATICFILES_STORAGE=new_storage):
self.assertEqual(staticfiles_storage.__class__.__name__, new_class)
def test_override_staticfiles_finders(self):
"""
Overriding the STATICFILES_FINDERS setting should be reflected in
the return value of django.contrib.staticfiles.finders.get_finders.
"""
current = get_finders()
self.assertGreater(len(list(current)), 1)
finders = ['django.contrib.staticfiles.finders.FileSystemFinder']
with self.settings(STATICFILES_FINDERS=finders):
self.assertEqual(len(list(get_finders())), len(finders))
def test_override_staticfiles_dirs(self):
"""
Overriding the STATICFILES_DIRS setting should be reflected in
the locations attribute of the
django.contrib.staticfiles.finders.FileSystemFinder instance.
"""
finder = get_finder('django.contrib.staticfiles.finders.FileSystemFinder')
test_path = '/tmp/test'
expected_location = ('', test_path)
self.assertNotIn(expected_location, finder.locations)
with self.settings(STATICFILES_DIRS=[test_path]):
finder = get_finder('django.contrib.staticfiles.finders.FileSystemFinder')
self.assertIn(expected_location, finder.locations)
class TestBadSetUpTestData(TestCase):
"""
An exception in setUpTestData() shouldn't leak a transaction which would
cascade across the rest of the test suite.
"""
class MyException(Exception):
pass
@classmethod
def setUpClass(cls):
try:
super().setUpClass()
except cls.MyException:
cls._in_atomic_block = connection.in_atomic_block
@classmethod
    def tearDownClass(cls):
# override to avoid a second cls._rollback_atomics() which would fail.
# Normal setUpClass() methods won't have exception handling so this
# method wouldn't typically be run.
pass
@classmethod
def setUpTestData(cls):
# Simulate a broken setUpTestData() method.
raise cls.MyException()
def test_failure_in_setUpTestData_should_rollback_transaction(self):
# setUpTestData() should call _rollback_atomics() so that the
# transaction doesn't leak.
self.assertFalse(self._in_atomic_block)
class DisallowedDatabaseQueriesTests(SimpleTestCase):
def test_disallowed_database_connections(self):
expected_message = (
"Database connections to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(AssertionError, expected_message):
connection.connect()
with self.assertRaisesMessage(AssertionError, expected_message):
connection.temporary_connection()
def test_disallowed_database_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(AssertionError, expected_message):
Car.objects.first()
def test_disallowed_database_chunked_cursor_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(AssertionError, expected_message):
next(Car.objects.iterator())
class AllowedDatabaseQueriesTests(SimpleTestCase):
databases = {'default'}
def test_allowed_database_queries(self):
Car.objects.first()
def test_allowed_database_chunked_cursor_queries(self):
next(Car.objects.iterator(), None)
class DatabaseAliasTests(SimpleTestCase):
def setUp(self):
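        # The tests below reassign the class-level 'databases' attribute, so
        # restore the original value once each test finishes.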
self.addCleanup(setattr, self.__class__, 'databases', self.databases)
def test_no_close_match(self):
self.__class__.databases = {'void'}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'void' which is not defined "
"in settings.DATABASES."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_close_match(self):
self.__class__.databases = {'defualt'}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'defualt' which is not defined "
"in settings.DATABASES. Did you mean 'default'?"
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_match(self):
self.__class__.databases = {'default', 'other'}
self.assertEqual(self._validate_databases(), frozenset({'default', 'other'}))
def test_all(self):
self.__class__.databases = '__all__'
self.assertEqual(self._validate_databases(), frozenset(connections))
@isolate_apps('test_utils', attr_name='class_apps')
class IsolatedAppsTests(SimpleTestCase):
def test_installed_apps(self):
self.assertEqual([app_config.label for app_config in self.class_apps.get_app_configs()], ['test_utils'])
def test_class_decoration(self):
class ClassDecoration(models.Model):
pass
self.assertEqual(ClassDecoration._meta.apps, self.class_apps)
@isolate_apps('test_utils', kwarg_name='method_apps')
def test_method_decoration(self, method_apps):
class MethodDecoration(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
def test_context_manager(self):
with isolate_apps('test_utils') as context_apps:
class ContextManager(models.Model):
pass
self.assertEqual(ContextManager._meta.apps, context_apps)
@isolate_apps('test_utils', kwarg_name='method_apps')
def test_nested(self, method_apps):
class MethodDecoration(models.Model):
pass
with isolate_apps('test_utils') as context_apps:
class ContextManager(models.Model):
pass
with isolate_apps('test_utils') as nested_context_apps:
class NestedContextManager(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
self.assertEqual(ContextManager._meta.apps, context_apps)
self.assertEqual(NestedContextManager._meta.apps, nested_context_apps)
class DoNothingDecorator(TestContextDecorator):
def enable(self):
pass
def disable(self):
pass
class TestContextDecoratorTests(SimpleTestCase):
@mock.patch.object(DoNothingDecorator, 'disable')
def test_exception_in_setup(self, mock_disable):
"""An exception is setUp() is reraised after disable() is called."""
class ExceptionInSetUp(unittest.TestCase):
def setUp(self):
raise NotImplementedError('reraised')
decorator = DoNothingDecorator()
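        # Applying the decorator to the class wraps its setUp() so that
        # enable() runs first and disable() runs if setUp() raises.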
decorated_test_class = decorator.__call__(ExceptionInSetUp)()
self.assertFalse(mock_disable.called)
with self.assertRaisesMessage(NotImplementedError, 'reraised'):
decorated_test_class.setUp()
self.assertTrue(mock_disable.called)
import decimal
import enum
import json
import unittest
import uuid
from django import forms
from django.core import checks, exceptions, serializers, validators
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, modify_settings, override_settings
from django.test.utils import isolate_apps
from django.utils import timezone
from . import (
PostgreSQLSimpleTestCase, PostgreSQLTestCase, PostgreSQLWidgetTestCase,
)
from .models import (
ArrayEnumModel, ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel,
IntegerArrayModel, NestedIntegerArrayModel, NullableIntegerArrayModel,
OtherTypesArrayModel, PostgreSQLModel, Tag,
)
try:
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import (
SimpleArrayField, SplitArrayField, SplitArrayWidget,
)
from psycopg2.extras import NumericRange
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=['hello', 'goodbye'])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=['1'])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertIsNone(loaded.field)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=[Tag(1), Tag(2), Tag(3)],
json=[{'a': 1}, {'b': 2}],
int_ranges=[NumericRange(10, 20), NumericRange(30, 40)],
bigint_ranges=[
NumericRange(7000000000, 10000000000),
NumericRange(50000000000, 70000000000),
]
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
self.assertEqual(instance.tags, loaded.tags)
self.assertEqual(instance.json, loaded.json)
self.assertEqual(instance.int_ranges, loaded.int_ranges)
self.assertEqual(instance.bigint_ranges, loaded.bigint_ranges)
def test_null_from_db_value_handling(self):
instance = OtherTypesArrayModel.objects.create(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=None,
)
instance.refresh_from_db()
self.assertIsNone(instance.tags)
self.assertEqual(instance.json, [])
self.assertIsNone(instance.int_ranges)
self.assertIsNone(instance.bigint_ranges)
def test_model_set_on_base_field(self):
instance = IntegerArrayModel()
field = instance._meta.get_field('field')
self.assertEqual(field.model, IntegerArrayModel)
self.assertEqual(field.base_field.model, IntegerArrayModel)
class TestQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.objs = NullableIntegerArrayModel.objects.bulk_create([
NullableIntegerArrayModel(field=[1]),
NullableIntegerArrayModel(field=[2]),
NullableIntegerArrayModel(field=[2, 3]),
NullableIntegerArrayModel(field=[20, 30, 40]),
NullableIntegerArrayModel(field=None),
])
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]),
self.objs[:1]
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=['text'])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=['text']),
[instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]),
[instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True),
self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]),
self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]),
self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2]
)
def test_in_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__in=IntegerArrayModel.objects.all().values_list('field', flat=True)
),
self.objs[2:3]
)
@unittest.expectedFailure
def test_in_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[models.F('id')]]),
self.objs[:2]
)
def test_in_as_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[models.F('field')]),
self.objs[:4]
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2]
)
@unittest.expectedFailure
def test_contained_by_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[models.F('id'), 2]),
self.objs[:2]
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3]
)
def test_icontains(self):
# Using the __icontains lookup with ArrayField is inefficient.
instance = CharArrayModel.objects.create(field=['FoO'])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__icontains='foo'),
[instance]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=['text']),
[]
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=['text']),
[]
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=['text']),
[]
)
def test_index(self):
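        # field__0 is the index transform; the 0-based lookup index is mapped
        # onto PostgreSQL's 1-based array subscripting, so this matches rows
        # whose first element is 2.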
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2),
self.objs[1:3]
)
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3),
self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1),
[instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
[instance]
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3]
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2),
self.objs[0:3]
)
def test_len_empty_array(self):
obj = NullableIntegerArrayModel.objects.create(field=[])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len=0),
[obj]
)
def test_slice(self):
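        # field__0_1 and field__0_2 are slice transforms; like the index
        # transform, their 0-based bounds are translated to PostgreSQL's
        # 1-based array slice syntax.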
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
self.objs[2:3]
)
def test_order_by_slice(self):
more_objs = (
NullableIntegerArrayModel.objects.create(field=[1, 637]),
NullableIntegerArrayModel.objects.create(field=[2, 1]),
NullableIntegerArrayModel.objects.create(field=[3, -98123]),
NullableIntegerArrayModel.objects.create(field=[4, 2]),
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.order_by('field__1'),
[
more_objs[2], more_objs[1], more_objs[3], self.objs[2],
self.objs[3], more_objs[0], self.objs[4], self.objs[1],
self.objs[0],
]
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
[instance]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
),
[self.objs[3]]
)
def test_enum_lookup(self):
class TestEnum(enum.Enum):
VALUE_1 = 'value_1'
instance = ArrayEnumModel.objects.create(array_of_enums=[TestEnum.VALUE_1])
self.assertSequenceEqual(
ArrayEnumModel.objects.filter(array_of_enums__contains=[TestEnum.VALUE_1]),
[instance]
)
def test_unsupported_lookup(self):
msg = "Unsupported lookup '0_bar' for ArrayField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2]))
msg = "Unsupported lookup '0bar' for ArrayField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0bar=[2]))
class TestDateTimeExactQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
now = timezone.now()
cls.datetimes = [now]
cls.dates = [now.date()]
cls.times = [now.time()]
cls.objs = [
DateTimeArrayModel.objects.create(datetimes=cls.datetimes, dates=cls.dates, times=cls.times),
]
def test_exact_datetimes(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(datetimes=self.datetimes),
self.objs
)
def test_exact_dates(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(dates=self.dates),
self.objs
)
def test_exact_times(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(times=self.times),
self.objs
)
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.ips = ['192.168.0.1', '::1']
cls.uuids = [uuid.uuid4()]
cls.decimals = [decimal.Decimal(1.25), 1.75]
cls.tags = [Tag(1), Tag(2), Tag(3)]
cls.objs = [
OtherTypesArrayModel.objects.create(
ips=cls.ips,
uuids=cls.uuids,
decimals=cls.decimals,
tags=cls.tags,
)
]
def test_exact_ip_addresses(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(ips=self.ips),
self.objs
)
def test_exact_uuids(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(uuids=self.uuids),
self.objs
)
def test_exact_decimals(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(decimals=self.decimals),
self.objs
)
def test_exact_tags(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(tags=self.tags),
self.objs
)
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLSimpleTestCase):
def test_field_checks(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.CharField())
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
def test_invalid_base_fields(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'postgres.E002')
def test_invalid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=[])
model = MyModel()
self.assertEqual(model.check(), [
checks.Warning(
msg=(
"ArrayField default should be a callable instead of an "
"instance so that it's not shared between all field "
"instances."
),
hint='Use a callable instead, e.g., use `list` instead of `[]`.',
obj=MyModel._meta.get_field('field'),
id='postgres.E003',
)
])
def test_valid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=list)
model = MyModel()
self.assertEqual(model.check(), [])
def test_valid_default_none(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=None)
model = MyModel()
self.assertEqual(model.check(), [])
def test_nested_field_checks(self):
"""
Nested ArrayFields are permitted.
"""
class MyModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.CharField()))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
available_apps = ['postgres_tests']
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
self.assertIsNot(new.base_field, field.base_field)
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
})
def test_adding_field_with_default(self):
# See #22962
table_name = 'postgres_tests_integerarraydefaultmodel'
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_index_migrations",
})
def test_adding_arrayfield_with_index(self):
"""
ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
"""
table_name = 'postgres_tests_chartextarrayindexmodel'
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
like_constraint_columns_list = [
v['columns']
for k, v in list(connection.introspection.get_constraints(cursor, table_name).items())
if k.endswith('_like')
]
# Only the CharField should have a LIKE index.
self.assertEqual(like_constraint_columns_list, [['char2']])
# All fields should have regular indexes.
with connection.cursor() as cursor:
indexes = [
c['columns'][0]
for c in connection.introspection.get_constraints(cursor, table_name).values()
if c['index'] and len(c['columns']) == 1
]
self.assertIn('char', indexes)
self.assertIn('char2', indexes)
self.assertIn('text', indexes)
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLSimpleTestCase):
test_data = (
'[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
)
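    # ArrayField values are serialized as a JSON-encoded string inside the
    # fixture, which is why the inner quotes are escaped above.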
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2, None])
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, [1, 2, None])
class TestValidation(PostgreSQLSimpleTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, 'item_invalid')
self.assertEqual(
cm.exception.message % cm.exception.params,
'Item 2 in the array did not validate: This field cannot be null.'
)
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
# This should not raise a validation error
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, 'nested_array_mismatch')
self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')
def test_with_base_field_error_params(self):
field = ArrayField(models.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc'], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 1 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 1, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
def test_with_validators(self):
field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
field.clean([1, 2], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([0], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 1 in the array did not validate: Ensure this value is greater than or equal to 1.'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 1, 'value': 0, 'limit_value': 1, 'show_value': 0})
class TestSimpleFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean('a,b,c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,9')
self.assertEqual(cm.exception.messages[0], 'Item 1 in the array did not validate: Enter a whole number.')
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,')
self.assertEqual(cm.exception.messages[0], 'Item 3 in the array did not validate: This field is required.')
def test_validate_fail_base_field_error_params(self):
field = SimpleArrayField(forms.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('abc,c,defg')
errors = cm.exception.error_list
self.assertEqual(len(errors), 2)
first_error = errors[0]
self.assertEqual(
first_error.message,
'Item 1 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(first_error.code, 'item_invalid')
self.assertEqual(first_error.params, {'nth': 1, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
second_error = errors[1]
self.assertEqual(
second_error.message,
'Item 3 in the array did not validate: Ensure this value has at most 2 characters (it has 4).'
)
self.assertEqual(second_error.code, 'item_invalid')
self.assertEqual(second_error.params, {'nth': 3, 'value': 'defg', 'limit_value': 2, 'show_value': 4})
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,bc,de')
self.assertEqual(cm.exception.messages[0], 'Item 1 in the array did not validate: Enter a valid value.')
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter='|')
value = field.clean('a|b|c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
value = field.clean('a,b|c,d')
self.assertEqual(value, [['a', 'b'], ['c', 'd']])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(['a', 'b', 'c'])
self.assertEqual(value, 'a,b,c')
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('')
self.assertEqual(cm.exception.messages[0], 'This field is required.')
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
def test_model_field_choices(self):
model_field = ArrayField(models.IntegerField(choices=((1, 'A'), (2, 'B'))))
form_field = model_field.formfield()
self.assertEqual(form_field.clean('1,2'), [1, 2])
def test_already_converted_value(self):
field = SimpleArrayField(forms.CharField())
vals = ['a', 'b', 'c']
self.assertEqual(field.clean(vals), vals)
def test_has_changed(self):
field = SimpleArrayField(forms.IntegerField())
self.assertIs(field.has_changed([1, 2], [1, 2]), False)
self.assertIs(field.has_changed([1, 2], '1,2'), False)
self.assertIs(field.has_changed([1, 2], '1,2,3'), True)
self.assertIs(field.has_changed([1, 2], 'a,b'), True)
def test_has_changed_empty(self):
field = SimpleArrayField(forms.CharField())
self.assertIs(field.has_changed(None, None), False)
self.assertIs(field.has_changed(None, ''), False)
self.assertIs(field.has_changed(None, []), False)
self.assertIs(field.has_changed([], None), False)
self.assertIs(field.has_changed([], ''), False)
class TestSplitFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {'array_0': '', 'array_1': '', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['This field is required.']})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})
def test_remove_trailing_nulls_not_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False),
size=2,
remove_trailing_nulls=True,
required=False,
)
data = {'array_0': '', 'array_1': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': []})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['Item 3 in the array did not validate: This field is required.']})
def test_invalid_integer(self):
msg = 'Item 2 in the array did not validate: Ensure this value is less than or equal to 100.'
with self.assertRaisesMessage(exceptions.ValidationError, msg):
SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
# To locate the widget's template.
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(str(SplitForm()), '''
<tr>
<th><label for="id_array_0">Array:</label></th>
<td>
<input id="id_array_0" name="array_0" type="text" required>
<input id="id_array_1" name="array_1" type="text" required>
<input id="id_array_2" name="array_2" type="text" required>
</td>
</tr>
''')
def test_invalid_char_length(self):
field = SplitArrayField(forms.CharField(max_length=2), size=3)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc', 'c', 'defg'])
self.assertEqual(cm.exception.messages, [
'Item 1 in the array did not validate: Ensure this value has at most 2 characters (it has 3).',
'Item 3 in the array did not validate: Ensure this value has at most 2 characters (it has 4).',
])
def test_splitarraywidget_value_omitted_from_data(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ('field',)
form = Form({'field_0': '1', 'field_1': '2'})
self.assertEqual(form.errors, {})
obj = form.save(commit=False)
self.assertEqual(obj.field, [1, 2])
class TestSplitFormWidget(PostgreSQLWidgetTestCase):
def test_get_context(self):
self.assertEqual(
SplitArrayWidget(forms.TextInput(), size=2).get_context('name', ['val1', 'val2']),
{
'widget': {
'name': 'name',
'is_hidden': False,
'required': False,
'value': "['val1', 'val2']",
'attrs': {},
'template_name': 'postgres/widgets/split_array.html',
'subwidgets': [
{
'name': 'name_0',
'is_hidden': False,
'required': False,
'value': 'val1',
'attrs': {},
'template_name': 'django/forms/widgets/text.html',
'type': 'text',
},
{
'name': 'name_1',
'is_hidden': False,
'required': False,
'value': 'val2',
'attrs': {},
'template_name': 'django/forms/widgets/text.html',
'type': 'text',
},
]
}
}
)
def test_render(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2), 'array', None,
"""
<input name="array_0" type="text">
<input name="array_1" type="text">
"""
)
def test_render_attrs(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
'array', ['val1', 'val2'], attrs={'id': 'foo'},
html=(
"""
<input id="foo_0" name="array_0" type="text" value="val1">
<input id="foo_1" name="array_1" type="text" value="val2">
"""
)
)
def test_value_omitted_from_data(self):
widget = SplitArrayWidget(forms.TextInput(), size=2)
self.assertIs(widget.value_omitted_from_data({}, {}, 'field'), True)
self.assertIs(widget.value_omitted_from_data({'field_0': 'value'}, {}, 'field'), False)
self.assertIs(widget.value_omitted_from_data({'field_1': 'value'}, {}, 'field'), False)
self.assertIs(widget.value_omitted_from_data({'field_0': 'value', 'field_1': 'value'}, {}, 'field'), False)
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.test import Client
from django.test.client import CONTENT_TYPE_RE
class CustomTestException(Exception):
pass
def no_template_view(request):
"A simple view that expects a GET request, and returns a rendered template"
return HttpResponse("No template used. Sample content: twice once twice. Content ends.")
def staff_only_view(request):
"A view that can only be visited by staff. Non staff members get an exception"
if request.user.is_staff:
return HttpResponse()
else:
raise CustomTestException()
@login_required
def get_view(request):
"A simple login protected view"
return HttpResponse("Hello world")
def request_data(request, template='base.html', data='sausage'):
"A simple view that returns the request data in the context"
return render(request, template, {
'get-foo': request.GET.get('foo'),
'get-bar': request.GET.get('bar'),
'post-foo': request.POST.get('foo'),
'post-bar': request.POST.get('bar'),
'data': data,
})
def view_with_argument(request, name):
"""A view that takes a string argument
The purpose of this view is to check that if a space is provided in
the argument, the test framework unescapes the %20 before passing
the value to the view.
"""
if name == 'Arthur Dent':
return HttpResponse('Hi, Arthur')
else:
return HttpResponse('Howdy, %s' % name)
def nested_view(request):
"""
A view that uses test client to call another view.
"""
c = Client()
c.get("/no_template_view/")
return render(request, 'base.html', {'nested': 'yes'})
@login_required
def login_protected_redirect_view(request):
"A view that redirects all requests to the GET view"
return HttpResponseRedirect('/get_view/')
def redirect_to_self_with_changing_query_view(request):
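    # Query-string values are strings, so += '0' appends a character rather
    # than incrementing; every redirect therefore targets a fresh URL, letting
    # tests follow a long, non-cyclic redirect chain.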
query = request.GET.copy()
query['counter'] += '0'
return HttpResponseRedirect('/redirect_to_self_with_changing_query_view/?%s' % urlencode(query))
def set_session_view(request):
"A view that sets a session variable"
request.session['session_var'] = 'YES'
return HttpResponse('set_session')
def check_session_view(request):
"A view that reads a session variable"
return HttpResponse(request.session.get('session_var', 'NO'))
def request_methods_view(request):
"A view that responds with the request method"
return HttpResponse('request method: %s' % request.method)
def return_unicode(request):
return render(request, 'unicode.html')
def return_undecodable_binary(request):
return HttpResponse(
b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document http://www.reportlab.com'
)
def return_json_response(request):
content_type = request.GET.get('content_type')
kwargs = {'content_type': content_type} if content_type else {}
return JsonResponse({'key': 'value'}, **kwargs)
def return_text_file(request):
"A view that parses and returns text as a file."
match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
response = HttpResponse(request.body, status=200, content_type='text/plain; charset=%s' % charset)
return response
def check_headers(request):
"A view that responds with value of the X-ARG-CHECK header"
return HttpResponse('HTTP_X_ARG_CHECK: %s' % request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))
def body(request):
"A view that is requested with GET and accesses request.body. Refs #14753."
return HttpResponse(request.body)
def read_all(request):
"A view that is requested with accesses request.read()."
return HttpResponse(request.read())
def read_buffer(request):
"A view that is requested with accesses request.read(LARGE_BUFFER)."
return HttpResponse(request.read(99999))
def request_context_view(request):
# Special attribute that won't be present on a plain HttpRequest
request.special_path = request.path
return render(request, 'request_context.html')
def render_template_multiple_times(request):
"""A view that renders a template multiple times."""
return HttpResponse(
render_to_string('base.html') + render_to_string('base.html'))
from decimal import Decimal
from sys import float_info
from django.test import SimpleTestCase
from django.utils.numberformat import format as nformat
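# For reference, the signature exercised below is roughly:
# format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
#        force_grouping=False, use_l10n=None)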
class TestNumberFormat(SimpleTestCase):
def test_format_number(self):
self.assertEqual(nformat(1234, '.'), '1234')
self.assertEqual(nformat(1234.2, '.'), '1234.2')
self.assertEqual(nformat(1234, '.', decimal_pos=2), '1234.00')
self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=','), '1234')
self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
self.assertEqual(nformat(-1234.33, '.', decimal_pos=1), '-1234.3')
# The use_l10n parameter can force thousand grouping behavior.
with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=True):
self.assertEqual(nformat(1234, '.', grouping=3, thousand_sep=',', use_l10n=False), '1234')
with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=False):
self.assertEqual(nformat(1234, '.', grouping=3, thousand_sep=',', use_l10n=True), '1,234')
def test_format_string(self):
self.assertEqual(nformat('1234', '.'), '1234')
self.assertEqual(nformat('1234.2', '.'), '1234.2')
self.assertEqual(nformat('1234', '.', decimal_pos=2), '1234.00')
self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=','), '1234')
self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
self.assertEqual(nformat('-1234.33', '.', decimal_pos=1), '-1234.3')
self.assertEqual(nformat('10000', '.', grouping=3, thousand_sep='comma', force_grouping=True), '10comma000')
def test_large_number(self):
most_max = (
'{}179769313486231570814527423731704356798070567525844996'
'598917476803157260780028538760589558632766878171540458953'
'514382464234321326889464182768467546703537516986049910576'
'551282076245490090389328944075868508455133942304583236903'
'222948165808559332123348274797826204144723168738177180919'
'29988125040402618412485836{}'
)
most_max2 = (
'{}35953862697246314162905484746340871359614113505168999'
'31978349536063145215600570775211791172655337563430809179'
'07028764928468642653778928365536935093407075033972099821'
'15310256415249098018077865788815173701691026788460916647'
'38064458963316171186642466965495956524082894463374763543'
'61838599762500808052368249716736'
)
int_max = int(float_info.max)
self.assertEqual(nformat(int_max, '.'), most_max.format('', '8'))
self.assertEqual(nformat(int_max + 1, '.'), most_max.format('', '9'))
self.assertEqual(nformat(int_max * 2, '.'), most_max2.format(''))
self.assertEqual(nformat(0 - int_max, '.'), most_max.format('-', '8'))
self.assertEqual(nformat(-1 - int_max, '.'), most_max.format('-', '9'))
self.assertEqual(nformat(-2 * int_max, '.'), most_max2.format('-'))
def test_float_numbers(self):
# A float without a fractional part (3.) results in a ".0" when no
        # decimal_pos is given. Contrast that with the Decimal('3.') case in
# test_decimal_numbers which doesn't return a fractional part.
self.assertEqual(nformat(3., '.'), '3.0')
def test_decimal_numbers(self):
self.assertEqual(nformat(Decimal('1234'), '.'), '1234')
self.assertEqual(nformat(Decimal('1234.2'), '.'), '1234.2')
self.assertEqual(nformat(Decimal('1234'), '.', decimal_pos=2), '1234.00')
self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=','), '1234')
self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
self.assertEqual(nformat(Decimal('-1234.33'), '.', decimal_pos=1), '-1234.3')
self.assertEqual(nformat(Decimal('0.00000001'), '.', decimal_pos=8), '0.00000001')
self.assertEqual(nformat(Decimal('9e-19'), '.', decimal_pos=2), '0.00')
self.assertEqual(nformat(Decimal('.00000000000099'), '.', decimal_pos=0), '0')
self.assertEqual(
nformat(Decimal('1e16'), '.', thousand_sep=',', grouping=3, force_grouping=True),
'10,000,000,000,000,000'
)
self.assertEqual(
nformat(Decimal('1e16'), '.', decimal_pos=2, thousand_sep=',', grouping=3, force_grouping=True),
'10,000,000,000,000,000.00'
)
self.assertEqual(nformat(Decimal('3.'), '.'), '3')
self.assertEqual(nformat(Decimal('3.0'), '.'), '3.0')
# Very large & small numbers.
tests = [
('9e9999', None, '9e+9999'),
('9e9999', 3, '9.000e+9999'),
('9e201', None, '9e+201'),
('9e200', None, '9e+200'),
('1.2345e999', 2, '1.23e+999'),
('9e-999', None, '9e-999'),
('1e-7', 8, '0.00000010'),
('1e-8', 8, '0.00000001'),
('1e-9', 8, '0.00000000'),
('1e-10', 8, '0.00000000'),
('1e-11', 8, '0.00000000'),
('1' + ('0' * 300), 3, '1.000e+300'),
('0.{}1234'.format('0' * 299), 3, '1.234e-300'),
]
for value, decimal_pos, expected_value in tests:
with self.subTest(value=value):
self.assertEqual(nformat(Decimal(value), '.', decimal_pos), expected_value)
def test_decimal_subclass(self):
class EuroDecimal(Decimal):
"""
Wrapper for Decimal which prefixes each amount with the € symbol.
"""
def __format__(self, specifier, **kwargs):
amount = super().__format__(specifier, **kwargs)
return '€ {}'.format(amount)
price = EuroDecimal('1.23')
self.assertEqual(nformat(price, ','), '€ 1,23')
import contextlib
import os
import py_compile
import shutil
import sys
import tempfile
import threading
import time
import weakref
import zipfile
from importlib import import_module
from pathlib import Path
from unittest import mock, skip
from django.apps.registry import Apps
from django.test import SimpleTestCase
from django.test.utils import extend_sys_path
from django.utils import autoreload
from django.utils.autoreload import WatchmanUnavailable
class TestIterModulesAndFiles(SimpleTestCase):
def import_and_cleanup(self, name):
import_module(name)
self.addCleanup(lambda: sys.path_importer_cache.clear())
self.addCleanup(lambda: sys.modules.pop(name, None))
def clear_autoreload_caches(self):
autoreload.iter_modules_and_files.cache_clear()
def assertFileFound(self, filename):
# Some temp directories are symlinks. Python resolves these fully while
# importing.
resolved_filename = filename.resolve()
self.clear_autoreload_caches()
# Test uncached access
self.assertIn(resolved_filename, list(autoreload.iter_all_python_module_files()))
# Test cached access
self.assertIn(resolved_filename, list(autoreload.iter_all_python_module_files()))
self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1)
def assertFileNotFound(self, filename):
resolved_filename = filename.resolve()
self.clear_autoreload_caches()
# Test uncached access
self.assertNotIn(resolved_filename, list(autoreload.iter_all_python_module_files()))
# Test cached access
self.assertNotIn(resolved_filename, list(autoreload.iter_all_python_module_files()))
self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1)
def temporary_file(self, filename):
dirname = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, dirname)
return Path(dirname) / filename
def test_paths_are_pathlib_instances(self):
for filename in autoreload.iter_all_python_module_files():
self.assertIsInstance(filename, Path)
def test_file_added(self):
"""
When a file is added, it's returned by iter_all_python_module_files().
"""
filename = self.temporary_file('test_deleted_removed_module.py')
filename.touch()
with extend_sys_path(str(filename.parent)):
self.import_and_cleanup('test_deleted_removed_module')
self.assertFileFound(filename.absolute())
def test_check_errors(self):
"""
When a file containing an error is imported in a function wrapped by
check_errors(), gen_filenames() returns it.
"""
filename = self.temporary_file('test_syntax_error.py')
filename.write_text("Ceci n'est pas du Python.")
with extend_sys_path(str(filename.parent)):
with self.assertRaises(SyntaxError):
autoreload.check_errors(import_module)('test_syntax_error')
self.assertFileFound(filename)
def test_check_errors_catches_all_exceptions(self):
"""
Since Python may raise arbitrary exceptions when importing code,
check_errors() must catch Exception, not just some subclasses.
"""
filename = self.temporary_file('test_exception.py')
filename.write_text('raise Exception')
with extend_sys_path(str(filename.parent)):
with self.assertRaises(Exception):
autoreload.check_errors(import_module)('test_exception')
self.assertFileFound(filename)
def test_zip_reload(self):
"""
Modules imported from zipped files have their archive location included
in the result.
"""
zip_file = self.temporary_file('zip_import.zip')
with zipfile.ZipFile(str(zip_file), 'w', zipfile.ZIP_DEFLATED) as zipf:
zipf.writestr('test_zipped_file.py', '')
with extend_sys_path(str(zip_file)):
self.import_and_cleanup('test_zipped_file')
self.assertFileFound(zip_file)
def test_bytecode_conversion_to_source(self):
""".pyc and .pyo files are included in the files list."""
filename = self.temporary_file('test_compiled.py')
filename.touch()
compiled_file = Path(py_compile.compile(str(filename), str(filename.with_suffix('.pyc'))))
filename.unlink()
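        # The source file is gone; only the compiled file should be reported
        # by iter_all_python_module_files().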
with extend_sys_path(str(compiled_file.parent)):
self.import_and_cleanup('test_compiled')
self.assertFileFound(compiled_file)
def test_weakref_in_sys_module(self):
"""iter_all_python_module_file() ignores weakref modules."""
time_proxy = weakref.proxy(time)
sys.modules['time_proxy'] = time_proxy
self.addCleanup(lambda: sys.modules.pop('time_proxy', None))
list(autoreload.iter_all_python_module_files()) # No crash.
class TestCommonRoots(SimpleTestCase):
def test_common_roots(self):
paths = (
Path('/first/second'),
Path('/first/second/third'),
Path('/first/'),
Path('/root/first/'),
)
results = autoreload.common_roots(paths)
self.assertCountEqual(results, [Path('/first/'), Path('/root/first/')])
class TestSysPathDirectories(SimpleTestCase):
def setUp(self):
self._directory = tempfile.TemporaryDirectory()
self.directory = Path(self._directory.name).resolve().absolute()
self.file = self.directory / 'test'
self.file.touch()
def tearDown(self):
self._directory.cleanup()
def test_sys_paths_with_directories(self):
with extend_sys_path(str(self.file)):
paths = list(autoreload.sys_path_directories())
self.assertIn(self.file.parent, paths)
def test_sys_paths_non_existing(self):
nonexistent_file = Path(self.directory.name) / 'does_not_exist'
with extend_sys_path(str(nonexistent_file)):
paths = list(autoreload.sys_path_directories())
self.assertNotIn(nonexistent_file, paths)
self.assertNotIn(nonexistent_file.parent, paths)
def test_sys_paths_absolute(self):
paths = list(autoreload.sys_path_directories())
self.assertTrue(all(p.is_absolute() for p in paths))
def test_sys_paths_directories(self):
with extend_sys_path(str(self.directory)):
paths = list(autoreload.sys_path_directories())
self.assertIn(self.directory, paths)
class GetReloaderTests(SimpleTestCase):
@mock.patch('django.utils.autoreload.WatchmanReloader')
def test_watchman_unavailable(self, mocked_watchman):
mocked_watchman.check_availability.side_effect = WatchmanUnavailable
self.assertIsInstance(autoreload.get_reloader(), autoreload.StatReloader)
@mock.patch.object(autoreload.WatchmanReloader, 'check_availability')
def test_watchman_available(self, mocked_available):
# If WatchmanUnavailable isn't raised, Watchman will be chosen.
mocked_available.return_value = None
result = autoreload.get_reloader()
self.assertIsInstance(result, autoreload.WatchmanReloader)
class RunWithReloaderTests(SimpleTestCase):
@mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'true'})
@mock.patch('django.utils.autoreload.get_reloader')
def test_swallows_keyboard_interrupt(self, mocked_get_reloader):
mocked_get_reloader.side_effect = KeyboardInterrupt()
autoreload.run_with_reloader(lambda: None) # No exception
@mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'false'})
@mock.patch('django.utils.autoreload.restart_with_reloader')
def test_calls_sys_exit(self, mocked_restart_reloader):
mocked_restart_reloader.return_value = 1
with self.assertRaises(SystemExit) as exc:
autoreload.run_with_reloader(lambda: None)
self.assertEqual(exc.exception.code, 1)
@mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'true'})
@mock.patch('django.utils.autoreload.start_django')
@mock.patch('django.utils.autoreload.get_reloader')
def test_calls_start_django(self, mocked_reloader, mocked_start_django):
mocked_reloader.return_value = mock.sentinel.RELOADER
autoreload.run_with_reloader(mock.sentinel.METHOD)
self.assertEqual(mocked_start_django.call_count, 1)
self.assertSequenceEqual(
mocked_start_django.call_args[0],
[mock.sentinel.RELOADER, mock.sentinel.METHOD]
)
class StartDjangoTests(SimpleTestCase):
@mock.patch('django.utils.autoreload.StatReloader')
def test_watchman_becomes_unavailable(self, mocked_stat):
mocked_stat.should_stop.return_value = True
fake_reloader = mock.MagicMock()
fake_reloader.should_stop = False
fake_reloader.run.side_effect = autoreload.WatchmanUnavailable()
autoreload.start_django(fake_reloader, lambda: None)
self.assertEqual(mocked_stat.call_count, 1)
@mock.patch('django.utils.autoreload.ensure_echo_on')
def test_echo_on_called(self, mocked_echo):
fake_reloader = mock.MagicMock()
autoreload.start_django(fake_reloader, lambda: None)
self.assertEqual(mocked_echo.call_count, 1)
@mock.patch('django.utils.autoreload.check_errors')
def test_check_errors_called(self, mocked_check_errors):
fake_method = mock.MagicMock(return_value=None)
fake_reloader = mock.MagicMock()
autoreload.start_django(fake_reloader, fake_method)
self.assertCountEqual(mocked_check_errors.call_args[0], [fake_method])
@mock.patch('threading.Thread')
@mock.patch('django.utils.autoreload.check_errors')
def test_starts_thread_with_args(self, mocked_check_errors, mocked_thread):
fake_reloader = mock.MagicMock()
fake_main_func = mock.MagicMock()
fake_thread = mock.MagicMock()
mocked_check_errors.return_value = fake_main_func
mocked_thread.return_value = fake_thread
autoreload.start_django(fake_reloader, fake_main_func, 123, abc=123)
self.assertEqual(mocked_thread.call_count, 1)
self.assertEqual(
mocked_thread.call_args[1],
{'target': fake_main_func, 'args': (123,), 'kwargs': {'abc': 123}}
)
self.assertSequenceEqual(fake_thread.setDaemon.call_args[0], [True])
self.assertTrue(fake_thread.start.called)
class TestCheckErrors(SimpleTestCase):
def test_mutates_error_files(self):
fake_method = mock.MagicMock(side_effect=RuntimeError())
wrapped = autoreload.check_errors(fake_method)
with mock.patch.object(autoreload, '_error_files') as mocked_error_files:
with self.assertRaises(RuntimeError):
wrapped()
self.assertEqual(mocked_error_files.append.call_count, 1)
class TestRaiseLastException(SimpleTestCase):
@mock.patch('django.utils.autoreload._exception', None)
def test_no_exception(self):
# Should raise no exception if _exception is None
autoreload.raise_last_exception()
def test_raises_exception(self):
class MyException(Exception):
pass
# Create an exception
try:
raise MyException('Test Message')
except MyException:
exc_info = sys.exc_info()
with mock.patch('django.utils.autoreload._exception', exc_info):
with self.assertRaises(MyException, msg='Test Message'):
autoreload.raise_last_exception()
class RestartWithReloaderTests(SimpleTestCase):
executable = '/usr/bin/python'
def patch_autoreload(self, argv):
patch_call = mock.patch('django.utils.autoreload.subprocess.call', return_value=0)
patches = [
mock.patch('django.utils.autoreload.sys.argv', argv),
mock.patch('django.utils.autoreload.sys.executable', self.executable),
mock.patch('django.utils.autoreload.sys.warnoptions', ['all']),
]
for p in patches:
p.start()
self.addCleanup(p.stop)
mock_call = patch_call.start()
self.addCleanup(patch_call.stop)
return mock_call
def test_manage_py(self):
argv = ['./manage.py', 'runserver']
mock_call = self.patch_autoreload(argv)
autoreload.restart_with_reloader()
self.assertEqual(mock_call.call_count, 1)
self.assertEqual(mock_call.call_args[0][0], [self.executable, '-Wall'] + argv)
def test_python_m_django(self):
main = '/usr/lib/pythonX.Y/site-packages/django/__main__.py'
argv = [main, 'runserver']
mock_call = self.patch_autoreload(argv)
with mock.patch('django.__main__.__file__', main):
autoreload.restart_with_reloader()
self.assertEqual(mock_call.call_count, 1)
self.assertEqual(mock_call.call_args[0][0], [self.executable, '-Wall', '-m', 'django'] + argv[1:])
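# The command line asserted in the two tests above, assembled as a hedged
# standalone sketch: -W flags are rebuilt from sys.warnoptions, and running
# under `python -m django` replaces the django/__main__.py script path with
# the -m form. demo_build_child_argv is an illustrative name, not Django's
# API.
import sys

def demo_build_child_argv(argv, is_django_main=False):
    args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
    if is_django_main:
        # The script path is dropped; -m django re-runs the package.
        args += ['-m', 'django'] + argv[1:]
    else:
        args += argv
    return args

# With sys.warnoptions == ['all'] this yields
# ['/usr/bin/python', '-Wall', './manage.py', 'runserver'] for test_manage_py.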
class ReloaderTests(SimpleTestCase):
RELOADER_CLS = None
def setUp(self):
self._tempdir = tempfile.TemporaryDirectory()
self.tempdir = Path(self._tempdir.name).resolve().absolute()
self.existing_file = self.ensure_file(self.tempdir / 'test.py')
self.nonexistent_file = (self.tempdir / 'does_not_exist.py').absolute()
self.reloader = self.RELOADER_CLS()
def tearDown(self):
self._tempdir.cleanup()
self.reloader.stop()
def ensure_file(self, path):
path.parent.mkdir(exist_ok=True, parents=True)
path.touch()
# On some platforms (observed on Linux and Windows), touch() can set an
# mtime slightly in the past, because the timestamp comes from the last
# kernel tick rather than the current absolute time. To keep the tests
# simple, explicitly set the mtime to the time observed when this
# function is called.
self.set_mtime(path, time.time())
return path.absolute()
def set_mtime(self, fp, value):
os.utime(str(fp), (value, value))
def increment_mtime(self, fp, by=1):
current_time = time.time()
self.set_mtime(fp, current_time + by)
@contextlib.contextmanager
def tick_twice(self):
ticker = self.reloader.tick()
next(ticker)
yield
next(ticker)
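# The generator protocol tick_twice() drives, as a hedged simplification:
# tick() yields once per scan, and a change made between two scans is
# reported on the second next() call. Not the real StatReloader; handling
# of newly created files and sleep timing are deliberately omitted.
def demo_tick(snapshot_func, notify_func):
    previous = dict(snapshot_func())
    while True:
        yield
        current = dict(snapshot_func())
        for path, mtime in current.items():
            if path in previous and previous[path] != mtime:
                notify_func(path)
        previous = current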
class IntegrationTests:
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_file(self, mocked_modules, notify_mock):
self.reloader.watch_file(self.existing_file)
with self.tick_twice():
self.increment_mtime(self.existing_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.existing_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_nonexistent_file(self, mocked_modules, notify_mock):
self.reloader.watch_file(self.nonexistent_file)
with self.tick_twice():
self.ensure_file(self.nonexistent_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.nonexistent_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_nonexistent_file_in_non_existing_directory(self, mocked_modules, notify_mock):
non_existing_directory = self.tempdir / 'non_existing_dir'
nonexistent_file = non_existing_directory / 'test'
self.reloader.watch_file(nonexistent_file)
with self.tick_twice():
self.ensure_file(nonexistent_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [nonexistent_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_glob(self, mocked_modules, notify_mock):
non_py_file = self.ensure_file(self.tempdir / 'non_py_file')
self.reloader.watch_dir(self.tempdir, '*.py')
with self.tick_twice():
self.increment_mtime(non_py_file)
self.increment_mtime(self.existing_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.existing_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_glob_non_existing_directory(self, mocked_modules, notify_mock):
non_existing_directory = self.tempdir / 'does_not_exist'
nonexistent_file = non_existing_directory / 'test.py'
self.reloader.watch_dir(non_existing_directory, '*.py')
with self.tick_twice():
self.ensure_file(nonexistent_file)
self.set_mtime(nonexistent_file, time.time())
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [nonexistent_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_multiple_globs(self, mocked_modules, notify_mock):
self.ensure_file(self.tempdir / 'x.test')
self.reloader.watch_dir(self.tempdir, '*.py')
self.reloader.watch_dir(self.tempdir, '*.test')
with self.tick_twice():
self.increment_mtime(self.existing_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.existing_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_overlapping_globs(self, mocked_modules, notify_mock):
self.reloader.watch_dir(self.tempdir, '*.py')
self.reloader.watch_dir(self.tempdir, '*.p*')
with self.tick_twice():
self.increment_mtime(self.existing_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [self.existing_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_glob_recursive(self, mocked_modules, notify_mock):
non_py_file = self.ensure_file(self.tempdir / 'dir' / 'non_py_file')
py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py')
self.reloader.watch_dir(self.tempdir, '**/*.py')
with self.tick_twice():
self.increment_mtime(non_py_file)
self.increment_mtime(py_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [py_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_multiple_recursive_globs(self, mocked_modules, notify_mock):
non_py_file = self.ensure_file(self.tempdir / 'dir' / 'test.txt')
py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py')
self.reloader.watch_dir(self.tempdir, '**/*.txt')
self.reloader.watch_dir(self.tempdir, '**/*.py')
with self.tick_twice():
self.increment_mtime(non_py_file)
self.increment_mtime(py_file)
self.assertEqual(notify_mock.call_count, 2)
self.assertCountEqual(notify_mock.call_args_list, [mock.call(py_file), mock.call(non_py_file)])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_nested_glob_recursive(self, mocked_modules, notify_mock):
inner_py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py')
self.reloader.watch_dir(self.tempdir, '**/*.py')
self.reloader.watch_dir(inner_py_file.parent, '**/*.py')
with self.tick_twice():
self.increment_mtime(inner_py_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [inner_py_file])
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed')
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset())
def test_overlapping_glob_recursive(self, mocked_modules, notify_mock):
py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py')
self.reloader.watch_dir(self.tempdir, '**/*.p*')
self.reloader.watch_dir(self.tempdir, '**/*.py*')
with self.tick_twice():
self.increment_mtime(py_file)
self.assertEqual(notify_mock.call_count, 1)
self.assertCountEqual(notify_mock.call_args[0], [py_file])
class BaseReloaderTests(ReloaderTests):
RELOADER_CLS = autoreload.BaseReloader
def test_watch_without_absolute(self):
with self.assertRaisesMessage(ValueError, 'test.py must be absolute.'):
self.reloader.watch_file('test.py')
def test_watch_with_single_file(self):
self.reloader.watch_file(self.existing_file)
watched_files = list(self.reloader.watched_files())
self.assertIn(self.existing_file, watched_files)
def test_watch_with_glob(self):
self.reloader.watch_dir(self.tempdir, '*.py')
watched_files = list(self.reloader.watched_files())
self.assertIn(self.existing_file, watched_files)
def test_watch_files_with_recursive_glob(self):
inner_file = self.ensure_file(self.tempdir / 'test' / 'test.py')
self.reloader.watch_dir(self.tempdir, '**/*.py')
watched_files = list(self.reloader.watched_files())
self.assertIn(self.existing_file, watched_files)
self.assertIn(inner_file, watched_files)
def test_run_loop_catches_stopiteration(self):
def mocked_tick():
yield
with mock.patch.object(self.reloader, 'tick', side_effect=mocked_tick) as tick:
self.reloader.run_loop()
self.assertEqual(tick.call_count, 1)
def test_run_loop_stop_and_return(self):
def mocked_tick(*args):
yield
self.reloader.stop()
return # Raises StopIteration
with mock.patch.object(self.reloader, 'tick', side_effect=mocked_tick) as tick:
self.reloader.run_loop()
self.assertEqual(tick.call_count, 1)
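# The loop shape the two tests above rely on: run_loop() keeps pulling from
# tick() and treats StopIteration as a normal stop signal, so a generator
# that simply returns (as in mocked_tick) ends the loop. A hedged sketch:
def demo_run_loop(reloader):
    ticker = reloader.tick()
    while not reloader.should_stop:
        try:
            next(ticker)
        except StopIteration:
            break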
def test_wait_for_apps_ready_checks_for_exception(self):
app_reg = Apps()
app_reg.ready_event.set()
# thread.is_alive() is False if it's not started.
dead_thread = threading.Thread()
self.assertFalse(self.reloader.wait_for_apps_ready(app_reg, dead_thread))
def test_wait_for_apps_ready_without_exception(self):
app_reg = Apps()
app_reg.ready_event.set()
thread = mock.MagicMock()
thread.is_alive.return_value = True
self.assertTrue(self.reloader.wait_for_apps_ready(app_reg, thread))
def skip_unless_watchman_available():
try:
autoreload.WatchmanReloader.check_availability()
except WatchmanUnavailable as e:
return skip('Watchman unavailable: %s' % e)
return lambda func: func
@skip_unless_watchman_available()
class WatchmanReloaderTests(ReloaderTests, IntegrationTests):
RELOADER_CLS = autoreload.WatchmanReloader
def test_watch_glob_ignores_non_existing_directories_two_levels(self):
with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe:
self.reloader._watch_glob(self.tempdir / 'does_not_exist' / 'more', ['*'])
self.assertFalse(mocked_subscribe.called)
def test_watch_glob_uses_existing_parent_directories(self):
with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe:
self.reloader._watch_glob(self.tempdir / 'does_not_exist', ['*'])
self.assertSequenceEqual(
mocked_subscribe.call_args[0],
[
self.tempdir, 'glob-parent-does_not_exist:%s' % self.tempdir,
['anyof', ['match', 'does_not_exist/*', 'wholename']]
]
)
def test_watch_glob_multiple_patterns(self):
with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe:
self.reloader._watch_glob(self.tempdir, ['*', '*.py'])
self.assertSequenceEqual(
mocked_subscribe.call_args[0],
[
self.tempdir, 'glob:%s' % self.tempdir,
['anyof', ['match', '*', 'wholename'], ['match', '*.py', 'wholename']]
]
)
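# Building the Watchman 'anyof' expression asserted in the two tests above,
# as a hedged standalone sketch. demo_watchman_expression is an illustrative
# name; the real _watch_glob additionally falls back to the nearest existing
# parent directory and prefixes the patterns accordingly.
def demo_watchman_expression(patterns, prefix=''):
    return ['anyof'] + [['match', prefix + pattern, 'wholename'] for pattern in patterns]

# demo_watchman_expression(['*', '*.py'])
# -> ['anyof', ['match', '*', 'wholename'], ['match', '*.py', 'wholename']]
# demo_watchman_expression(['*'], prefix='does_not_exist/')
# -> ['anyof', ['match', 'does_not_exist/*', 'wholename']]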
def test_watched_roots_contains_files(self):
paths = self.reloader.watched_roots([self.existing_file])
self.assertIn(self.existing_file.parent, paths)
def test_watched_roots_contains_directory_globs(self):
self.reloader.watch_dir(self.tempdir, '*.py')
paths = self.reloader.watched_roots([])
self.assertIn(self.tempdir, paths)
def test_watched_roots_contains_sys_path(self):
with extend_sys_path(str(self.tempdir)):
paths = self.reloader.watched_roots([])
self.assertIn(self.tempdir, paths)
def test_check_server_status(self):
self.assertTrue(self.reloader.check_server_status())
def test_check_server_status_raises_error(self):
with mock.patch.object(self.reloader.client, 'query') as mocked_query:
mocked_query.side_effect = Exception()
with self.assertRaises(autoreload.WatchmanUnavailable):
self.reloader.check_server_status()
@mock.patch('pywatchman.client')
def test_check_availability(self, mocked_client):
mocked_client().capabilityCheck.side_effect = Exception()
with self.assertRaisesMessage(WatchmanUnavailable, 'Cannot connect to the watchman service'):
self.RELOADER_CLS.check_availability()
@mock.patch('pywatchman.client')
def test_check_availability_lower_version(self, mocked_client):
mocked_client().capabilityCheck.return_value = {'version': '4.8.10'}
with self.assertRaisesMessage(WatchmanUnavailable, 'Watchman 4.9 or later is required.'):
self.RELOADER_CLS.check_availability()
def test_pywatchman_not_available(self):
with mock.patch.object(autoreload, 'pywatchman') as mocked:
mocked.__bool__.return_value = False
with self.assertRaisesMessage(WatchmanUnavailable, 'pywatchman not installed.'):
self.RELOADER_CLS.check_availability()
def test_update_watches_raises_exceptions(self):
class TestException(Exception):
pass
with mock.patch.object(self.reloader, '_update_watches') as mocked_watches:
with mock.patch.object(self.reloader, 'check_server_status') as mocked_server_status:
mocked_watches.side_effect = TestException()
mocked_server_status.return_value = True
with self.assertRaises(TestException):
self.reloader.update_watches()
self.assertIsInstance(mocked_server_status.call_args[0][0], TestException)
class StatReloaderTests(ReloaderTests, IntegrationTests):
RELOADER_CLS = autoreload.StatReloader
def setUp(self):
super().setUp()
# Shorten the sleep time to speed up tests.
self.reloader.SLEEP_TIME = 0.01
def test_snapshot_files_ignores_missing_files(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.nonexistent_file]):
self.assertEqual(dict(self.reloader.snapshot_files()), {})
def test_snapshot_files_updates(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]):
snapshot1 = dict(self.reloader.snapshot_files())
self.assertIn(self.existing_file, snapshot1)
self.increment_mtime(self.existing_file)
snapshot2 = dict(self.reloader.snapshot_files())
self.assertNotEqual(snapshot1[self.existing_file], snapshot2[self.existing_file])
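# A hedged sketch of the snapshotting driven above: map each watched file to
# its current mtime, silently skipping anything that disappears between
# listing and stat() - which is also why missing files never show up in the
# snapshot in test_snapshot_files_ignores_missing_files.
import os

def demo_snapshot(paths):
    snapshot = {}
    for path in paths:
        try:
            snapshot[path] = os.stat(str(path)).st_mtime
        except OSError:
            continue
    return snapshot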
def test_does_not_fire_without_changes(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]), \
mock.patch.object(self.reloader, 'notify_file_changed') as notifier:
mtime = self.existing_file.stat().st_mtime
initial_snapshot = {self.existing_file: mtime}
second_snapshot = self.reloader.loop_files(initial_snapshot, time.time())
self.assertEqual(second_snapshot, {})
notifier.assert_not_called()
def test_fires_when_created(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.nonexistent_file]), \
mock.patch.object(self.reloader, 'notify_file_changed') as notifier:
self.nonexistent_file.touch()
mtime = self.nonexistent_file.stat().st_mtime
second_snapshot = self.reloader.loop_files({}, mtime - 1)
self.assertCountEqual(second_snapshot.keys(), [self.nonexistent_file])
notifier.assert_called_once_with(self.nonexistent_file)
def test_fires_with_changes(self):
with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]), \
mock.patch.object(self.reloader, 'notify_file_changed') as notifier:
initial_snapshot = {self.existing_file: 1}
second_snapshot = self.reloader.loop_files(initial_snapshot, time.time())
notifier.assert_called_once_with(self.existing_file)
self.assertCountEqual(second_snapshot.keys(), [self.existing_file])
|
1de0610656242a236ada1c1ee8a668705251489c427ead3d5338c1e1c1f7dc54 | import datetime
import pickle
import unittest
from operator import attrgetter
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import Count, F, Q
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.where import NothingNode, WhereNode
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import (
FK1, Annotation, Article, Author, BaseA, Book, CategoryItem,
CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA,
Classroom, CommonMixedCaseForeignKeys, Company, Cover, CustomPk,
CustomPkTag, Detail, DumbCategory, Eaten, Employment, ExtraInfo, Fan, Food,
Identifier, Individual, Item, Job, JobResponsibilities, Join, LeafA, LeafB,
LoopX, LoopZ, ManagedModel, Member, MixedCaseDbColumnCategoryItem,
MixedCaseFieldCategoryItem, ModelA, ModelB, ModelC, ModelD, MyObject,
NamedCategory, Node, Note, NullableName, Number, ObjectA, ObjectB, ObjectC,
OneToOneCategory, Order, OrderItem, Page, Paragraph, Person, Plaything,
PointerA, Program, ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking,
Related, RelatedIndividual, RelatedObject, Report, ReportComment,
ReservedName, Responsibility, School, SharedConnection, SimpleCategory,
SingleObject, SpecialCategory, Staff, StaffUser, Student, Tag, Task,
Teacher, Ticket21203Child, Ticket21203Parent, Ticket23605A, Ticket23605B,
Ticket23605C, TvChef, Valid, X,
)
class Queries1Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name='t1', category=generic)
cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
t4 = Tag.objects.create(name='t4', parent=cls.t3)
cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
cls.n3 = Note.objects.create(note='n3', misc='foo', id=3)
ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
ann1.notes.add(cls.n1)
ann2 = Annotation.objects.create(name='a2', tag=t4)
ann2.notes.add(n2, cls.n3)
# Create these out of order so that sorting by 'id' will be different to sorting
# by 'info'. Helps detect some problems later.
cls.e2 = ExtraInfo.objects.create(info='e2', note=n2, value=41)
e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42)
cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)
cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2)
cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3)
cls.i1.tags.set([cls.t1, cls.t2])
cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=n2)
cls.i2.tags.set([cls.t1, cls.t3])
cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3)
i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3)
i4.tags.set([t4])
cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
Report.objects.create(name='r2', creator=a3)
Report.objects.create(name='r3')
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
Cover.objects.create(title="first", item=i4)
Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
qs1 = Tag.objects.filter(pk__lte=0)
qs2 = Tag.objects.filter(parent__in=qs1)
qs3 = Tag.objects.filter(parent__in=qs2)
self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'})
self.assertIn('v0', str(qs3.query).lower())
qs4 = qs3.filter(parent__in=qs1)
self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'})
# It is possible to reuse U for the second subquery, no need to use W.
self.assertNotIn('w0', str(qs4.query).lower())
# So, 'U0."id"' is referenced in SELECT and WHERE twice.
self.assertEqual(str(qs4.query).lower().count('u0.'), 4)
def test_ticket1050(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=True),
['<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__id__isnull=True),
['<Item: three>']
)
def test_ticket1801(self):
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i3),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
['<Author: a2>']
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)),
['<Item: one>']
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
[]
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
[]
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
self.assertEqual(qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertQuerysetEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
['<Item: two>']
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
['<Item: one>', '<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
['<Item: one>', '<Item: two>']
)
def test_tickets_2080_3592(self):
self.assertQuerysetEqual(
Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='one') | Q(name='a3')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(name='a3') | Q(item__name='one')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
['<Author: a2>']
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertQuerysetEqual(
Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
[]
)
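# Why Q(id__in=[]) can never match: compiling an empty IN () is
# short-circuited with EmptyResultSet instead of silently dropping the
# constraint, which is what keeps the merged queryset above empty. A hedged
# sketch of that guard, not Django's actual compiler code:
from django.core.exceptions import EmptyResultSet

def demo_compile_in_clause(values):
    values = list(values)
    if not values:
        raise EmptyResultSet
    placeholders = ', '.join(['%s'] * len(values))
    return 'IN (%s)' % placeholders, values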
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name', 'foo')
.distinct()
.count()
),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name')
.distinct()
.count()
),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertQuerysetEqual(
q1,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(q2, ['<Item: one>'])
self.assertQuerysetEqual(
(q1 | q2).order_by('name'),
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>'])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertQuerysetEqual(
((q1 & q2) | q3).order_by('name'),
['<Item: four>', '<Item: one>']
)
def test_order_by_tables(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by('name').query
self.assertEqual(len([
t for t in combined_query.alias_map if combined_query.alias_refcount[t]
]), 1)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by('greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(qs.query))
qs = qs.order_by('id')
self.assertNotIn('OUTER JOIN', str(qs.query))
def test_get_clears_ordering(self):
"""
get() should clear ordering for optimization purposes.
"""
with CaptureQueriesContext(connection) as captured_queries:
Author.objects.order_by('name').get(pk=self.a1.pk)
self.assertNotIn('order by', captured_queries[0]['sql'].lower())
def test_tickets_4088_4306(self):
self.assertQuerysetEqual(
Report.objects.filter(creator=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__num=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
self.assertQuerysetEqual(
Report.objects.filter(creator__id=self.a1.id),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__name='a1'),
['<Report: r1>']
)
def test_ticket4510(self):
self.assertQuerysetEqual(
Author.objects.filter(report__name='r1'),
['<Author: a1>']
)
def test_ticket7378(self):
self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>'])
def test_tickets_5324_6704(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__name='t4'),
['<Item: four>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
['<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
['<Item: two>', '<Item: three>', '<Item: one>']
)
self.assertQuerysetEqual(
Author.objects.exclude(item__name='one').distinct().order_by('name'),
['<Author: a2>', '<Author: a3>', '<Author: a4>']
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
['<Item: three>']
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3))
self.assertEqual(
len([
x for x in qs.query.alias_map.values()
if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
]),
1
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertQuerysetEqual(
Tag.objects.filter(parent__isnull=True).order_by('name'),
['<Tag: t1>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__isnull=True).order_by('name'),
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
def test_ticket2091(self):
t = Tag.objects.get(name='t4')
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[t]),
['<Item: four>']
)
def test_avoid_infinite_loop_on_too_many_subqueries(self):
x = Tag.objects.filter(pk=1)
local_recursion_limit = 127
msg = 'Maximum recursion depth exceeded: too many subqueries.'
with self.assertRaisesMessage(RuntimeError, msg):
for i in range(local_recursion_limit * 2):
x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
x = Tag.objects.filter(pk=1)
for _ in range(20):
x = Tag.objects.filter(pk__in=x)
self.assertEqual(
x.query.subq_aliases, {
'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD',
'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN',
}
)
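# The alias sequence asserted above, reproduced as a hedged standalone
# sketch: subquery aliases start at 'T' and, after 'Z', continue with
# two-letter names from 'AA'. Illustrative only; demo_subq_alias_sequence
# is not a Django API.
import itertools
import string

def demo_subq_alias_sequence():
    letters = string.ascii_uppercase
    singles = iter(letters[letters.index('T'):])  # T, U, ..., Z
    doubles = (a + b for a, b in itertools.product(letters, repeat=2))
    return itertools.chain(singles, doubles)

# list(itertools.islice(demo_subq_alias_sequence(), 9))
# -> ['T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB']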
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
Author.objects.all() & Tag.objects.all()
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
Author.objects.all() | Tag.objects.all()
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
self.assertEqual(
Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
4
)
def test_ticket2400(self):
self.assertQuerysetEqual(
Author.objects.filter(item__isnull=True),
['<Author: a3>']
)
self.assertQuerysetEqual(
Tag.objects.filter(item__isnull=True),
['<Tag: t5>']
)
def test_ticket2496(self):
self.assertQuerysetEqual(
Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
['<Item: four>']
)
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'):
Note.objects.filter({'note': 'n1', 'misc': 'foo'})
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertQuerysetEqual(
Item.objects.order_by('note__note', 'name'),
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertQuerysetEqual(
Author.objects.order_by('extra', '-name'),
['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>']
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertQuerysetEqual(
Cover.objects.all(),
['<Cover: first>', '<Cover: second>']
)
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertQuerysetEqual(
Item.objects.order_by('creator', 'name'),
['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>']
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>']
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by('name')
self.assertQuerysetEqual(
qs,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertEqual(len(qs.query.alias_map), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by('note__note', 'name')
self.assertQuerysetEqual(
qs,
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
self.assertEqual(repr(qs[0].note), '<Note: n2>')
self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
def test_ticket3037(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')),
['<Item: four>']
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertSequenceEqual(
Note.objects.values('misc').distinct().order_by('note', '-misc'),
[{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}]
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertIn('note_id', ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertSequenceEqual(ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}])
# ...or use the field name.
self.assertSequenceEqual(ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}])
def test_ticket6154(self):
# Multiple filter statements are joined using "AND" all the time.
self.assertQuerysetEqual(
Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)),
['<Author: a1>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id),
['<Author: a1>']
)
def test_ticket6981(self):
self.assertQuerysetEqual(
Tag.objects.select_related('parent').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket9926(self):
self.assertQuerysetEqual(
Tag.objects.select_related("parent", "category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.select_related('parent', "parent__category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1)
self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2)
self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2)
self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(select={'a': 1}),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(select={'a': 1}).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
name = "one"
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7155(self):
# Nullable dates
self.assertQuerysetEqual(
Item.objects.datetimes('modified', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7098(self):
# The semi-deprecated syntax of ordering by a raw 'table.column' string
# still works.
self.assertSequenceEqual(
Item.objects.values('note__note').order_by('queries_note.note', 'id'),
[{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}]
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertQuerysetEqual(
Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
['<Tag: t3>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
# More twisted cases, involving nested negations.
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one')),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
['<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
['<Item: four>', '<Item: one>', '<Item: three>']
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(
query2.get_compiler(qs.db).as_sql()[0],
query
)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer('name', 'creator')
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertQuerysetEqual(
self.n1.annotation_set.filter(
Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)
),
['<Annotation: a1>']
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertQuerysetEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
['<Item: one>', '<Item: two>']
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
Eaten.objects.create(meal='m')
q = Eaten.objects.none()
with self.assertNumQueries(0):
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(meal='m'), [])
self.assertQuerysetEqual(q.exclude(meal='m'), [])
self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
self.assertQuerysetEqual(q.select_related('food'), [])
self.assertQuerysetEqual(q.annotate(Count('food')), [])
self.assertQuerysetEqual(q.order_by('meal', 'food'), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(
q.extra(select={'foo': "1"}),
[]
)
self.assertQuerysetEqual(q.reverse(), [])
q.query.low_mark = 1
with self.assertRaisesMessage(AssertionError, 'Cannot change a query once a slice has been taken'):
q.extra(select={'foo': "1"})
self.assertQuerysetEqual(q.defer('meal'), [])
self.assertQuerysetEqual(q.only('meal'), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(
len(Note.objects.order_by('extrainfo__info').distinct()),
3
)
# Pickling of QuerySets using datetimes() should work.
qs = Item.objects.datetimes('created', 'month')
pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
# If a ValuesList or Values queryset is passed as an inner query, we
# make sure it's only requesting a single value and use that as the
# thing to select.
self.assertQuerysetEqual(
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
['<Tag: t2>', '<Tag: t3>']
)
# Multi-valued values() and values_list() querysets should raise errors.
with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertSequenceEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{'id': 1}, {'id': 2}, {'id': 3}]
)
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
['<Annotation: a1>']
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
yield n_obj.pk
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
def test_ticket10742(self):
# A queryset used in an __in clause isn't evaluated separately; it's
# embedded in the outer query as a subquery.
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.filter(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.exclude(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
self.assertQuerysetEqual(
Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
['<Author: a1>']
)
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
def test_ticket7076(self):
# Excluding shouldn't eliminate NULL entries.
self.assertQuerysetEqual(
Item.objects.exclude(modified=self.time1).order_by('name'),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__name=self.t1.name),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket7181(self):
# Ordering by related tables should accommodate nullable fields. (NULL
# ordering is database-dependent, so rather than checking the order we
# just count the number of results.)
self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)
# Empty querysets can be merged with others.
self.assertQuerysetEqual(
Note.objects.none() | Note.objects.all(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(
Note.objects.all() | Note.objects.none(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket9411(self):
# Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's
# sufficient that this query runs without error.
qs = Tag.objects.values_list('id', flat=True).order_by('id')
qs.query.bump_prefix(qs.query)
first = qs[0]
self.assertEqual(list(qs), list(range(first, first + 5)))
def test_ticket8439(self):
# Complex combinations of conjunctions, disjunctions and nullable
# relations.
self.assertQuerysetEqual(
Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)),
['<Author: a2>']
)
self.assertQuerysetEqual(
Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')),
['<Annotation: a1>']
)
xx = ExtraInfo.objects.create(info='xx', note=self.n3)
self.assertQuerysetEqual(
Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
['<Note: n1>', '<Note: n3>']
)
q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
self.assertEqual(
len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]),
1
)
def test_ticket17429(self):
"""
Meta.ordering=None works the same as Meta.ordering=[]
"""
original_ordering = Tag._meta.ordering
Tag._meta.ordering = None
try:
self.assertQuerysetEqual(
Tag.objects.all(),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
ordered=False
)
finally:
Tag._meta.ordering = original_ordering
def test_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4'),
[repr(i) for i in Item.objects.filter(~Q(tags__name='t4'))])
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name='t4') | Q(tags__name='t3')),
[repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | Q(tags__name='t3')))])
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name='t4') | ~Q(tags__name='t3')),
[repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | ~Q(tags__name='t3')))])
def test_nested_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
def test_double_exclude(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))])
def test_exclude_in(self):
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name__in=['t4', 't3'])),
[repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name__in=['t4', 't3'])),
[repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))])
def test_ticket_10790_1(self):
# Querying direct fields with isnull should trim the left outer join.
# It should not create an INNER JOIN either.
q = Tag.objects.filter(parent__isnull=True)
self.assertQuerysetEqual(q, ['<Tag: t1>'])
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.filter(parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.exclude(parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.exclude(parent__isnull=False)
self.assertQuerysetEqual(q, ['<Tag: t1>'])
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.exclude(parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
self.assertNotIn('INNER JOIN', str(q.query))
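# The test_ticket_10790_* assertions above and below inspect the raw SQL
# for join types. A small helper of the kind one might use when debugging
# these by hand - purely illustrative; the tests deliberately inline the
# counting:
def demo_join_counts(queryset):
    sql = str(queryset.query)
    return {
        'LEFT OUTER JOIN': sql.count('LEFT OUTER JOIN'),
        'INNER JOIN': sql.count('INNER JOIN'),
    }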
def test_ticket_10790_2(self):
# Querying across several tables should strip only the last outer join,
# while preserving the preceding inner joins.
q = Tag.objects.filter(parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t4>', '<Tag: t5>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
# Querying without isnull should not convert anything to left outer join.
q = Tag.objects.filter(parent__parent=self.t1)
self.assertQuerysetEqual(
q,
['<Tag: t4>', '<Tag: t5>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
def test_ticket_10790_3(self):
# Querying via indirect fields should populate the left outer join
q = NamedCategory.objects.filter(tag__isnull=True)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
# join to dumbcategory ptr_id
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
self.assertQuerysetEqual(q, [])
# Querying across several tables should strip only the last join, while
# preserving the preceding left outer joins.
q = NamedCategory.objects.filter(tag__parent__isnull=True)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
self.assertQuerysetEqual(q, ['<NamedCategory: Generic>'])
def test_ticket_10790_4(self):
# Querying across m2m field should not strip the m2m table from join.
q = Author.objects.filter(item__tags__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a2>', '<Author: a3>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2)
self.assertNotIn('INNER JOIN', str(q.query))
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
self.assertNotIn('INNER JOIN', str(q.query))
def test_ticket_10790_5(self):
# Querying with isnull=False across m2m field should not create outer joins
q = Author.objects.filter(item__tags__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 2)
q = Author.objects.filter(item__tags__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 3)
q = Author.objects.filter(item__tags__parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 4)
def test_ticket_10790_6(self):
# Querying with isnull=True across m2m field should not create inner joins
# and strip last outer join
q = Author.objects.filter(item__tags__parent__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>',
'<Author: a2>', '<Author: a3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
def test_ticket_10790_7(self):
# Reverse querying with isnull should not strip the join
q = Author.objects.filter(item__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
q = Author.objects.filter(item__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
def test_ticket_10790_8(self):
# Querying with combined q-objects should also strip the left outer join
q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
self.assertQuerysetEqual(
q,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
def test_ticket_10790_combine(self):
# Combining queries should not re-populate the left outer join
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__isnull=False)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q3 = q1 & q2
self.assertQuerysetEqual(q3, [])
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q2 = Tag.objects.filter(parent=self.t1)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q3 = q2 | q1
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__parent__isnull=True)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q3 = q2 | q1
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
def test_ticket19672(self):
self.assertQuerysetEqual(
Report.objects.filter(Q(creator__isnull=False) & ~Q(creator__extra__value=41)),
['<Report: r1>']
)
def test_ticket_20250(self):
# A negated Q along with an annotated queryset failed in Django 1.4
qs = Author.objects.annotate(Count('item'))
qs = qs.filter(~Q(extra__value=0)).order_by('name')
self.assertIn('SELECT', str(qs.query))
self.assertQuerysetEqual(
qs,
['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>']
)
def test_lookup_constraint_fielderror(self):
msg = (
"Cannot resolve keyword 'unknown_field' into field. Choices are: "
"annotation, category, category_id, children, id, item, "
"managedmodel, name, note, parent, parent_id"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(unknown_field__name='generic')
def test_common_mixed_case_foreign_keys(self):
"""
Valid query should be generated when fields fetched from joined tables
include FKs whose names only differ by case.
"""
c1 = SimpleCategory.objects.create(name='c1')
c2 = SimpleCategory.objects.create(name='c2')
c3 = SimpleCategory.objects.create(name='c3')
category = CategoryItem.objects.create(category=c1)
mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(CaTeGoRy=c2)
mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(category=c3)
CommonMixedCaseForeignKeys.objects.create(
category=category,
mixed_case_field_category=mixed_case_field_category,
mixed_case_db_column_category=mixed_case_db_column_category,
)
qs = CommonMixedCaseForeignKeys.objects.values(
'category',
'mixed_case_field_category',
'mixed_case_db_column_category',
'category__category',
'mixed_case_field_category__CaTeGoRy',
'mixed_case_db_column_category__category',
)
self.assertTrue(qs.first())
class Queries2Tests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=4)
Number.objects.create(num=8)
Number.objects.create(num=12)
def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
self.assertQuerysetEqual(
Number.objects.filter(num__gt=8, num__lt=13),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
['<Number: 8>']
)
def test_ticket12239(self):
# Custom lookups are registered to round float values correctly on gte
# and lt IntegerField queries.
self.assertQuerysetEqual(
Number.objects.filter(num__gt=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.0),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12.0),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lte=11.9),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.0),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.9),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
def test_ticket7759(self):
# Count should work with a partially read result set.
count = Number.objects.count()
qs = Number.objects.all()
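        # run() starts consuming qs and, with the result set only partially
        # read, issues qs.count(); the count must still match.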
def run():
for obj in qs:
return qs.count() == count
self.assertTrue(run())
class Queries3Tests(TestCase):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertQuerysetEqual(Valid.objects.all(), [])
def test_ticket8683(self):
# An error should be raised when QuerySet.datetimes() is passed the
# wrong type of field.
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
Item.objects.datetimes('name', 'month')
def test_ticket22023(self):
with self.assertRaisesMessage(TypeError, "Cannot call only() after .values() or .values_list()"):
Valid.objects.values().only()
with self.assertRaisesMessage(TypeError, "Cannot call defer() after .values() or .values_list()"):
Valid.objects.values().defer()
class Queries4Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name='t1', category=generic)
n1 = Note.objects.create(note='n1', misc='foo')
n2 = Note.objects.create(note='n2', misc='bar')
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2)
cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
cls.r2 = Report.objects.create(name='r2', creator=cls.a3)
cls.r3 = Report.objects.create(name='r3')
Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1)
Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3)
def test_ticket24525(self):
tag = Tag.objects.create()
anth100 = tag.note_set.create(note='ANTH', misc='100')
math101 = tag.note_set.create(note='MATH', misc='101')
s1 = tag.annotation_set.create(name='1')
s2 = tag.annotation_set.create(name='2')
s1.notes.set([math101, anth100])
s2.notes.set([math101])
result = math101.annotation_set.all() & tag.annotation_set.exclude(notes__in=[anth100])
self.assertEqual(list(result), [s2])
def test_ticket11811(self):
unsaved_category = NamedCategory(name="Other")
msg = 'Unsaved model instance <NamedCategory: Other> cannot be used in an ORM query.'
with self.assertRaisesMessage(ValueError, msg):
Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
def test_ticket14876(self):
        # Note: when combining the queries we need to have information
        # available about the join type of the trimmed "creator__isnull"
        # join. If we don't have that information, then the join is created as
        # an INNER JOIN and results will be incorrect.
q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
self.assertEqual(str(q1.query), str(q2.query))
q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
q2 = (
Item.objects
.filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1'))
.order_by()
)
self.assertQuerysetEqual(q1, ["<Item: i1>"])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
q2 = (
Item.objects.filter(Q(creator__report__name='e1')).order_by() |
Item.objects.filter(Q(creator=self.a1)).order_by()
)
self.assertQuerysetEqual(q1, ["<Item: i1>"])
self.assertEqual(str(q1.query), str(q2.query))
def test_combine_join_reuse(self):
        # Joins having identical connections are correctly recreated in the
        # rhs query when the queries are ORed together (#18748).
Report.objects.create(name='r4', creator=self.a1)
q1 = Author.objects.filter(report__name='r5')
q2 = Author.objects.filter(report__name='r4').filter(report__name='r1')
combined = q1 | q2
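        # Both of q2's joins on the report table must be recreated in the
        # combined query rather than collapsed into a single reused alias.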
self.assertEqual(str(combined.query).count('JOIN'), 2)
self.assertEqual(len(combined), 1)
self.assertEqual(combined[0].name, 'a1')
def test_join_reuse_order(self):
        # Join aliases are reused in order. This shouldn't raise an
        # AssertionError even when change_map contains a circular reference
        # (#26522).
s1 = School.objects.create()
s2 = School.objects.create()
s3 = School.objects.create()
t1 = Teacher.objects.create()
otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1)
qs1 = otherteachers.filter(schools=s1).filter(schools=s2)
qs2 = otherteachers.filter(schools=s1).filter(schools=s3)
self.assertQuerysetEqual(qs1 | qs2, [])
def test_ticket7095(self):
# Updates that are filtered on the model being updated are somewhat
# tricky in MySQL.
ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
self.assertEqual(ManagedModel.objects.update(data='mm'), 1)
# A values() or values_list() query across joined models must use outer
# joins appropriately.
# Note: In Oracle, we expect a null CharField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_charfield_repr = ''
else:
expected_null_charfield_repr = None
self.assertSequenceEqual(
Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
['e1', 'e2', expected_null_charfield_repr],
)
# Similarly for select_related(), joins beyond an initial nullable join
# must use outer joins so that all results are included.
self.assertQuerysetEqual(
Report.objects.select_related("creator", "creator__extra").order_by("name"),
['<Report: r1>', '<Report: r2>', '<Report: r3>']
)
# When there are multiple paths to a table from another table, we have
# to be careful not to accidentally reuse an inappropriate join when
# using select_related(). We used to return the parent's Detail record
# here by mistake.
d1 = Detail.objects.create(data="d1")
d2 = Detail.objects.create(data="d2")
m1 = Member.objects.create(name="m1", details=d1)
m2 = Member.objects.create(name="m2", details=d2)
Child.objects.create(person=m2, parent=m1)
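        # m1's children relate through Child.parent; the child's person is m2,
        # so the selected details must be m2's d2, not m1's own d1.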
obj = m1.children.select_related("person__details")[0]
self.assertEqual(obj.person.details.data, 'd2')
def test_order_by_resetting(self):
# Calling order_by() with no parameters removes any existing ordering on the
# model. But it should still be possible to add new ordering after that.
qs = Author.objects.order_by().order_by('name')
self.assertIn('ORDER BY', qs.query.get_compiler(qs.db).as_sql()[0])
def test_order_by_reverse_fk(self):
        # It is possible to order by the reverse of a foreign key, although
        # that can lead to duplicate results.
c1 = SimpleCategory.objects.create(name="category1")
c2 = SimpleCategory.objects.create(name="category2")
CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c1)
self.assertSequenceEqual(SimpleCategory.objects.order_by('categoryitem', 'pk'), [c1, c2, c1])
def test_ticket10181(self):
        # Avoid raising an EmptyResultSet if an inner query is provably
        # empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
[]
)
def test_ticket15316_filter_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_exclude_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_filter_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_exclude_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_filter_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False).order_by('pk')
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_exclude_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_filter_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_exclude_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True).order_by('pk')
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
class Queries5Tests(TestCase):
@classmethod
def setUpTestData(cls):
        # Ordering by 'rank' gives ranks 1, 2, 3 (a3, a2, a1). Ordering by the
        # Meta.ordering gives ranks 3, 2, 1 (a1, a2, a3).
n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
a1 = Author.objects.create(name='a1', num=1001, extra=e1)
a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=e2)
cls.rank1 = Ranking.objects.create(rank=2, author=a2)
Ranking.objects.create(rank=1, author=a3)
Ranking.objects.create(rank=3, author=a1)
def test_ordering(self):
# Cross model ordering is possible in Meta, too.
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
self.assertQuerysetEqual(
Ranking.objects.all().order_by('rank'),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
        # Ordering of extra() pieces is possible, too, and you can mix extra
        # fields and model fields in the ordering.
self.assertQuerysetEqual(
Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank')
qs = Ranking.objects.extra(select={'good': sql})
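        # Only the rank=3 row satisfies rank > 2, so ordering by '-good' puts
        # it first.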
self.assertEqual(
[o.good for o in qs.extra(order_by=('-good',))],
[True, False, False]
)
self.assertQuerysetEqual(
qs.extra(order_by=('-good', 'id')),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
# Despite having some extra aliases in the query, we can still omit
# them in a values() query.
dicts = qs.values('id', 'rank').order_by('id')
self.assertEqual(
[d['rank'] for d in dicts],
[2, 1, 3]
)
def test_ticket7256(self):
# An empty values() call includes all aliases, including those from an
# extra()
sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank')
qs = Ranking.objects.extra(select={'good': sql})
dicts = qs.values().order_by('id')
for d in dicts:
del d['id']
del d['author_id']
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
)
def test_ticket7045(self):
# Extra tables used to crash SQL construction on the second use.
qs = Ranking.objects.extra(tables=['django_site'])
qs.query.get_compiler(qs.db).as_sql()
# test passes if this doesn't raise an exception.
qs.query.get_compiler(qs.db).as_sql()
def test_ticket9848(self):
# Make sure that updates which only filter on sub-tables don't
# inadvertently update the wrong records (bug #9848).
author_start = Author.objects.get(name='a1')
ranking_start = Ranking.objects.get(author__name='a1')
# Make sure that the IDs from different tables don't happen to match.
self.assertQuerysetEqual(
Ranking.objects.filter(author__name='a1'),
['<Ranking: 3: a1>']
)
self.assertEqual(
Ranking.objects.filter(author__name='a1').update(rank=4636),
1
)
r = Ranking.objects.get(author__name='a1')
self.assertEqual(r.id, ranking_start.id)
self.assertEqual(r.author.id, author_start.id)
self.assertEqual(r.rank, 4636)
r.rank = 3
r.save()
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
def test_ticket5261(self):
# Test different empty excludes.
self.assertQuerysetEqual(
Note.objects.exclude(Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.filter(~Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.filter(~Q() | ~Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.exclude(~Q() & ~Q()),
['<Note: n1>', '<Note: n2>']
)
def test_extra_select_literal_percent_s(self):
# Allow %%s to escape select clauses
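        # (the query string is ultimately interpolated with its parameters, so
        # a doubled '%%s' comes out as a literal '%s')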
self.assertEqual(
Note.objects.extra(select={'foo': "'%%s'"})[0].foo,
'%s'
)
self.assertEqual(
Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo,
'%s bar %s'
)
self.assertEqual(
Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo,
'bar %s'
)
class SelectRelatedTests(TestCase):
def test_tickets_3045_3288(self):
# Once upon a time, select_related() with circular relations would loop
# infinitely if you forgot to specify "depth". Now we set an arbitrary
# default upper bound.
self.assertQuerysetEqual(X.objects.all(), [])
self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
def test_ticket7778(self):
        # Model subclasses could not be deleted if a nullable foreign key
        # related to a model that relates back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
def test_ticket7371(self):
self.assertQuerysetEqual(Related.objects.order_by('custom'), [])
class NullableRelOrderingTests(TestCase):
def test_ticket10028(self):
        # Ordering by a model related through nullable relations(!) should use
        # outer joins, so that all results are included.
Plaything.objects.create(name="p1")
self.assertQuerysetEqual(
Plaything.objects.all(),
['<Plaything: p1>']
)
def test_join_already_in_query(self):
# Ordering by model related to nullable relations should not change
# the join type of already existing joins.
Plaything.objects.create(name="p1")
s = SingleObject.objects.create(name='s')
r = RelatedObject.objects.create(single=s, f=1)
Plaything.objects.create(name="p2", others=r)
qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
self.assertNotIn('JOIN', str(qs.query))
qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
self.assertIn('INNER', str(qs.query))
qs = qs.order_by('others__single__name')
        # The ordering by others__single__name will add one new join (to
        # single) and that join must be a LEFT join. The already existing join
        # to related objects must be kept INNER. So, we have both an INNER and
        # a LEFT join in the query.
self.assertEqual(str(qs.query).count('LEFT'), 1)
self.assertEqual(str(qs.query).count('INNER'), 1)
self.assertQuerysetEqual(
qs,
['<Plaything: p2>']
)
class DisjunctiveFilterTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
ExtraInfo.objects.create(info='e1', note=cls.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
# Join object related to the LeafA we create.
LeafA.objects.create(data='first')
self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
self.assertQuerysetEqual(
LeafA.objects.filter(Q(data='first') | Q(join__b__data='second')),
['<LeafA: first>']
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(note=self.n1) | ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
['<ExtraInfo: e1>']
)
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(info='e2') | ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
['<ExtraInfo: e1>']
)
class Queries6Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name='t1', category=generic)
cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
cls.t4 = Tag.objects.create(name='t4', parent=cls.t3)
cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
n1 = Note.objects.create(note='n1', misc='foo', id=1)
ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
ann1.notes.add(n1)
Annotation.objects.create(name='a2', tag=cls.t4)
def test_parallel_iterators(self):
# Parallel iterators work.
qs = Tag.objects.all()
i1, i2 = iter(qs), iter(qs)
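        # The two iterators advance independently; each sees the full result
        # set in order.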
self.assertEqual(repr(next(i1)), '<Tag: t1>')
self.assertEqual(repr(next(i1)), '<Tag: t2>')
self.assertEqual(repr(next(i2)), '<Tag: t1>')
self.assertEqual(repr(next(i2)), '<Tag: t2>')
self.assertEqual(repr(next(i2)), '<Tag: t3>')
self.assertEqual(repr(next(i1)), '<Tag: t3>')
qs = X.objects.all()
self.assertFalse(qs)
self.assertFalse(qs)
def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of
        # constructing the SQL (so we should see a nested query here, indicated
        # by two "SELECT" occurrences).
qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
self.assertEqual(
qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
2
)
def test_tickets_8921_9188(self):
# Incorrect SQL was being generated for certain types of exclude()
# queries that crossed multi-valued relations (#8921, #9188 and some
# preemptively discovered cases).
self.assertQuerysetEqual(
PointerA.objects.filter(connection__pointerb__id=1),
[]
)
self.assertQuerysetEqual(
PointerA.objects.exclude(connection__pointerb__id=1),
[]
)
self.assertQuerysetEqual(
Tag.objects.exclude(children=None),
['<Tag: t1>', '<Tag: t3>']
)
# This example is tricky because the parent could be NULL, so only checking
# parents with annotations omits some results (tag t1, in this case).
self.assertQuerysetEqual(
Tag.objects.exclude(parent__annotation__name="a1"),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
        # The annotation->tag link is single valued and the tag->children link
        # is multi-valued. So we have to split the exclude filter in the middle
        # and then optimize the inner query without losing results.
self.assertQuerysetEqual(
Annotation.objects.exclude(tag__children__name="t2"),
['<Annotation: a2>']
)
        # Nested queries are possible (although they should be used with care,
        # since they can have performance problems on backends like MySQL).
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
['<Annotation: a1>']
)
def test_ticket3739(self):
# The all() method on querysets returns a copy of the queryset.
q1 = Tag.objects.order_by('name')
self.assertIsNot(q1, q1.all())
def test_ticket_11320(self):
qs = Tag.objects.exclude(category=None).exclude(category__name='foo')
self.assertEqual(str(qs.query).count(' INNER JOIN '), 1)
def test_distinct_ordered_sliced_subquery_aggregation(self):
self.assertEqual(Tag.objects.distinct().order_by('category__name')[:3].count(), 3)
def test_multiple_columns_with_the_same_name_slice(self):
self.assertEqual(
list(Tag.objects.order_by('name').values_list('name', 'category__name')[:2]),
[('t1', 'Generic'), ('t2', 'Generic')],
)
self.assertSequenceEqual(
Tag.objects.order_by('name').select_related('category')[:2],
[self.t1, self.t2],
)
self.assertEqual(
list(Tag.objects.order_by('-name').values_list('name', 'parent__name')[:2]),
[('t5', 't3'), ('t4', 't3')],
)
self.assertSequenceEqual(
Tag.objects.order_by('-name').select_related('parent')[:2],
[self.t5, self.t4],
)
class RawQueriesTests(TestCase):
def setUp(self):
Note.objects.create(note='n1', misc='foo', id=1)
def test_ticket14729(self):
        # Test the representation of raw queries with one or more parameters passed as a list.
query = "SELECT * FROM queries_note WHERE note = %s"
params = ['n1']
qs = Note.objects.raw(query, params=params)
self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>")
query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
params = ['n1', 'foo']
qs = Note.objects.raw(query, params=params)
self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>")
class GeneratorExpressionTests(SimpleTestCase):
def test_ticket10432(self):
# Using an empty iterator as the rvalue for an "__in"
# lookup is legal.
self.assertCountEqual(Note.objects.filter(pk__in=iter(())), [])
class ComparisonTests(TestCase):
def setUp(self):
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="x%Y"),
['<Item: x%y>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__istartswith="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iendswith="A_b"),
['<Item: a_b>']
)
class ExistsSql(TestCase):
def test_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertFalse(Tag.objects.exists())
        # Ok - so the exists query worked - but did it include too many columns?
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql']
id, name = connection.ops.quote_name('id'), connection.ops.quote_name('name')
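        # The EXISTS query shouldn't select any real columns, so neither
        # quoted name may appear in the SQL.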
self.assertNotIn(id, qstr)
self.assertNotIn(name, qstr)
def test_ticket_18414(self):
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
self.assertTrue(Article.objects.exists())
self.assertTrue(Article.objects.distinct().exists())
self.assertTrue(Article.objects.distinct()[1:3].exists())
self.assertFalse(Article.objects.distinct()[1:1].exists())
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_18414_distinct_on(self):
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
self.assertTrue(Article.objects.distinct('name').exists())
self.assertTrue(Article.objects.distinct('name')[1:2].exists())
self.assertFalse(Article.objects.distinct('name')[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
"""
    Tests for the QuerySet.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertIs(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertIs(Tag.objects.all().ordered, True)
self.assertIs(Tag.objects.all().order_by().ordered, False)
def test_explicit_ordering(self):
self.assertIs(Annotation.objects.all().order_by('id').ordered, True)
def test_empty_queryset(self):
self.assertIs(Annotation.objects.none().ordered, True)
def test_order_by_extra(self):
self.assertIs(Annotation.objects.all().extra(order_by=['id']).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count('notes'))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by('num_notes').ordered, True)
@skipUnlessDBFeature('allow_sliced_subqueries_with_in')
class SubqueryTests(TestCase):
@classmethod
def setUpTestData(cls):
NamedCategory.objects.create(id=1, name='first')
NamedCategory.objects.create(id=2, name='second')
NamedCategory.objects.create(id=3, name='third')
NamedCategory.objects.create(id=4, name='fourth')
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2])
self.assertEqual(set(query.values_list('id', flat=True)), {3})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
self.assertEqual(set(query.values_list('id', flat=True)), {1, 2})
def test_slice_subquery_and_query(self):
"""
Slice a query that has a sliced subquery
"""
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2]
self.assertEqual({x.id for x in query}, {3, 4})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3]
self.assertEqual({x.id for x in query}, {3})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:]
self.assertEqual({x.id for x in query}, {2})
def test_related_sliced_subquery(self):
"""
        Constraints on related objects can safely contain sliced subqueries
        (refs #22434).
"""
generic = NamedCategory.objects.create(id=5, name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
t2 = Tag.objects.create(name='t2', category=generic)
ManagedModel.objects.create(data='mm1', tag=t1, public=True)
mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True)
query = ManagedModel.normal_manager.filter(
tag__in=Tag.objects.order_by('-id')[:1]
)
self.assertEqual({x.id for x in query}, {mm2.id})
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3})
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3})
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3})
def test_distinct_ordered_sliced_subquery(self):
# Implicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct().order_by('name')[0:2],
).order_by('name').values_list('name', flat=True), ['first', 'fourth']
)
# Explicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct().order_by('-name').values('id')[0:2],
).order_by('name').values_list('name', flat=True), ['second', 'third']
)
# Annotated value.
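        # double_id takes the values 2, 4, 6, 8; the first two (2 and 4) are
        # themselves valid ids, so the outer filter matches ids 2 and 4.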
self.assertSequenceEqual(
DumbCategory.objects.filter(
id__in=DumbCategory.objects.annotate(
double_id=F('id') * 2
).order_by('id').distinct().values('double_id')[0:2],
).order_by('id').values_list('id', flat=True), [2, 4]
)
@skipUnlessDBFeature('allow_sliced_subqueries_with_in')
class QuerySetBitwiseOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
school = School.objects.create()
cls.room_1 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 1')
cls.room_2 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 2')
cls.room_3 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 3')
cls.room_4 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 4')
def test_or_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3])
def test_or_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4])
def test_or_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2])
def test_or_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by('-pk')[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by('-name')[:1]
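        # qs1 picks room_4 (highest pk without a blackboard); qs2 picks room_3
        # (last by name with one).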
self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4])
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
n = Note(note='Test1', misc='misc')
n.save()
e = ExtraInfo(info='good', note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
# Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
def test_no_model_options_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta)
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
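        # Any deepcopy of Options while cloning the queryset would invoke the
        # patched hook and fail the test; the original hook is restored below.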
try:
Note.objects.filter(pk__lte=F('pk') + 1).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
def test_no_fields_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta.get_field("misc"))
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
try:
Note.objects.filter(note=F('misc')).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(SimpleTestCase):
def test_emptyqueryset_values(self):
# #14366 -- Calling .values() on an empty QuerySet and then cloning
# that should not cause an error
self.assertCountEqual(Number.objects.none().values('num').order_by('num'), [])
def test_values_subquery(self):
self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values('pk')), [])
self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values_list('pk')), [])
def test_ticket_19151(self):
# #19151 -- Calling .values() or .values_list() on an empty QuerySet
# should return an empty QuerySet and not cause an error.
q = Author.objects.none()
self.assertCountEqual(q.values(), [])
self.assertCountEqual(q.values_list(), [])
class ValuesQuerysetTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=72)
def test_flat_values_list(self):
qs = Number.objects.values_list("num")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_extra_values(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(1, 2))
qs = qs.order_by('value_minus_x')
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_twice(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
qs = qs.order_by('value_minus_one').order_by('value_plus_one')
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_multiple(self):
# Postgres doesn't allow constants in order by, so check for that.
qs = Number.objects.extra(select={
'value_plus_one': 'num+1',
'value_minus_one': 'num-1',
'constant_value': '1'
})
qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_in_extra(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
order_by=['value_minus_one'],
)
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_select_params_values_order_in_extra(self):
        # testing for ticket 23259 issues
qs = Number.objects.extra(
select={'value_plus_x': 'num+%s'},
select_params=[1],
order_by=['value_plus_x'],
)
qs = qs.filter(num=72)
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_multiple_select_params_values_order_by(self):
        # testing for ticket 23259 issues
qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(72, 72))
qs = qs.order_by('value_minus_x')
qs = qs.filter(num=1)
qs = qs.values('num')
self.assertSequenceEqual(qs, [])
def test_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num')
self.assertSequenceEqual(qs, [(72,)])
def test_flat_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num', flat=True)
self.assertSequenceEqual(qs, [72])
def test_field_error_values_list(self):
# see #23443
msg = "Cannot resolve keyword %r into field. Join on 'name' not permitted." % 'foo'
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.values_list('name__foo')
def test_named_values_list_flat(self):
msg = "'flat' and 'named' can't be used together."
with self.assertRaisesMessage(TypeError, msg):
Number.objects.values_list('num', flat=True, named=True)
def test_named_values_list_bad_field_name(self):
msg = "Type names and field names must be valid identifiers: '1'"
with self.assertRaisesMessage(ValueError, msg):
Number.objects.extra(select={'1': 'num+1'}).values_list('1', named=True).first()
def test_named_values_list_with_fields(self):
qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
values = qs.values_list('num', 'num2', named=True).first()
self.assertEqual(type(values).__name__, 'Row')
self.assertEqual(values._fields, ('num', 'num2'))
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
def test_named_values_list_without_fields(self):
qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
values = qs.values_list(named=True).first()
self.assertEqual(type(values).__name__, 'Row')
self.assertEqual(values._fields, ('num2', 'id', 'num', 'id__count'))
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
self.assertEqual(values.id__count, 1)
def test_named_values_list_expression_with_default_alias(self):
expr = Count('id')
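        # 'id__count1' is the explicit annotation name; the unnamed expression
        # gets an auto-generated alias, which ends up as 'id__count2' rather
        # than the plain default 'id__count'.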
values = Number.objects.annotate(id__count1=expr).values_list(expr, 'id__count1', named=True).first()
self.assertEqual(values._fields, ('id__count2', 'id__count1'))
def test_named_values_list_expression(self):
expr = F('num') + 1
qs = Number.objects.annotate(combinedexpression1=expr).values_list(expr, 'combinedexpression1', named=True)
values = qs.first()
self.assertEqual(values._fields, ('combinedexpression2', 'combinedexpression1'))
class QuerySetSupportsPythonIdioms(TestCase):
@classmethod
def setUpTestData(cls):
some_date = datetime.datetime(2014, 5, 16, 12, 1)
for i in range(1, 8):
Article.objects.create(
name="Article {}".format(i), created=some_date)
def get_ordered_articles(self):
return Article.objects.all().order_by('name')
def test_can_get_items_using_index_and_slice_notation(self):
self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1')
self.assertQuerysetEqual(
self.get_ordered_articles()[1:3],
["<Article: Article 2>", "<Article: Article 3>"]
)
def test_slicing_with_steps_can_be_used(self):
self.assertQuerysetEqual(
self.get_ordered_articles()[::2], [
"<Article: Article 1>",
"<Article: Article 3>",
"<Article: Article 5>",
"<Article: Article 7>"
]
)
def test_slicing_without_step_is_lazy(self):
with self.assertNumQueries(0):
self.get_ordered_articles()[0:5]
    def test_slicing_with_steps_is_not_lazy(self):
with self.assertNumQueries(1):
self.get_ordered_articles()[0:5:3]
def test_slicing_can_slice_again_after_slicing(self):
self.assertQuerysetEqual(
self.get_ordered_articles()[0:5][0:2],
["<Article: Article 1>", "<Article: Article 2>"]
)
self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:], ["<Article: Article 5>"])
self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], [])
# Some more tests!
self.assertQuerysetEqual(
self.get_ordered_articles()[2:][0:2],
["<Article: Article 3>", "<Article: Article 4>"]
)
self.assertQuerysetEqual(
self.get_ordered_articles()[2:][:2],
["<Article: Article 3>", "<Article: Article 4>"]
)
self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3], ["<Article: Article 5>"])
# Using an offset without a limit is also possible.
self.assertQuerysetEqual(
self.get_ordered_articles()[5:],
["<Article: Article 6>", "<Article: Article 7>"]
)
def test_slicing_cannot_filter_queryset_once_sliced(self):
with self.assertRaisesMessage(AssertionError, "Cannot filter a query once a slice has been taken."):
Article.objects.all()[0:5].filter(id=1)
def test_slicing_cannot_reorder_queryset_once_sliced(self):
with self.assertRaisesMessage(AssertionError, "Cannot reorder a query once a slice has been taken."):
Article.objects.all()[0:5].order_by('id')
def test_slicing_cannot_combine_queries_once_sliced(self):
with self.assertRaisesMessage(AssertionError, "Cannot combine queries once a slice has been taken."):
Article.objects.all()[0:1] & Article.objects.all()[4:5]
def test_slicing_negative_indexing_not_supported_for_single_element(self):
"""hint: inverting your ordering might do what you need"""
with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
Article.objects.all()[-1]
def test_slicing_negative_indexing_not_supported_for_range(self):
"""hint: inverting your ordering might do what you need"""
with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
Article.objects.all()[0:-5]
def test_can_get_number_of_items_in_queryset_using_standard_len(self):
self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1)
def test_can_combine_queries_using_and_and_or_operators(self):
s1 = Article.objects.filter(name__exact='Article 1')
s2 = Article.objects.filter(name__exact='Article 2')
self.assertQuerysetEqual(
(s1 | s2).order_by('name'),
["<Article: Article 1>", "<Article: Article 2>"]
)
self.assertQuerysetEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=1)
Number.objects.create(num=2)
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
Article.objects.create(name='three', created=datetime.datetime.now())
Article.objects.create(name='four', created=datetime.datetime.now())
food = Food.objects.create(name='spam')
Eaten.objects.create(meal='spam with eggs', food=food)
def test_tickets_7698_10202(self):
# People like to slice with '0' as the high-water mark.
self.assertQuerysetEqual(Article.objects.all()[0:0], [])
self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
self.assertEqual(Article.objects.all()[:0].count(), 0)
with self.assertRaisesMessage(TypeError, 'Cannot reverse a query once a slice has been taken.'):
Article.objects.all()[:0].latest('created')
def test_empty_resultset_sql(self):
# ticket #12192
self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
def test_empty_sliced_subquery(self):
self.assertEqual(Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0)
def test_empty_sliced_subquery_exclude(self):
self.assertEqual(Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1)
def test_zero_length_values_slicing(self):
n = 42
with self.assertNumQueries(0):
self.assertQuerysetEqual(Article.objects.values()[n:n], [])
self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
class EscapingTests(TestCase):
def test_ticket_7302(self):
# Reserved names are appropriately escaped
ReservedName.objects.create(name='a', order=42)
ReservedName.objects.create(name='b', order=37)
self.assertQuerysetEqual(
ReservedName.objects.all().order_by('order'),
['<ReservedName: b>', '<ReservedName: a>']
)
self.assertQuerysetEqual(
ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')),
['<ReservedName: b>', '<ReservedName: a>']
)
class ToFieldTests(TestCase):
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
{lunch, dinner},
)
def test_in_subquery(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple'))),
{lunch}
)
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple').values('eaten__meal'))),
set()
)
self.assertEqual(
set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal='lunch'))),
{apple}
)
def test_nested_in_subquery(self):
extra = ExtraInfo.objects.create()
author = Author.objects.create(num=42, extra=extra)
report = Report.objects.create(creator=author)
comment = ReportComment.objects.create(report=report)
comments = ReportComment.objects.filter(
report__in=Report.objects.filter(
creator__in=extra.author_set.all(),
),
)
self.assertSequenceEqual(comments, [comment])
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
lunch_pear = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])),
{apple, pear}
)
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food=apple)),
{lunch, dinner}
)
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Food.objects.filter(eaten=lunch)),
{apple}
)
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(parent=node1)),
[node2]
)
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(node=node2)),
[node1]
)
class IsNullTests(TestCase):
def test_primary_key(self):
custom = CustomPk.objects.create(name='pk')
null = Related.objects.create()
notnull = Related.objects.create(custom=custom)
self.assertSequenceEqual(Related.objects.filter(custom__isnull=False), [notnull])
self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])
def test_to_field(self):
apple = Food.objects.create(name="apple")
Eaten.objects.create(food=apple, meal="lunch")
Eaten.objects.create(meal="lunch")
self.assertQuerysetEqual(
Eaten.objects.filter(food__isnull=False),
['<Eaten: apple at lunch>']
)
self.assertQuerysetEqual(
Eaten.objects.filter(food__isnull=True),
['<Eaten: None at lunch>']
)
class ConditionalTests(TestCase):
"""Tests whose execution depend on different environment conditions like
Python version or DB backend features"""
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
Tag.objects.create(name='t2', parent=t1, category=generic)
t3 = Tag.objects.create(name='t3', parent=t1)
Tag.objects.create(name='t4', parent=t3)
Tag.objects.create(name='t5', parent=t3)
def test_infinite_loop(self):
# If you're not careful, it's possible to introduce infinite loops via
# default ordering on foreign keys in a cycle. We detect that.
with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
list(LoopX.objects.all()) # Force queryset evaluation with list()
with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
list(LoopZ.objects.all()) # Force queryset evaluation with list()
# Note that this doesn't cause an infinite loop, since the default
# ordering on the Tag model is empty (and thus defaults to using "id"
# for the related field).
self.assertEqual(len(Tag.objects.order_by('parent')), 5)
# ... but you can still order in a non-recursive fashion among linked
# fields (the previous test failed because the default ordering was
# recursive).
self.assertQuerysetEqual(
LoopX.objects.all().order_by('y__x__y__x__id'),
[]
)
# When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
# portion in MySQL to prevent unnecessary sorting.
@skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
def test_null_ordering_added(self):
query = Tag.objects.values_list('parent_id', flat=True).order_by().query
query.group_by = ['parent_id']
sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
fragment = "ORDER BY "
pos = sql.find(fragment)
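        # Exactly one "ORDER BY" may appear in the SQL, and it must be
        # immediately followed by "NULL".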
self.assertEqual(sql.find(fragment, pos + 1), -1)
self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
def test_in_list_limit(self):
# The "in" lookup works with lists of 1000 items or more.
        # The number of items is chosen to force three different IN batches
        # on Oracle while staying below MSSQL's limit of 2100 parameters.
numbers = list(range(2050))
max_query_params = connection.features.max_query_params
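        # The assertions below only run when the backend can bind all 2050
        # parameters in a single query.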
if max_query_params is None or max_query_params >= len(numbers):
Number.objects.bulk_create(Number(num=num) for num in numbers)
for number in [1000, 1001, 2000, len(numbers)]:
with self.subTest(number=number):
self.assertEqual(Number.objects.filter(num__in=numbers[:number]).count(), number)
class UnionTests(TestCase):
"""
Tests for the union of two querysets. Bug #12252.
"""
@classmethod
def setUpTestData(cls):
objectas = []
objectbs = []
objectcs = []
a_info = ['one', 'two', 'three']
for name in a_info:
o = ObjectA(name=name)
o.save()
objectas.append(o)
b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])]
for name, number, objecta in b_info:
o = ObjectB(name=name, num=number, objecta=objecta)
o.save()
objectbs.append(o)
c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])]
for name, objecta, objectb in c_info:
o = ObjectC(name=name, objecta=objecta, objectb=objectb)
o.save()
objectcs.append(o)
def check_union(self, model, Q1, Q2):
filter = model.objects.filter
self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))
def test_A_AB(self):
Q1 = Q(name='two')
Q2 = Q(objectb__name='deux')
self.check_union(ObjectA, Q1, Q2)
def test_A_AB2(self):
Q1 = Q(name='two')
Q2 = Q(objectb__name='deux', objectb__num=2)
self.check_union(ObjectA, Q1, Q2)
def test_AB_ACB(self):
Q1 = Q(objectb__name='deux')
Q2 = Q(objectc__objectb__name='deux')
self.check_union(ObjectA, Q1, Q2)
def test_BAB_BAC(self):
Q1 = Q(objecta__objectb__name='deux')
Q2 = Q(objecta__objectc__name='ein')
self.check_union(ObjectB, Q1, Q2)
def test_BAB_BACB(self):
Q1 = Q(objecta__objectb__name='deux')
Q2 = Q(objecta__objectc__objectb__name='trois')
self.check_union(ObjectB, Q1, Q2)
def test_BA_BCA__BAB_BAC_BCA(self):
Q1 = Q(objecta__name='one', objectc__objecta__name='two')
Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois')
self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
def test_no_extra_params(self):
"""
Can create an instance of a model with only the PK field (#17056)."
"""
DumbCategory.objects.create()
class ExcludeTests(TestCase):
@classmethod
def setUpTestData(cls):
f1 = Food.objects.create(name='apples')
Food.objects.create(name='oranges')
Eaten.objects.create(food=f1, meal='dinner')
j1 = Job.objects.create(name='Manager')
r1 = Responsibility.objects.create(description='Playing golf')
j2 = Job.objects.create(name='Programmer')
r2 = Responsibility.objects.create(description='Programming')
JobResponsibilities.objects.create(job=j1, responsibility=r1)
JobResponsibilities.objects.create(job=j2, responsibility=r2)
def test_to_field(self):
self.assertQuerysetEqual(
Food.objects.exclude(eaten__meal='dinner'),
['<Food: oranges>'])
self.assertQuerysetEqual(
Job.objects.exclude(responsibilities__description='Playing golf'),
['<Job: Programmer>'])
self.assertQuerysetEqual(
Responsibility.objects.exclude(jobs__name='Manager'),
['<Responsibility: Programming>'])
def test_ticket14511(self):
alex = Person.objects.get_or_create(name='Alex')[0]
jane = Person.objects.get_or_create(name='Jane')[0]
oracle = Company.objects.get_or_create(name='Oracle')[0]
google = Company.objects.get_or_create(name='Google')[0]
microsoft = Company.objects.get_or_create(name='Microsoft')[0]
intel = Company.objects.get_or_create(name='Intel')[0]
def employ(employer, employee, title):
Employment.objects.get_or_create(employee=employee, employer=employer, title=title)
employ(oracle, alex, 'Engineer')
employ(oracle, alex, 'Developer')
employ(google, alex, 'Engineer')
employ(google, alex, 'Manager')
employ(microsoft, alex, 'Manager')
employ(intel, alex, 'Manager')
employ(microsoft, jane, 'Developer')
employ(intel, jane, 'Manager')
alex_tech_employers = alex.employers.filter(
employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
self.assertSequenceEqual(alex_tech_employers, [google, oracle])
alex_nontech_employers = alex.employers.exclude(
employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft])
class ExcludeTest17600(TestCase):
"""
    Some regression tests for ticket #17600. Some of these likely duplicate
    other existing tests.
"""
@classmethod
def setUpTestData(cls):
# Create a few Orders.
cls.o1 = Order.objects.create(pk=1)
cls.o2 = Order.objects.create(pk=2)
cls.o3 = Order.objects.create(pk=3)
# Create some OrderItems for the first order with homogeneous
# status_id values
cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
# Create some OrderItems for the second order with heterogeneous
# status_id values
cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values
cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)
def test_exclude_plain(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(items__status=1),
['<Order: 3>'])
def test_exclude_plain_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(items__status=1).distinct(),
['<Order: 3>'])
def test_exclude_with_q_object_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(Q(items__status=1)).distinct(),
['<Order: 3>'])
def test_exclude_with_q_object_no_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(Q(items__status=1)),
['<Order: 3>'])
def test_exclude_with_q_is_equal_to_plain_exclude(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1).distinct()),
list(Order.objects.exclude(Q(items__status=1)).distinct()))
def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1)),
list(Order.objects.exclude(Q(items__status=1)).distinct()))
@unittest.expectedFailure
def test_only_orders_with_all_items_having_status_1(self):
"""
        This should only return orders having ALL items set to status 1, or
        those orders not having any items at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
"""
self.assertQuerysetEqual(
Order.objects.exclude(~Q(items__status=1)).distinct(),
['<Order: 1>'])
class Exclude15786(TestCase):
"""Regression test for #15786"""
def test_ticket15786(self):
c1 = SimpleCategory.objects.create(name='c1')
c2 = SimpleCategory.objects.create(name='c2')
OneToOneCategory.objects.create(category=c1)
OneToOneCategory.objects.create(category=c2)
rel = CategoryRelationship.objects.create(first=c1, second=c2)
self.assertEqual(
CategoryRelationship.objects.exclude(
first__onetoonecategory=F('second__onetoonecategory')
).get(), rel
)
class NullInExcludeTest(TestCase):
@classmethod
def setUpTestData(cls):
NullableName.objects.create(name='i1')
NullableName.objects.create()
def test_null_in_exclude_qs(self):
none_val = '' if connection.features.interprets_empty_strings_as_nulls else None
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[]),
['i1', none_val], attrgetter('name'))
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=['i1']),
[none_val], attrgetter('name'))
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=['i3']),
['i1', none_val], attrgetter('name'))
inner_qs = NullableName.objects.filter(name='i1').values_list('name')
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=inner_qs),
[none_val], attrgetter('name'))
        # The inner queryset wasn't executed - it should have been turned
        # into a subquery above.
self.assertIs(inner_qs._result_cache, None)
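        # Background: SQL uses three-valued logic, so a plain
        # "name NOT IN ('i1')" never matches NULL names. The assertions
        # above rely on the ORM compensating for this so that excluding a
        # value still returns rows where the column is NULL.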
@unittest.expectedFailure
def test_col_not_in_list_containing_null(self):
"""
The following case is not handled properly because
SQL's COL NOT IN (list containing null) handling is too weird to
abstract away.
"""
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[None]),
['i1'], attrgetter('name'))
def test_double_exclude(self):
self.assertEqual(
list(NullableName.objects.filter(~~Q(name='i1'))),
list(NullableName.objects.filter(Q(name='i1'))))
self.assertNotIn(
'IS NOT NULL',
str(NullableName.objects.filter(~~Q(name='i1')).query))
class EmptyStringsAsNullTest(TestCase):
"""
Filtering on non-null character fields works as expected.
The reason for these tests is that Oracle treats '' as NULL, and this
can cause problems in query construction. Refs #17957.
"""
@classmethod
def setUpTestData(cls):
cls.nc = NamedCategory.objects.create(name='')
def test_direct_exclude(self):
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name__in=['nonexistent']),
[self.nc.pk], attrgetter('pk')
)
def test_joined_exclude(self):
self.assertQuerysetEqual(
DumbCategory.objects.exclude(namedcategory__name__in=['nonexistent']),
[self.nc.pk], attrgetter('pk')
)
def test_21001(self):
foo = NamedCategory.objects.create(name='foo')
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name=''),
[foo.pk], attrgetter('pk')
)
class ProxyQueryCleanupTest(TestCase):
def test_evaluated_proxy_count(self):
"""
Generating the query string doesn't alter the query's state
in irreversible ways. Refs #18248.
"""
ProxyCategory.objects.create()
qs = ProxyCategory.objects.all()
self.assertEqual(qs.count(), 1)
str(qs.query)
self.assertEqual(qs.count(), 1)
class WhereNodeTest(SimpleTestCase):
class DummyNode:
def as_sql(self, compiler, connection):
return 'dummy', []
class MockCompiler:
def compile(self, node):
return node.as_sql(self, connection)
def __call__(self, name):
return connection.ops.quote_name(name)
def test_empty_full_handling_conjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', []))
w = WhereNode(children=[NothingNode(), self.DummyNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
def test_empty_full_handling_disjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()], connector='OR')
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR')
self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', []))
w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR')
self.assertEqual(w.as_sql(compiler, connection), ('dummy', []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', []))
def test_empty_nodes(self):
compiler = WhereNodeTest.MockCompiler()
empty_w = WhereNode()
w = WhereNode(children=[empty_w, empty_w])
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w.negate()
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.connector = 'OR'
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[empty_w, NothingNode()], connector='OR')
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[empty_w, NothingNode()], connector='AND')
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
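    # Summary of the conventions exercised above: a node raising
    # EmptyResultSet means "matches no rows", while a node compiling to
    # ('', []) means "adds no constraint". Negation flips the former into
    # the latter and vice versa, and AND/OR combine them accordingly.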
class QuerySetExceptionTests(SimpleTestCase):
def test_iter_exceptions(self):
qs = ExtraInfo.objects.only('author')
msg = "'ManyToOneRel' object has no attribute 'attname'"
with self.assertRaisesMessage(AttributeError, msg):
list(qs)
def test_invalid_qs_list(self):
# Test for #19895 - second iteration over invalid queryset
# raises errors.
qs = Article.objects.order_by('invalid_column')
msg = "Cannot resolve keyword 'invalid_column' into field."
with self.assertRaisesMessage(FieldError, msg):
list(qs)
with self.assertRaisesMessage(FieldError, msg):
list(qs)
def test_invalid_order_by(self):
msg = "Invalid order_by arguments: ['*']"
with self.assertRaisesMessage(FieldError, msg):
list(Article.objects.order_by('*'))
def test_invalid_queryset_model(self):
msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.filter(extra=Article.objects.all()))
class NullJoinPromotionOrTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.d1 = ModelD.objects.create(name='foo')
d2 = ModelD.objects.create(name='bar')
cls.a1 = ModelA.objects.create(name='a1', d=cls.d1)
c = ModelC.objects.create(name='c')
b = ModelB.objects.create(name='b', c=c)
cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2)
def test_ticket_17886(self):
        # The first Q object generates the match; the rest of the filters
        # shouldn't remove the match even if they match nothing. The problem
        # here was that b__name generates a LOUTER JOIN, then b__c__name
        # generates a join to c, which the ORM tried to promote but failed,
        # as that join isn't nullable.
q_obj = (
Q(d__name='foo') |
Q(b__name='foo') |
Q(b__c__name='foo')
)
qset = ModelA.objects.filter(q_obj)
self.assertEqual(list(qset), [self.a1])
# We generate one INNER JOIN to D. The join is direct and not nullable
# so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
# for the b->c join, as a->b is nullable.
self.assertEqual(str(qset.query).count('INNER JOIN'), 1)
def test_isnull_filter_promotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
def test_null_join_demotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
self.assertIn(' INNER JOIN ', str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
self.assertIn(' INNER JOIN ', str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_ticket_21366(self):
n = Note.objects.create(note='n', misc='m')
e = ExtraInfo.objects.create(info='info', note=n)
a = Author.objects.create(name='Author1', num=1, extra=e)
Ranking.objects.create(rank=1, author=a)
r1 = Report.objects.create(name='Foo', creator=a)
r2 = Report.objects.create(name='Bar')
Report.objects.create(name='Bar', creator=a)
qs = Report.objects.filter(
Q(creator__ranking__isnull=True) |
Q(creator__ranking__rank=1, name='Foo')
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count(' JOIN '), 2)
self.assertSequenceEqual(qs.order_by('name'), [r2, r1])
def test_ticket_21748(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
i3 = Identifier.objects.create(name='i3')
Program.objects.create(identifier=i1)
Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
self.assertSequenceEqual(Identifier.objects.filter(program=None, channel=None), [i3])
self.assertSequenceEqual(Identifier.objects.exclude(program=None, channel=None).order_by('name'), [i1, i2])
def test_ticket_21748_double_negated_and(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
        # Check that ~~Q() (or equivalently .exclude(~Q())) works like Q()
        # for join promotion.
qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(str(qs1_filter.query).count('JOIN'),
str(qs1_doubleneg.query).count('JOIN'))
self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
str(qs1_doubleneg.query).count('INNER JOIN'))
def test_ticket_21748_double_negated_or(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Test OR + double negation. The expected result is that channel is
        # LOUTER joined and program INNER joined.
qs1_filter = Identifier.objects.filter(
Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
).order_by('pk')
qs1_doubleneg = Identifier.objects.exclude(
~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
).order_by('pk')
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(str(qs1_filter.query).count('JOIN'),
str(qs1_doubleneg.query).count('JOIN'))
self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
str(qs1_doubleneg.query).count('INNER JOIN'))
def test_ticket_21748_complex_filter(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case: one query written so that each NOT is
        # pushed to the lowest level of the boolean tree, and another query
        # where this isn't done.
qs1 = Identifier.objects.filter(
~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
).order_by('pk')
qs2 = Identifier.objects.filter(
Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
).order_by('pk')
self.assertQuerysetEqual(qs1, qs2, lambda x: x)
self.assertEqual(str(qs1.query).count('JOIN'),
str(qs2.query).count('JOIN'))
self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
self.assertEqual(str(qs1.query).count('INNER JOIN'),
str(qs2.query).count('INNER JOIN'))
class ReverseJoinTrimmingTest(TestCase):
def test_reverse_trimming(self):
# We don't accidentally trim reverse joins - we can't know if there is
# anything on the other side of the join, so trimming reverse joins
# can't be done, ever.
t = Tag.objects.create()
qs = Tag.objects.filter(annotation__tag=t.pk)
self.assertIn('INNER JOIN', str(qs.query))
self.assertEqual(list(qs), [])
class JoinReuseTest(TestCase):
"""
The queries reuse joins sensibly (for example, direct joins
are always reused).
"""
def test_fk_reuse(self):
qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_select_related(self):
qs = Annotation.objects.filter(tag__name='foo').select_related('tag')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_annotation(self):
qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_disjunction(self):
qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_order_by(self):
qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_revo2o_reuse(self):
qs = Detail.objects.filter(member__name='foo').filter(member__name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_revfk_noreuse(self):
qs = Author.objects.filter(report__name='r4').filter(report__name='r1')
self.assertEqual(str(qs.query).count('JOIN'), 2)
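        # Reverse foreign keys are multi-valued: report__name='r4' and
        # report__name='r1' can be satisfied by two *different* Report rows
        # for the same Author, so each filter() call needs its own join
        # instead of reusing the first one.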
def test_inverted_q_across_relations(self):
"""
When a trimmable join is specified in the query (here school__), the
        ORM detects it and removes unnecessary joins. The set of reusable
        joins is updated after trimming the query so that other lookups don't
consider that the outer query's filters are in effect for the subquery
(#26551).
"""
springfield_elementary = School.objects.create()
hogward = School.objects.create()
Student.objects.create(school=springfield_elementary)
hp = Student.objects.create(school=hogward)
Classroom.objects.create(school=hogward, name='Potion')
Classroom.objects.create(school=springfield_elementary, name='Main')
qs = Student.objects.filter(
~(Q(school__classroom__name='Main') & Q(school__classroom__has_blackboard=None))
)
self.assertSequenceEqual(qs, [hp])
class DisjunctionPromotionTests(TestCase):
def test_disjunction_promotion_select_related(self):
fk1 = FK1.objects.create(f1='f1', f2='f2')
basea = BaseA.objects.create(a=fk1)
qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
self.assertEqual(str(qs.query).count(' JOIN '), 0)
qs = qs.select_related('a', 'b')
self.assertEqual(str(qs.query).count(' INNER JOIN '), 0)
self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2)
with self.assertNumQueries(1):
self.assertSequenceEqual(qs, [basea])
self.assertEqual(qs[0].a, fk1)
self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# Now we have two different joins in an ORed condition, these
# must be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # The ANDed a__f2 filter allows us to keep using INNER JOIN even
        # inside the ORed case. If the join to a returns nothing, the ANDed
        # filter for a__f2 can't be true.
qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion3_demote(self):
# This one needs demotion logic: the first filter causes a to be
# outer joined, the second filter makes it inner join again.
qs = BaseA.objects.filter(
Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion4_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
        # Demotion is needed for the "a" join: it is marked as an outer join
        # by the above filter (even if it is trimmed away).
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion5_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        # Note that the above filters on a force the join to be an inner
        # join even if it is trimmed.
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
# So, now the a__f1 join doesn't need promotion.
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# But b__f1 does.
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # Now the join to a is created as LOUTER.
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion6(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
def test_disjunction_promotion7(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | (Q(a__f1='bar')) & (Q(b__f1='bar') | Q(c__f1='foo')))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion_fexpression(self):
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
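        # Pattern in the assertions above: a relation referenced (directly
        # or via F()) in only one branch of a disjunction must be LEFT OUTER
        # joined so rows without a related object survive the OR, while a
        # relation constrained in every branch can stay INNER joined.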
class ManyToManyExcludeTest(TestCase):
def test_exclude_many_to_many(self):
Identifier.objects.create(name='extra')
program = Program.objects.create(identifier=Identifier.objects.create(name='program'))
channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel'))
channel.programs.add(program)
        # channel contains 'program', so all Identifiers except that one
        # should be returned.
self.assertQuerysetEqual(
Identifier.objects.exclude(program__channel=channel).order_by('name'),
['<Identifier: channel>', '<Identifier: extra>']
)
self.assertQuerysetEqual(
Identifier.objects.exclude(program__channel=None).order_by('name'),
['<Identifier: program>']
)
def test_ticket_12823(self):
pg3 = Page.objects.create(text='pg3')
pg2 = Page.objects.create(text='pg2')
pg1 = Page.objects.create(text='pg1')
pa1 = Paragraph.objects.create(text='pa1')
pa1.page.set([pg1, pg2])
pa2 = Paragraph.objects.create(text='pa2')
pa2.page.set([pg2, pg3])
pa3 = Paragraph.objects.create(text='pa3')
ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
b1 = Book.objects.create(title='b1', chapter=ch1)
b2 = Book.objects.create(title='b2', chapter=ch2)
b3 = Book.objects.create(title='b3', chapter=ch3)
q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
self.assertNotIn('IS NOT NULL', str(q.query))
self.assertEqual(len(q), 2)
self.assertNotIn(b1, q)
self.assertIn(b2, q)
self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
def test_ticket_19964(self):
my1 = MyObject.objects.create(data='foo')
my1.parent = my1
my1.save()
my2 = MyObject.objects.create(data='bar', parent=my1)
parents = MyObject.objects.filter(parent=F('id'))
children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id'))
self.assertEqual(list(parents), [my1])
# Evaluating the children query (which has parents as part of it) does
# not change results for the parents query.
self.assertEqual(list(children), [my2])
self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
def test_ticket_20101(self):
"""
        Tests ORed combining of querysets in the exclude-subquery case.
"""
t = Tag.objects.create(name='foo')
a1 = Annotation.objects.create(tag=t, name='a1')
a2 = Annotation.objects.create(tag=t, name='a2')
a3 = Annotation.objects.create(tag=t, name='a3')
n = Note.objects.create(note='foo', misc='bar')
qs1 = Note.objects.exclude(annotation__in=[a1, a2])
qs2 = Note.objects.filter(annotation__in=[a3])
self.assertIn(n, qs1)
self.assertNotIn(n, qs2)
self.assertIn(n, (qs1 | qs2))
class EmptyStringPromotionTests(SimpleTestCase):
def test_empty_string_promotion(self):
qs = RelatedObject.objects.filter(single__name='')
if connection.features.interprets_empty_strings_as_nulls:
self.assertIn('LEFT OUTER JOIN', str(qs.query))
else:
self.assertNotIn('LEFT OUTER JOIN', str(qs.query))
class ValuesSubqueryTests(TestCase):
def test_values_in_subquery(self):
# If a values() queryset is used, then the given values
# will be used instead of forcing use of the relation's field.
o1 = Order.objects.create(id=-2)
o2 = Order.objects.create(id=-1)
oi1 = OrderItem.objects.create(order=o1, status=0)
oi1.status = oi1.pk
oi1.save()
OrderItem.objects.create(order=o2, status=0)
        # The query below should match o1, as it has a related OrderItem
        # whose pk equals its status.
self.assertSequenceEqual(Order.objects.filter(items__in=OrderItem.objects.values_list('status')), [o1])
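        # The values_list('status') subquery makes the IN comparison use the
        # selected status column instead of OrderItem's primary key, so the
        # generated SQL is expected to look roughly like (names approximate):
        #   ... WHERE "orderitem"."id" IN (SELECT "status" FROM "orderitem")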
class DoubleInSubqueryTests(TestCase):
def test_double_subquery_in(self):
lfa1 = LeafA.objects.create(data='foo')
lfa2 = LeafA.objects.create(data='bar')
lfb1 = LeafB.objects.create(data='lfb1')
lfb2 = LeafB.objects.create(data='lfb2')
Join.objects.create(a=lfa1, b=lfb1)
Join.objects.create(a=lfa2, b=lfb2)
leaf_as = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
joins = Join.objects.filter(a__in=leaf_as).values_list('b__id', flat=True)
qs = LeafB.objects.filter(pk__in=joins)
self.assertSequenceEqual(qs, [lfb1])
class Ticket18785Tests(SimpleTestCase):
def test_ticket_18785(self):
# Test join trimming from ticket18785
qs = Item.objects.exclude(
note__isnull=False
).filter(
name='something', creator__extra__isnull=True
).order_by()
self.assertEqual(1, str(qs.query).count('INNER JOIN'))
self.assertEqual(0, str(qs.query).count('OUTER JOIN'))
class Ticket20788Tests(TestCase):
def test_ticket_20788(self):
Paragraph.objects.create()
paragraph = Paragraph.objects.create()
page = paragraph.page.create()
chapter = Chapter.objects.create(paragraph=paragraph)
Book.objects.create(chapter=chapter)
paragraph2 = Paragraph.objects.create()
Page.objects.create()
chapter2 = Chapter.objects.create(paragraph=paragraph2)
book2 = Book.objects.create(chapter=chapter2)
sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
self.assertSequenceEqual(sentences_not_in_pub, [book2])
class Ticket12807Tests(TestCase):
def test_ticket_12807(self):
p1 = Paragraph.objects.create()
p2 = Paragraph.objects.create()
# The ORed condition below should have no effect on the query - the
# ~Q(pk__in=[]) will always be True.
qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
self.assertSequenceEqual(qs, [p1])
class RelatedLookupTypeTests(TestCase):
error = 'Cannot query "%s": Must be "%s" instance.'
@classmethod
def setUpTestData(cls):
cls.oa = ObjectA.objects.create(name="oa")
cls.poa = ProxyObjectA.objects.get(name="oa")
cls.coa = ChildObjectA.objects.create(name="coa")
cls.wrong_type = Order.objects.create(id=cls.oa.pk)
cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
cls.pob = ProxyObjectB.objects.all()
ObjectC.objects.create(childobjecta=cls.coa)
def test_wrong_type_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup.
"""
# Passing incorrect object type
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
ObjectB.objects.get(objecta=self.wrong_type)
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
ObjectB.objects.filter(objecta__in=[self.wrong_type])
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
ObjectB.objects.filter(objecta=self.wrong_type)
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
# Passing an object of the class on which query is done.
with self.assertRaisesMessage(ValueError, self.error % (self.ob, ObjectA._meta.object_name)):
ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
with self.assertRaisesMessage(ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)):
ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
def test_wrong_backward_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup for backward relations.
"""
with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
ObjectA.objects.exclude(objectb=self.oa)
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
ObjectA.objects.get(objectb=self.wrong_type)
def test_correct_lookup(self):
"""
When passing proxy model objects, child objects, or parent objects,
lookups work fine.
"""
out_a = ['<ObjectA: oa>']
out_b = ['<ObjectB: ob>', '<ObjectB: pob>']
out_c = ['<ObjectC: >']
# proxy model objects
self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b)
self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2)
# child objects
self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b)
self.assertQuerysetEqual(
ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'),
out_b
)
# parent objects
self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
# QuerySet related object type checking shouldn't issue queries
# (the querysets aren't evaluated here, hence zero queries) (#23266).
with self.assertNumQueries(0):
ObjectB.objects.filter(objecta__in=ObjectA.objects.all())
def test_values_queryset_lookup(self):
"""
        #23396 - Ensure values querysets are not checked for compatibility
        with the lookup field.
"""
# Make sure the num and objecta field values match.
ob = ObjectB.objects.get(name='ob')
ob.num = ob.objecta.pk
ob.save()
pob = ObjectB.objects.get(name='pob')
pob.num = pob.objecta.pk
pob.save()
self.assertQuerysetEqual(ObjectB.objects.filter(
objecta__in=ObjectB.objects.all().values_list('num')
).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>'])
class Ticket14056Tests(TestCase):
def test_ticket_14056(self):
s1 = SharedConnection.objects.create(data='s1')
s2 = SharedConnection.objects.create(data='s2')
s3 = SharedConnection.objects.create(data='s3')
PointerA.objects.create(connection=s2)
expected_ordering = (
[s1, s3, s2] if connection.features.nulls_order_largest
else [s2, s1, s3]
)
self.assertSequenceEqual(SharedConnection.objects.order_by('-pointera__connection', 'pk'), expected_ordering)
class Ticket20955Tests(TestCase):
def test_ticket_20955(self):
jack = Staff.objects.create(name='jackstaff')
jackstaff = StaffUser.objects.create(staff=jack)
jill = Staff.objects.create(name='jillstaff')
jillstaff = StaffUser.objects.create(staff=jill)
task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
task_get = Task.objects.get(pk=task.pk)
# Load data so that assertNumQueries doesn't complain about the get
# version's queries.
task_get.creator.staffuser.staff
task_get.owner.staffuser.staff
qs = Task.objects.select_related(
'creator__staffuser__staff', 'owner__staffuser__staff')
self.assertEqual(str(qs.query).count(' JOIN '), 6)
task_select_related = qs.get(pk=task.pk)
with self.assertNumQueries(0):
self.assertEqual(task_select_related.creator.staffuser.staff,
task_get.creator.staffuser.staff)
self.assertEqual(task_select_related.owner.staffuser.staff,
task_get.owner.staffuser.staff)
class Ticket21203Tests(TestCase):
def test_ticket_21203(self):
p = Ticket21203Parent.objects.create(parent_bool=True)
c = Ticket21203Child.objects.create(parent=p)
qs = Ticket21203Child.objects.select_related('parent').defer('parent__created')
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
def test_values_no_promotion_for_existing(self):
qs = Node.objects.filter(parent__parent__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
qs = qs.values('parent__parent__id')
self.assertIn(' INNER JOIN ', str(qs.query))
# Make sure there is a left outer join without the filter.
qs = Node.objects.values('parent__parent__id')
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = ObjectB.objects.values('objecta__name')
self.assertIn(' INNER JOIN ', str(qs.query))
def test_ticket_21376(self):
a = ObjectA.objects.create()
ObjectC.objects.create(objecta=a)
qs = ObjectC.objects.filter(
Q(objecta=a) | Q(objectb__objecta=a),
)
qs = qs.filter(
Q(objectb=1) | Q(objecta=a),
)
self.assertEqual(qs.count(), 1)
tblname = connection.ops.quote_name(ObjectB._meta.db_table)
self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
def test_ticket_21787(self):
sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1')
sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2')
sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3')
c1 = CategoryItem.objects.create(category=sc1)
CategoryItem.objects.create(category=sc2)
self.assertSequenceEqual(SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by('name'), [sc2, sc3])
self.assertSequenceEqual(SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1])
class ReverseM2MCustomPkTests(TestCase):
def test_ticket_21879(self):
cpt1 = CustomPkTag.objects.create(id='cpt1', tag='cpt1')
cp1 = CustomPk.objects.create(name='cp1', extra='extra')
cp1.custompktag_set.add(cpt1)
self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1])
self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1])
class Ticket22429Tests(TestCase):
def test_ticket_22429(self):
sc1 = School.objects.create()
st1 = Student.objects.create(school=sc1)
sc2 = School.objects.create()
st2 = Student.objects.create(school=sc2)
cr = Classroom.objects.create(school=sc1)
cr.students.add(st1)
queryset = Student.objects.filter(~Q(classroom__school=F('school')))
self.assertSequenceEqual(queryset, [st2])
class Ticket23605Tests(TestCase):
def test_ticket_23605(self):
# Test filtering on a complicated q-object from ticket's report.
# The query structure is such that we have multiple nested subqueries.
# The original problem was that the inner queries weren't relabeled
# correctly.
# See also #24090.
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=10000.0)
Ticket23605B.objects.create(
field_b0=10000.0, field_b1=True,
modelc_fk=c1, modela_fk=a1)
complex_q = Q(pk__in=Ticket23605A.objects.filter(
Q(
# True for a1 as field_b0 = 10000, field_c0=10000
# False for a2 as no ticket23605b found
ticket23605b__field_b0__gte=1000000 /
F("ticket23605b__modelc_fk__field_c0")
) &
# True for a1 (field_b1=True)
Q(ticket23605b__field_b1=True) & ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter(
~(
                    # Same filters as the commented filters above, but
                    # double-negated (once for the Q() above, once for the
                    # parentheses), so again a1 matches and a2 doesn't.
Q(field_b1=True) &
Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
)
))).filter(ticket23605b__field_b1=True))
qs1 = Ticket23605A.objects.filter(complex_q)
self.assertSequenceEqual(qs1, [a1])
qs2 = Ticket23605A.objects.exclude(complex_q)
self.assertSequenceEqual(qs2, [a2])
class TestTicket24279(TestCase):
    def test_ticket_24279(self):
School.objects.create()
qs = School.objects.filter(Q(pk__in=()) | Q())
self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(SimpleTestCase):
def test_invalid_values(self):
msg = "invalid literal for int() with base 10: 'abc'"
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag='abc')
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag__in=[123, 'abc'])
class TestTicket24605(TestCase):
def test_ticket_24605(self):
"""
Subquery table names should be quoted.
"""
i1 = Individual.objects.create(alive=True)
RelatedIndividual.objects.create(related=i1)
i2 = Individual.objects.create(alive=False)
RelatedIndividual.objects.create(related=i2)
i3 = Individual.objects.create(alive=True)
i4 = Individual.objects.create(alive=False)
self.assertSequenceEqual(Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)), [i4])
self.assertSequenceEqual(
Individual.objects.exclude(Q(alive=False), Q(related_individual__isnull=True)).order_by('pk'),
[i1, i2, i3]
)
class Ticket23622Tests(TestCase):
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_23622(self):
"""
        Make sure __pk__in and __in work the same for related fields when
        using a DISTINCT ON subquery.
"""
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=0.0)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=123,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=23,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=234,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=12,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=567,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=76,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=7,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=56,
field_b1=True,
modelc_fk=c1,
)
qx = (
Q(ticket23605b__pk__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
Q(ticket23605b__field_b0__gte=300)
)
qy = (
Q(ticket23605b__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
Q(ticket23605b__field_b0__gte=300)
)
self.assertEqual(
set(Ticket23605A.objects.filter(qx).values_list('pk', flat=True)),
set(Ticket23605A.objects.filter(qy).values_list('pk', flat=True))
)
self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
|
a173b7fd93e3e0773088639a70b27a17d9688dd6d325d5bc52e3318ceae279a7 | """Tests related to django.db.backends that haven't been organized."""
import datetime
import threading
import unittest
import warnings
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper
from django.db.models.sql.constants import CURSOR
from django.test import (
TestCase, TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import (
Article, Object, ObjectReference, Person, Post, RawData, Reporter,
ReporterProxy, SchoolClass, Square,
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ,
)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
        fields which clash with strings passed to it (e.g. 'year') (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
years = SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
        Test the custom ``django_date_extract`` method, in particular against
        fields which clash with strings passed to it (e.g. 'day') (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
classes = SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
with connection.cursor() as cursor:
connection.ops.last_executed_query(cursor, '', ())
def test_debug_sql(self):
list(Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""last_executed_query() returns a string."""
data = RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, str)
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
with connection.cursor() as cursor:
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.identifier_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
with self.assertRaises(Exception):
cursor.executemany(query, [(1, 2, 3)])
with self.assertRaises(Exception):
cursor.executemany(query, [(1,)])
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
    check that it does. Refs #8901.
"""
available_apps = ['backends']
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""
An m2m save of a model with a long name and a long m2m field name
doesn't error (#8901).
"""
obj = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""
Sequence resetting as part of a flush with model with long name and
long pk name doesn't error (#8901).
"""
        # A full flush is expensive for the full test suite, so dig into the
        # internals to generate the likely offending SQL and run it manually.
        # Some convenience aliases:
VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
sql_list = connection.ops.sql_flush(no_style(), tables, sequences)
with connection.cursor() as cursor:
for statement in sql_list:
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [Post])
with connection.cursor() as cursor:
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
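        # For reference (backend-specific, an illustrative assumption): on
        # PostgreSQL sequence_reset_sql() emits a setval() statement along
        # the lines of
        #   SELECT setval(pg_get_serial_sequence('"backends_post"', 'id'), max("id")) FROM "backends_post";
        # which is why the insert above gets a PK greater than the manually
        # chosen 10.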
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
with connection.cursor() as cursor:
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
with connection.cursor() as cursor:
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
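        # Both tests hinge on DB-API paramstyle rules: when parameters are
        # passed, a literal per-cent sign must be doubled ('%%'), while a
        # parameterless execute() (previous test) leaves '%s' untouched.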
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
opts = Square._meta
tbl = connection.introspection.identifier_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
with connection.cursor() as cursor:
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
args = ((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 5)
args = ((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = ({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 5)
args = ({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as unicode objects #6254
qn = connection.ops.quote_name
Person(first_name="John", last_name="Doe").save()
Person(first_name="Jane", last_name="Doe").save()
Person(first_name="Mary", last_name="Agnelline").save()
Person(first_name="Peter", last_name="Parker").save()
Person(first_name="Clark", last_name="Kent").save()
opts2 = Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
with connection.cursor() as cursor:
cursor.execute(
'SELECT %s, %s FROM %s ORDER BY %s' % (
qn(f3.column),
qn(f4.column),
connection.introspection.identifier_converter(opts2.db_table),
qn(f3.column),
)
)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Creating an existing table returns a DatabaseError """
query = 'CREATE TABLE %s (id INTEGER);' % Article._meta.db_table
with connection.cursor() as cursor:
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
        # Both InterfaceError and ProgrammingError seem to be used when
        # accessing a closed cursor (psycopg2 has InterfaceError; the rest
        # seem to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
        # There isn't a generic way to test that cursors are closed, but
        # psycopg2 offers a way to check that via the closed attribute, so
        # run this only on psycopg2.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
is_usable() doesn't crash when the database disconnects (#21553).
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
with connection.cursor() as cursor:
reset_queries()
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
self.assertCountEqual(connection.queries[0], ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
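        # Each entry has the documented shape, e.g. (illustrative values):
        #   {'sql': 'SELECT 1', 'time': '0.001'}
        # with 'time' recorded as a string of seconds.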
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
The backend doesn't store an unlimited number of queries (#12581).
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connection = connection.copy()
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
msg = "Limit for query logging exceeded, only the last 3 queries will be returned."
with self.assertWarnsMessage(UserWarning, msg):
self.assertEqual(3, len(new_connection.queries))
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
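# connection.queries is backed by queries_log, which behaves like a
# bounded deque: once the limit is reached, the oldest entries are
# silently discarded. A sketch of the idea (not the exact backend code):
#   from collections import deque
#   queries_log = deque(maxlen=3)  # appending a 4th item drops the 1st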
def test_timezone_none_use_tz_false(self):
connection.ensure_connection()
with self.settings(TIME_ZONE=None, USE_TZ=False):
connection.init_connection_state()
# These tests aren't conditional because that would require differentiating
# between MySQL+InnoDB and MySQL+MyISAM (something we currently can't do).
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks, make sure
# constraints are also enforced for proxy models (refs #17519).
a2 = Article(
headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30,
)
with self.assertRaises(IntegrityError):
a2.save()
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks, make sure
# constraints are also enforced for proxy models (refs #17519).
# Create another article
r_proxy = ReporterProxy.objects.get(pk=self.r.pk)
Article.objects.create(
headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy,
)
# Retrieve the second article from the DB
a2 = Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
with self.assertRaises(IntegrityError):
a2.save()
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, it should be possible to write
bad data without raising IntegrityError.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using the context manager), it
should be possible to write bad data without raising IntegrityError.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
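# The pattern exercised above and in the next test mirrors how rows with
# circular or forward FK references can be bulk-loaded: write with checks
# disabled, then validate everything at once. A sketch, assuming a
# hypothetical load_fixture_rows() helper:
#   with connection.constraint_checks_disabled():
#       load_fixture_rows()
#   connection.check_constraints()  # raises IntegrityError on bad rows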
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ['backends']
def test_default_connection_thread_local(self):
"""
The default connection (i.e. django.db.connection) is different for
each thread (#17258).
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work, whereas
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
# Each created connection got a different inner connection.
self.assertEqual(len({conn.connection for conn in connections_dict.values()}), 3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
The connections are different for each thread (#17258).
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
A connection can be passed from one thread to the other (#17258).
"""
Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
A connection that is not explicitly shareable cannot be closed by
another thread (#17258).
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
Zero as the id of an AutoField should raise an exception on MySQL,
because MySQL doesn't allow zero as an autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = Object.objects.create()
ref = ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(Object.objects.filter(id=12345).exists())
ref = ObjectReference.objects.create(obj_id=12345)
ref_new = ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = Object.objects.create()
obj.related_objects.create()
self.assertEqual(Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = Object._meta.get_field("related_objects").remote_field.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
28a3eb62b1322cadd128d5e829c1b13dd99affca4a5ebd7532db1c99b0ad3036 |
import datetime
from unittest import mock, skipIf, skipUnless
from django.core.exceptions import FieldError
from django.db import NotSupportedError, connection
from django.db.models import (
F, OuterRef, RowRange, Subquery, Value, ValueRange, Window, WindowFrame,
)
from django.db.models.aggregates import Avg, Max, Min, Sum
from django.db.models.functions import (
CumeDist, DenseRank, ExtractYear, FirstValue, Lag, LastValue, Lead,
NthValue, Ntile, PercentRank, Rank, RowNumber, Upper,
)
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from .models import Employee
@skipUnlessDBFeature('supports_over_clause')
class WindowFunctionTests(TestCase):
@classmethod
def setUpTestData(cls):
Employee.objects.bulk_create([
Employee(name=e[0], salary=e[1], department=e[2], hire_date=e[3], age=e[4])
for e in [
('Jones', 45000, 'Accounting', datetime.datetime(2005, 11, 1), 20),
('Williams', 37000, 'Accounting', datetime.datetime(2009, 6, 1), 20),
('Jenson', 45000, 'Accounting', datetime.datetime(2008, 4, 1), 20),
('Adams', 50000, 'Accounting', datetime.datetime(2013, 7, 1), 50),
('Smith', 55000, 'Sales', datetime.datetime(2007, 6, 1), 30),
('Brown', 53000, 'Sales', datetime.datetime(2009, 9, 1), 30),
('Johnson', 40000, 'Marketing', datetime.datetime(2012, 3, 1), 30),
('Smith', 38000, 'Marketing', datetime.datetime(2009, 10, 1), 20),
('Wilkinson', 60000, 'IT', datetime.datetime(2011, 3, 1), 40),
('Moore', 34000, 'IT', datetime.datetime(2013, 8, 1), 40),
('Miller', 100000, 'Management', datetime.datetime(2005, 6, 1), 40),
('Johnson', 80000, 'Management', datetime.datetime(2005, 7, 1), 50),
]
])
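# The data set above: 12 employees across 5 departments (Accounting x4,
# Sales x2, Marketing x2, IT x2, Management x2), hired between 2005 and
# 2013. The expected values in the tests below can be verified against
# it by hand.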
def test_dense_rank(self):
qs = Employee.objects.annotate(rank=Window(
expression=DenseRank(),
order_by=ExtractYear(F('hire_date')).asc(),
))
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 2),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 3),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 4),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 4),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 4),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 5),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 6),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 7),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 7),
], lambda entry: (entry.name, entry.salary, entry.department, entry.hire_date, entry.rank), ordered=False)
def test_department_salary(self):
qs = Employee.objects.annotate(department_sum=Window(
expression=Sum('salary'),
partition_by=F('department'),
order_by=[F('hire_date').asc()],
)).order_by('department', 'department_sum')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 45000, 45000),
('Jenson', 'Accounting', 45000, 90000),
('Williams', 'Accounting', 37000, 127000),
('Adams', 'Accounting', 50000, 177000),
('Wilkinson', 'IT', 60000, 60000),
('Moore', 'IT', 34000, 94000),
('Miller', 'Management', 100000, 100000),
('Johnson', 'Management', 80000, 180000),
('Smith', 'Marketing', 38000, 38000),
('Johnson', 'Marketing', 40000, 78000),
('Smith', 'Sales', 55000, 55000),
('Brown', 'Sales', 53000, 108000),
], lambda entry: (entry.name, entry.department, entry.salary, entry.department_sum))
def test_rank(self):
"""
Rank the employees based on the year they were hired. Since multiple
employees were hired in the same year, the ranking contains gaps.
"""
qs = Employee.objects.annotate(rank=Window(
expression=Rank(),
order_by=ExtractYear(F('hire_date')).asc(),
))
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 4),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 5),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 6),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 6),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 6),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 9),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 10),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 11),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 11),
], lambda entry: (entry.name, entry.salary, entry.department, entry.hire_date, entry.rank), ordered=False)
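# RANK vs. DENSE_RANK on this data set: the three 2005 hires all tie at
# rank 1; the next hire (2007) gets rank 4 with RANK (three rows precede
# it) but rank 2 with DENSE_RANK, which never leaves gaps after ties.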
def test_row_number(self):
"""
The row number window function computes the number based on the order
in which the tuples were inserted. Some backends, such as Oracle,
require an ordering clause in the Window expression.
"""
qs = Employee.objects.annotate(row_number=Window(
expression=RowNumber(),
order_by=F('pk').asc(),
)).order_by('pk')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 1),
('Williams', 'Accounting', 2),
('Jenson', 'Accounting', 3),
('Adams', 'Accounting', 4),
('Smith', 'Sales', 5),
('Brown', 'Sales', 6),
('Johnson', 'Marketing', 7),
('Smith', 'Marketing', 8),
('Wilkinson', 'IT', 9),
('Moore', 'IT', 10),
('Miller', 'Management', 11),
('Johnson', 'Management', 12),
], lambda entry: (entry.name, entry.department, entry.row_number))
@skipIf(connection.vendor == 'oracle', "Oracle requires ORDER BY in row_number, ANSI SQL doesn't")
def test_row_number_no_ordering(self):
"""
The row number window function computes the number based on the order
in which the tuples were inserted.
"""
# Add a default ordering for consistent results across databases.
qs = Employee.objects.annotate(row_number=Window(
expression=RowNumber(),
)).order_by('pk')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 1),
('Williams', 'Accounting', 2),
('Jenson', 'Accounting', 3),
('Adams', 'Accounting', 4),
('Smith', 'Sales', 5),
('Brown', 'Sales', 6),
('Johnson', 'Marketing', 7),
('Smith', 'Marketing', 8),
('Wilkinson', 'IT', 9),
('Moore', 'IT', 10),
('Miller', 'Management', 11),
('Johnson', 'Management', 12),
], lambda entry: (entry.name, entry.department, entry.row_number))
def test_avg_salary_department(self):
qs = Employee.objects.annotate(avg_salary=Window(
expression=Avg('salary'),
order_by=F('department').asc(),
partition_by='department',
)).order_by('department', '-salary', 'name')
self.assertQuerysetEqual(qs, [
('Adams', 50000, 'Accounting', 44250.00),
('Jenson', 45000, 'Accounting', 44250.00),
('Jones', 45000, 'Accounting', 44250.00),
('Williams', 37000, 'Accounting', 44250.00),
('Wilkinson', 60000, 'IT', 47000.00),
('Moore', 34000, 'IT', 47000.00),
('Miller', 100000, 'Management', 90000.00),
('Johnson', 80000, 'Management', 90000.00),
('Johnson', 40000, 'Marketing', 39000.00),
('Smith', 38000, 'Marketing', 39000.00),
('Smith', 55000, 'Sales', 54000.00),
('Brown', 53000, 'Sales', 54000.00),
], transform=lambda row: (row.name, row.salary, row.department, row.avg_salary))
def test_lag(self):
"""
For each employee, fetch the next lowest salary in the employee's
department (the previous row when ordered by ascending salary).
Return None for the employee with the lowest salary.
"""
qs = Employee.objects.annotate(lag=Window(
expression=Lag(expression='salary', offset=1),
partition_by=F('department'),
order_by=[F('salary').asc(), F('name').asc()],
)).order_by('department', F('salary').asc(), F('name').asc())
self.assertQuerysetEqual(qs, [
('Williams', 37000, 'Accounting', None),
('Jenson', 45000, 'Accounting', 37000),
('Jones', 45000, 'Accounting', 45000),
('Adams', 50000, 'Accounting', 45000),
('Moore', 34000, 'IT', None),
('Wilkinson', 60000, 'IT', 34000),
('Johnson', 80000, 'Management', None),
('Miller', 100000, 'Management', 80000),
('Smith', 38000, 'Marketing', None),
('Johnson', 40000, 'Marketing', 38000),
('Brown', 53000, 'Sales', None),
('Smith', 55000, 'Sales', 53000),
], transform=lambda row: (row.name, row.salary, row.department, row.lag))
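# Roughly the window clause generated for the annotation above (a
# backend-dependent sketch, not the exact compiler output):
#   LAG("salary", 1) OVER (PARTITION BY "department"
#                          ORDER BY "salary" ASC, "name" ASC)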
def test_first_value(self):
qs = Employee.objects.annotate(first_value=Window(
expression=FirstValue('salary'),
partition_by=F('department'),
order_by=F('hire_date').asc(),
)).order_by('department', 'hire_date')
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 45000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 45000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 60000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 38000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 55000),
], lambda row: (row.name, row.salary, row.department, row.hire_date, row.first_value))
def test_last_value(self):
qs = Employee.objects.annotate(last_value=Window(
expression=LastValue('hire_date'),
partition_by=F('department'),
order_by=F('hire_date').asc(),
))
self.assertQuerysetEqual(qs, [
('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, datetime.date(2013, 7, 1)),
('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, datetime.date(2008, 4, 1)),
('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, datetime.date(2005, 11, 1)),
('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, datetime.date(2009, 6, 1)),
('Moore', 'IT', datetime.date(2013, 8, 1), 34000, datetime.date(2013, 8, 1)),
('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, datetime.date(2011, 3, 1)),
('Miller', 'Management', datetime.date(2005, 6, 1), 100000, datetime.date(2005, 6, 1)),
('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, datetime.date(2005, 7, 1)),
('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, datetime.date(2012, 3, 1)),
('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, datetime.date(2009, 10, 1)),
('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, datetime.date(2009, 9, 1)),
('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, datetime.date(2007, 6, 1)),
], transform=lambda row: (row.name, row.department, row.hire_date, row.salary, row.last_value), ordered=False)
def test_function_list_of_values(self):
qs = Employee.objects.annotate(lead=Window(
expression=Lead(expression='salary'),
order_by=[F('hire_date').asc(), F('name').desc()],
partition_by='department',
)).values_list('name', 'salary', 'department', 'hire_date', 'lead') \
.order_by('department', F('hire_date').asc(), F('name').desc())
self.assertNotIn('GROUP BY', str(qs.query))
self.assertSequenceEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
])
def test_min_department(self):
"""An alternative way to specify a query for FirstValue."""
qs = Employee.objects.annotate(min_salary=Window(
expression=Min('salary'),
partition_by=F('department'),
order_by=[F('salary').asc(), F('name').asc()]
)).order_by('department', 'salary', 'name')
self.assertQuerysetEqual(qs, [
('Williams', 'Accounting', 37000, 37000),
('Jenson', 'Accounting', 45000, 37000),
('Jones', 'Accounting', 45000, 37000),
('Adams', 'Accounting', 50000, 37000),
('Moore', 'IT', 34000, 34000),
('Wilkinson', 'IT', 60000, 34000),
('Johnson', 'Management', 80000, 80000),
('Miller', 'Management', 100000, 80000),
('Smith', 'Marketing', 38000, 38000),
('Johnson', 'Marketing', 40000, 38000),
('Brown', 'Sales', 53000, 53000),
('Smith', 'Sales', 55000, 53000),
], lambda row: (row.name, row.department, row.salary, row.min_salary))
def test_max_per_year(self):
"""
Find the maximum salary awarded in the same year as the
employee was hired, regardless of the department.
"""
qs = Employee.objects.annotate(max_salary_year=Window(
expression=Max('salary'),
order_by=ExtractYear('hire_date').asc(),
partition_by=ExtractYear('hire_date')
)).order_by(ExtractYear('hire_date'), 'salary')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 45000, 2005, 100000),
('Johnson', 'Management', 80000, 2005, 100000),
('Miller', 'Management', 100000, 2005, 100000),
('Smith', 'Sales', 55000, 2007, 55000),
('Jenson', 'Accounting', 45000, 2008, 45000),
('Williams', 'Accounting', 37000, 2009, 53000),
('Smith', 'Marketing', 38000, 2009, 53000),
('Brown', 'Sales', 53000, 2009, 53000),
('Wilkinson', 'IT', 60000, 2011, 60000),
('Johnson', 'Marketing', 40000, 2012, 40000),
('Moore', 'IT', 34000, 2013, 50000),
('Adams', 'Accounting', 50000, 2013, 50000),
], lambda row: (row.name, row.department, row.salary, row.hire_date.year, row.max_salary_year))
def test_cume_dist(self):
"""
Compute the cumulative distribution for the employees based on the
salary in increasing order. Equal to rank/total number of rows (12).
"""
qs = Employee.objects.annotate(cume_dist=Window(
expression=CumeDist(),
order_by=F('salary').asc(),
)).order_by('salary', 'name')
# Round result of cume_dist because Oracle uses greater precision.
self.assertQuerysetEqual(qs, [
('Moore', 'IT', 34000, 0.0833333333),
('Williams', 'Accounting', 37000, 0.1666666667),
('Smith', 'Marketing', 38000, 0.25),
('Johnson', 'Marketing', 40000, 0.3333333333),
('Jenson', 'Accounting', 45000, 0.5),
('Jones', 'Accounting', 45000, 0.5),
('Adams', 'Accounting', 50000, 0.5833333333),
('Brown', 'Sales', 53000, 0.6666666667),
('Smith', 'Sales', 55000, 0.75),
('Wilkinson', 'IT', 60000, 0.8333333333),
('Johnson', 'Management', 80000, 0.9166666667),
('Miller', 'Management', 100000, 1),
], lambda row: (row.name, row.department, row.salary, round(row.cume_dist, 10)))
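# Worked examples for the values above: CUME_DIST is (rows with a salary
# <= the current row's) / (total rows), so Moore gets 1/12 = 0.0833...
# and the tied Jenson/Jones rows both get 6/12 = 0.5.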
def test_nthvalue(self):
qs = Employee.objects.annotate(
nth_value=Window(expression=NthValue(
expression='salary', nth=2),
order_by=[F('hire_date').asc(), F('name').desc()],
partition_by=F('department'),
)
).order_by('department', 'hire_date', 'name')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, None),
('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, 45000),
('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, 45000),
('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, 45000),
('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, None),
('Moore', 'IT', datetime.date(2013, 8, 1), 34000, 34000),
('Miller', 'Management', datetime.date(2005, 6, 1), 100000, None),
('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, 80000),
('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, None),
('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, 40000),
('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, None),
('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, 53000),
], lambda row: (row.name, row.department, row.hire_date, row.salary, row.nth_value))
def test_lead(self):
"""
Determine what the next person hired in the same department makes.
Because the dataset is ambiguous, the name is also part of the
ordering clause. No default is provided, so None/NULL is returned for
the last row in each partition.
"""
qs = Employee.objects.annotate(lead=Window(
expression=Lead(expression='salary'),
order_by=[F('hire_date').asc(), F('name').desc()],
partition_by='department',
)).order_by('department', F('hire_date').asc(), F('name').desc())
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead))
def test_lead_offset(self):
"""
Determine what the person hired two positions later in the same
department makes. No default is provided, so None is returned when
fewer than two rows follow within the partition.
"""
qs = Employee.objects.annotate(lead=Window(
expression=Lead('salary', offset=2),
partition_by='department',
order_by=F('hire_date').asc(),
))
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 37000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 50000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), None),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), None),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), None),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), None),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), None),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead),
ordered=False
)
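# With offset=2, LEAD looks two rows ahead within the partition, so any
# partition with fewer than three rows (e.g. IT) yields only None values.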
@skipUnlessDBFeature('supports_default_in_lead_lag')
def test_lead_default(self):
qs = Employee.objects.annotate(lead_default=Window(
expression=Lead(expression='salary', offset=5, default=60000),
partition_by=F('department'),
order_by=F('department').asc(),
))
self.assertEqual(list(qs.values_list('lead_default', flat=True).distinct()), [60000])
def test_ntile(self):
"""
Compute the quartile for each employee across the entire company,
based on salary. There are twelve employees, so they divide evenly
into four groups of three.
"""
qs = Employee.objects.annotate(ntile=Window(
expression=Ntile(num_buckets=4),
order_by=F('salary').desc(),
)).order_by('ntile', '-salary', 'name')
self.assertQuerysetEqual(qs, [
('Miller', 'Management', 100000, 1),
('Johnson', 'Management', 80000, 1),
('Wilkinson', 'IT', 60000, 1),
('Smith', 'Sales', 55000, 2),
('Brown', 'Sales', 53000, 2),
('Adams', 'Accounting', 50000, 2),
('Jenson', 'Accounting', 45000, 3),
('Jones', 'Accounting', 45000, 3),
('Johnson', 'Marketing', 40000, 3),
('Smith', 'Marketing', 38000, 4),
('Williams', 'Accounting', 37000, 4),
('Moore', 'IT', 34000, 4),
], lambda x: (x.name, x.department, x.salary, x.ntile))
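# NTILE(n) assigns bucket numbers 1..n in order; when the row count isn't
# divisible by n, the earlier buckets receive the extra rows. Here 12
# rows split evenly into four buckets of three.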
def test_percent_rank(self):
"""
Calculate the percentage rank of the employees across the entire
company based on salary and name (in case of ambiguity).
"""
qs = Employee.objects.annotate(percent_rank=Window(
expression=PercentRank(),
order_by=[F('salary').asc(), F('name').asc()],
)).order_by('percent_rank')
# Round to account for precision differences among databases.
self.assertQuerysetEqual(qs, [
('Moore', 'IT', 34000, 0.0),
('Williams', 'Accounting', 37000, 0.0909090909),
('Smith', 'Marketing', 38000, 0.1818181818),
('Johnson', 'Marketing', 40000, 0.2727272727),
('Jenson', 'Accounting', 45000, 0.3636363636),
('Jones', 'Accounting', 45000, 0.4545454545),
('Adams', 'Accounting', 50000, 0.5454545455),
('Brown', 'Sales', 53000, 0.6363636364),
('Smith', 'Sales', 55000, 0.7272727273),
('Wilkinson', 'IT', 60000, 0.8181818182),
('Johnson', 'Management', 80000, 0.9090909091),
('Miller', 'Management', 100000, 1.0),
], transform=lambda row: (row.name, row.department, row.salary, round(row.percent_rank, 10)))
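# PERCENT_RANK is (rank - 1) / (total rows - 1): Moore (rank 1) gets 0.0
# and Williams (rank 2) gets 1/11 = 0.0909..., matching the values above.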
def test_nth_returns_null(self):
"""
Find the nth row of the data set. None is returned since there are
fewer than 20 rows in the test data.
"""
qs = Employee.objects.annotate(nth_value=Window(
expression=NthValue('salary', nth=20),
order_by=F('salary').asc()
))
self.assertEqual(list(qs.values_list('nth_value', flat=True).distinct()), [None])
def test_multiple_partitioning(self):
"""
Find the maximum salary for each department for people hired in the
same year.
"""
qs = Employee.objects.annotate(max=Window(
expression=Max('salary'),
partition_by=[F('department'), ExtractYear(F('hire_date'))],
)).order_by('department', 'hire_date', 'name')
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.max))
def test_multiple_ordering(self):
"""
Accumulate the salaries over the departments based on hire_date.
If two people in the same department were hired on the same date, the
name in the ordering clause keeps the running totals deterministic.
"""
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
partition_by='department',
order_by=[F('hire_date').asc(), F('name').asc()],
)).order_by('department', 'sum')
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 127000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 177000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 94000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 180000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 78000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 108000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum))
@skipUnlessDBFeature('supports_frame_range_fixed_distance')
def test_range_n_preceding_and_following(self):
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
order_by=F('salary').asc(),
partition_by='department',
frame=ValueRange(start=-2, end=2),
))
self.assertIn('RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING', str(qs.query))
self.assertQuerysetEqual(qs, [
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 90000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 80000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum), ordered=False)
def test_range_unbound(self):
"""A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING."""
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
partition_by='age',
order_by=[F('age').asc()],
frame=ValueRange(start=None, end=None),
)).order_by('department', 'hire_date', 'name')
self.assertIn('RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING', str(qs.query))
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 45000, datetime.date(2005, 11, 1), 165000),
('Jenson', 'Accounting', 45000, datetime.date(2008, 4, 1), 165000),
('Williams', 'Accounting', 37000, datetime.date(2009, 6, 1), 165000),
('Adams', 'Accounting', 50000, datetime.date(2013, 7, 1), 130000),
('Wilkinson', 'IT', 60000, datetime.date(2011, 3, 1), 194000),
('Moore', 'IT', 34000, datetime.date(2013, 8, 1), 194000),
('Miller', 'Management', 100000, datetime.date(2005, 6, 1), 194000),
('Johnson', 'Management', 80000, datetime.date(2005, 7, 1), 130000),
('Smith', 'Marketing', 38000, datetime.date(2009, 10, 1), 165000),
('Johnson', 'Marketing', 40000, datetime.date(2012, 3, 1), 148000),
('Smith', 'Sales', 55000, datetime.date(2007, 6, 1), 148000),
('Brown', 'Sales', 53000, datetime.date(2009, 9, 1), 148000)
], transform=lambda row: (row.name, row.department, row.salary, row.hire_date, row.sum))
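# Unlike ROWS, a RANGE frame extends over peer rows with equal ordering
# values; with unbounded bounds it covers the whole partition, so every
# row sees its partition total, e.g. the four age-20 employees each get
# 45000 + 45000 + 37000 + 38000 = 165000.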
@skipIf(
connection.vendor == 'sqlite' and connection.Database.sqlite_version_info < (3, 27),
'Nondeterministic failure on SQLite < 3.27.'
)
def test_subquery_row_range_rank(self):
qs = Employee.objects.annotate(
highest_avg_salary_date=Subquery(
Employee.objects.filter(
department=OuterRef('department'),
).annotate(
avg_salary=Window(
expression=Avg('salary'),
order_by=[F('hire_date').asc()],
frame=RowRange(start=-1, end=1),
),
).order_by('-avg_salary', 'hire_date').values('hire_date')[:1],
),
).order_by('department', 'name')
self.assertQuerysetEqual(qs, [
('Adams', 'Accounting', datetime.date(2005, 11, 1)),
('Jenson', 'Accounting', datetime.date(2005, 11, 1)),
('Jones', 'Accounting', datetime.date(2005, 11, 1)),
('Williams', 'Accounting', datetime.date(2005, 11, 1)),
('Moore', 'IT', datetime.date(2011, 3, 1)),
('Wilkinson', 'IT', datetime.date(2011, 3, 1)),
('Johnson', 'Management', datetime.date(2005, 6, 1)),
('Miller', 'Management', datetime.date(2005, 6, 1)),
('Johnson', 'Marketing', datetime.date(2009, 10, 1)),
('Smith', 'Marketing', datetime.date(2009, 10, 1)),
('Brown', 'Sales', datetime.date(2007, 6, 1)),
('Smith', 'Sales', datetime.date(2007, 6, 1)),
], transform=lambda row: (row.name, row.department, row.highest_avg_salary_date))
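# RowRange(start=-1, end=1) renders as ROWS BETWEEN 1 PRECEDING AND
# 1 FOLLOWING, so the Avg above is a centered three-row moving average
# over each department's hire_date ordering.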
def test_row_range_rank(self):
"""
A query with ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING.
The resulting sum is the sum of the current row, all preceding rows,
and up to three following rows, according to the ordering clause. For
the first row (Miller), that's 100000 + 80000 + 45000 + 55000 = 280000.
"""
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
order_by=[F('hire_date').asc(), F('name').desc()],
frame=RowRange(start=None, end=3),
)).order_by('sum', 'hire_date')
self.assertIn('ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING', str(qs.query))
self.assertQuerysetEqual(qs, [
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 280000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 325000),
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 362000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 415000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 453000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 513000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 553000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 603000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 637000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 637000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 637000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 637000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum))
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_window_function(self):
"""
Window functions are not aggregates, so a DISTINCT ON query is a
useful way to filter out the duplicate rows they produce.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum('salary'),
partition_by=ExtractYear('hire_date'),
order_by=ExtractYear('hire_date')
),
year=ExtractYear('hire_date'),
).values('year', 'sum').distinct('year').order_by('year')
results = [
{'year': 2005, 'sum': 225000}, {'year': 2007, 'sum': 55000},
{'year': 2008, 'sum': 45000}, {'year': 2009, 'sum': 128000},
{'year': 2011, 'sum': 60000}, {'year': 2012, 'sum': 40000},
{'year': 2013, 'sum': 84000},
]
for idx, val in enumerate(results):
with self.subTest(result=val):
self.assertEqual(qs[idx], val)
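# On PostgreSQL, .distinct('year') compiles to roughly (a sketch):
#   SELECT DISTINCT ON ("year") ... ORDER BY "year"
# keeping the first row per year; since the window sum is constant
# within each year partition, any row per year carries the right total.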
def test_fail_update(self):
"""Window expressions can't be used in an UPDATE statement."""
msg = 'Window expressions are not allowed in this query'
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.filter(department='Management').update(
salary=Window(expression=Max('salary'), partition_by='department'),
)
def test_fail_insert(self):
"""Window expressions can't be used in an INSERT statement."""
msg = 'Window expressions are not allowed in this query'
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.create(
name='Jameson', department='Management', hire_date=datetime.date(2007, 7, 1),
salary=Window(expression=Sum(Value(10000), order_by=F('pk').asc())),
)
def test_window_expression_within_subquery(self):
subquery_qs = Employee.objects.annotate(
highest=Window(FirstValue('id'), partition_by=F('department'), order_by=F('salary').desc())
).values('highest')
highest_salary = Employee.objects.filter(pk__in=subquery_qs)
self.assertSequenceEqual(highest_salary.values('department', 'salary'), [
{'department': 'Accounting', 'salary': 50000},
{'department': 'Sales', 'salary': 55000},
{'department': 'Marketing', 'salary': 40000},
{'department': 'IT', 'salary': 60000},
{'department': 'Management', 'salary': 100000}
])
def test_invalid_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got '3'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(start=3),
)))
def test_invalid_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got '-3'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(end=-3),
)))
def test_invalid_type_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(end='a'),
)))
def test_invalid_type_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
frame=ValueRange(start='a'),
)))
def test_invalid_type_end_row_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
frame=RowRange(end='a'),
)))
@skipUnless(connection.vendor == 'postgresql', 'Frame construction not allowed on PostgreSQL')
def test_postgresql_illegal_range_frame_start(self):
msg = 'PostgreSQL only supports UNBOUNDED together with PRECEDING and FOLLOWING.'
with self.assertRaisesMessage(NotSupportedError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(start=-1),
)))
@skipUnless(connection.vendor == 'postgresql', 'Frame construction not allowed on PostgreSQL')
def test_postgresql_illegal_range_frame_end(self):
msg = 'PostgreSQL only supports UNBOUNDED together with PRECEDING and FOLLOWING.'
with self.assertRaisesMessage(NotSupportedError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(end=1),
)))
def test_invalid_type_start_row_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=RowRange(start='a'),
)))
class NonQueryWindowTests(SimpleTestCase):
def test_window_repr(self):
self.assertEqual(
repr(Window(expression=Sum('salary'), partition_by='department')),
'<Window: Sum(F(salary)) OVER (PARTITION BY F(department))>'
)
self.assertEqual(
repr(Window(expression=Avg('salary'), order_by=F('department').asc())),
'<Window: Avg(F(salary)) OVER (ORDER BY OrderBy(F(department), descending=False))>'
)
def test_window_frame_repr(self):
self.assertEqual(
repr(RowRange(start=-1)),
'<RowRange: ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING>'
)
self.assertEqual(
repr(ValueRange(start=None, end=1)),
'<ValueRange: RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING>'
)
self.assertEqual(
repr(ValueRange(start=0, end=0)),
'<ValueRange: RANGE BETWEEN CURRENT ROW AND CURRENT ROW>'
)
self.assertEqual(
repr(RowRange(start=0, end=0)),
'<RowRange: ROWS BETWEEN CURRENT ROW AND CURRENT ROW>'
)
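# The reprs above reflect how frame bounds render: a negative start
# becomes "N PRECEDING", a positive end "N FOLLOWING", zero "CURRENT
# ROW", and None "UNBOUNDED PRECEDING"/"UNBOUNDED FOLLOWING".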
def test_empty_group_by_cols(self):
window = Window(expression=Sum('pk'))
self.assertEqual(window.get_group_by_cols(), [])
self.assertFalse(window.contains_aggregate)
def test_frame_empty_group_by_cols(self):
frame = WindowFrame()
self.assertEqual(frame.get_group_by_cols(), [])
def test_frame_window_frame_notimplemented(self):
frame = WindowFrame()
msg = 'Subclasses must implement window_frame_start_end().'
with self.assertRaisesMessage(NotImplementedError, msg):
frame.window_frame_start_end(None, None, None)
def test_invalid_filter(self):
msg = 'Window is disallowed in the filter clause'
with self.assertRaisesMessage(NotSupportedError, msg):
Employee.objects.annotate(dense_rank=Window(expression=DenseRank())).filter(dense_rank__gte=1)
def test_unsupported_backend(self):
msg = 'This backend does not support window expressions.'
with mock.patch.object(connection.features, 'supports_over_clause', False):
with self.assertRaisesMessage(NotSupportedError, msg):
Employee.objects.annotate(dense_rank=Window(expression=DenseRank())).get()
def test_invalid_order_by(self):
msg = 'order_by must be either an Expression or a sequence of expressions'
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Sum('power'), order_by='-horse')
def test_invalid_source_expression(self):
msg = "Expression 'Upper' isn't compatible with OVER clauses."
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Upper('name'))
d76af84b68e55a28a342efae089acdad2e06c5a6d26bf79e5691c35eb0b5ab4c |
import unittest
from django.core.checks import Error, Warning as DjangoWarning
from django.db import connection, models
from django.test import SimpleTestCase, TestCase, skipIfDBFeature
from django.test.utils import isolate_apps, override_settings
from django.utils.functional import lazy
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
@isolate_apps('invalid_models_tests')
class AutoFieldTests(SimpleTestCase):
def test_valid_case(self):
class Model(models.Model):
id = models.AutoField(primary_key=True)
field = Model._meta.get_field('id')
self.assertEqual(field.check(), [])
def test_primary_key(self):
# primary_key must be True. Refs #12467.
class Model(models.Model):
field = models.AutoField(primary_key=False)
# Prevent Django from autocreating the `id` AutoField, which would
# result in an error, because a model can't have more than one
# AutoField.
another = models.IntegerField(primary_key=True)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
'AutoFields must set primary_key=True.',
obj=field,
id='fields.E100',
),
])
@isolate_apps('invalid_models_tests')
class CharFieldTests(SimpleTestCase):
def test_valid_field(self):
class Model(models.Model):
field = models.CharField(
max_length=255,
choices=[
('1', 'item1'),
('2', 'item2'),
],
db_index=True,
)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [])
def test_missing_max_length(self):
class Model(models.Model):
field = models.CharField()
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"CharFields must define a 'max_length' attribute.",
obj=field,
id='fields.E120',
),
])
def test_negative_max_length(self):
class Model(models.Model):
field = models.CharField(max_length=-1)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'max_length' must be a positive integer.",
obj=field,
id='fields.E121',
),
])
def test_bad_max_length_value(self):
class Model(models.Model):
field = models.CharField(max_length="bad")
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'max_length' must be a positive integer.",
obj=field,
id='fields.E121',
),
])
def test_str_max_length_value(self):
class Model(models.Model):
field = models.CharField(max_length='20')
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'max_length' must be a positive integer.",
obj=field,
id='fields.E121',
),
])
def test_str_max_length_type(self):
class Model(models.Model):
field = models.CharField(max_length=True)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'max_length' must be a positive integer.",
obj=field,
id='fields.E121'
),
])
def test_non_iterable_choices(self):
class Model(models.Model):
field = models.CharField(max_length=10, choices='bad')
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'choices' must be an iterable (e.g., a list or tuple).",
obj=field,
id='fields.E004',
),
])
def test_non_iterable_choices_two_letters(self):
"""Two letters isn't a valid choice pair."""
class Model(models.Model):
field = models.CharField(max_length=10, choices=['ab'])
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'choices' must be an iterable containing (actual value, "
"human readable name) tuples.",
obj=field,
id='fields.E005',
),
])
def test_iterable_of_iterable_choices(self):
class ThingItem:
def __init__(self, value, display):
self.value = value
self.display = display
def __iter__(self):
return iter((self.value, self.display))
def __len__(self):
return 2
class Things:
def __iter__(self):
return iter((ThingItem(1, 2), ThingItem(3, 4)))
class ThingWithIterableChoices(models.Model):
thing = models.CharField(max_length=100, blank=True, choices=Things())
self.assertEqual(ThingWithIterableChoices._meta.get_field('thing').check(), [])
def test_choices_containing_non_pairs(self):
class Model(models.Model):
field = models.CharField(max_length=10, choices=[(1, 2, 3), (1, 2, 3)])
class Model2(models.Model):
field = models.IntegerField(choices=[0])
for model in (Model, Model2):
with self.subTest(model.__name__):
field = model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'choices' must be an iterable containing (actual "
"value, human readable name) tuples.",
obj=field,
id='fields.E005',
),
])
def test_choices_containing_lazy(self):
class Model(models.Model):
field = models.CharField(max_length=10, choices=[['1', _('1')], ['2', _('2')]])
self.assertEqual(Model._meta.get_field('field').check(), [])
def test_lazy_choices(self):
class Model(models.Model):
field = models.CharField(max_length=10, choices=lazy(lambda: [[1, '1'], [2, '2']], tuple)())
self.assertEqual(Model._meta.get_field('field').check(), [])
def test_choices_named_group(self):
class Model(models.Model):
field = models.CharField(
max_length=10, choices=[
['knights', [['L', 'Lancelot'], ['G', 'Galahad']]],
['wizards', [['T', 'Tim the Enchanter']]],
['R', 'Random character'],
],
)
self.assertEqual(Model._meta.get_field('field').check(), [])
def test_choices_named_group_non_pairs(self):
class Model(models.Model):
field = models.CharField(
max_length=10,
choices=[['knights', [['L', 'Lancelot', 'Du Lac']]]],
)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'choices' must be an iterable containing (actual value, "
"human readable name) tuples.",
obj=field,
id='fields.E005',
),
])
def test_choices_named_group_bad_structure(self):
class Model(models.Model):
field = models.CharField(
max_length=10, choices=[
['knights', [
['Noble', [['G', 'Galahad']]],
['Combative', [['L', 'Lancelot']]],
]],
],
)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'choices' must be an iterable containing (actual value, "
"human readable name) tuples.",
obj=field,
id='fields.E005',
),
])
def test_choices_named_group_lazy(self):
class Model(models.Model):
field = models.CharField(
max_length=10, choices=[
[_('knights'), [['L', _('Lancelot')], ['G', _('Galahad')]]],
['R', _('Random character')],
],
)
self.assertEqual(Model._meta.get_field('field').check(), [])
def test_bad_db_index_value(self):
class Model(models.Model):
field = models.CharField(max_length=10, db_index='bad')
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'db_index' must be None, True or False.",
obj=field,
id='fields.E006',
),
])
def test_bad_validators(self):
class Model(models.Model):
field = models.CharField(max_length=10, validators=[True])
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"All 'validators' must be callable.",
hint=(
"validators[0] (True) isn't a function or instance of a "
"validator class."
),
obj=field,
id='fields.E008',
),
])
@unittest.skipUnless(connection.vendor == 'mysql',
"Test valid only for MySQL")
def test_too_long_char_field_under_mysql(self):
from django.db.backends.mysql.validation import DatabaseValidation
class Model(models.Model):
field = models.CharField(unique=True, max_length=256)
field = Model._meta.get_field('field')
validator = DatabaseValidation(connection=connection)
self.assertEqual(validator.check_field(field), [
Error(
'MySQL does not allow unique CharFields to have a max_length > 255.',
obj=field,
id='mysql.E001',
)
])
@isolate_apps('invalid_models_tests')
class DateFieldTests(SimpleTestCase):
maxDiff = None
def test_auto_now_and_auto_now_add_raise_error(self):
class Model(models.Model):
field0 = models.DateTimeField(auto_now=True, auto_now_add=True, default=now)
field1 = models.DateTimeField(auto_now=True, auto_now_add=False, default=now)
field2 = models.DateTimeField(auto_now=False, auto_now_add=True, default=now)
field3 = models.DateTimeField(auto_now=True, auto_now_add=True, default=None)
expected = []
checks = []
for i in range(4):
field = Model._meta.get_field('field%d' % i)
expected.append(Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=field,
id='fields.E160',
))
checks.extend(field.check())
self.assertEqual(checks, expected)
def test_fix_default_value(self):
class Model(models.Model):
field_dt = models.DateField(default=now())
field_d = models.DateField(default=now().date())
field_now = models.DateField(default=now)
field_dt = Model._meta.get_field('field_dt')
field_d = Model._meta.get_field('field_d')
field_now = Model._meta.get_field('field_now')
errors = field_dt.check()
errors.extend(field_d.check())
errors.extend(field_now.check()) # doesn't raise a warning
self.assertEqual(errors, [
DjangoWarning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=field_dt,
id='fields.W161',
),
DjangoWarning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=field_d,
id='fields.W161',
)
])
@override_settings(USE_TZ=True)
def test_fix_default_value_tz(self):
self.test_fix_default_value()
@isolate_apps('invalid_models_tests')
class DateTimeFieldTests(SimpleTestCase):
maxDiff = None
def test_fix_default_value(self):
class Model(models.Model):
field_dt = models.DateTimeField(default=now())
field_d = models.DateTimeField(default=now().date())
field_now = models.DateTimeField(default=now)
field_dt = Model._meta.get_field('field_dt')
field_d = Model._meta.get_field('field_d')
field_now = Model._meta.get_field('field_now')
errors = field_dt.check()
errors.extend(field_d.check())
errors.extend(field_now.check()) # doesn't raise a warning
self.assertEqual(errors, [
DjangoWarning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=field_dt,
id='fields.W161',
),
DjangoWarning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=field_d,
id='fields.W161',
)
])
@override_settings(USE_TZ=True)
def test_fix_default_value_tz(self):
self.test_fix_default_value()
@isolate_apps('invalid_models_tests')
class DecimalFieldTests(SimpleTestCase):
def test_required_attributes(self):
class Model(models.Model):
field = models.DecimalField()
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=field,
id='fields.E130',
),
Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=field,
id='fields.E132',
),
])
def test_negative_max_digits_and_decimal_places(self):
class Model(models.Model):
field = models.DecimalField(max_digits=-1, decimal_places=-1)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'decimal_places' must be a non-negative integer.",
obj=field,
id='fields.E131',
),
Error(
"'max_digits' must be a positive integer.",
obj=field,
id='fields.E133',
),
])
def test_bad_values_of_max_digits_and_decimal_places(self):
class Model(models.Model):
field = models.DecimalField(max_digits="bad", decimal_places="bad")
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'decimal_places' must be a non-negative integer.",
obj=field,
id='fields.E131',
),
Error(
"'max_digits' must be a positive integer.",
obj=field,
id='fields.E133',
),
])
def test_decimal_places_greater_than_max_digits(self):
class Model(models.Model):
field = models.DecimalField(max_digits=9, decimal_places=10)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=field,
id='fields.E134',
),
])
def test_valid_field(self):
class Model(models.Model):
field = models.DecimalField(max_digits=10, decimal_places=10)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [])
@isolate_apps('invalid_models_tests')
class FileFieldTests(SimpleTestCase):
def test_valid_default_case(self):
class Model(models.Model):
field = models.FileField()
self.assertEqual(Model._meta.get_field('field').check(), [])
def test_valid_case(self):
class Model(models.Model):
field = models.FileField(upload_to='somewhere')
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [])
def test_primary_key(self):
class Model(models.Model):
field = models.FileField(primary_key=False, upload_to='somewhere')
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'primary_key' is not a valid argument for a FileField.",
obj=field,
id='fields.E201',
)
])
def test_upload_to_starts_with_slash(self):
class Model(models.Model):
field = models.FileField(upload_to='/somewhere')
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"FileField's 'upload_to' argument must be a relative path, not "
"an absolute path.",
obj=field,
id='fields.E202',
hint='Remove the leading slash.',
)
])
def test_upload_to_callable_not_checked(self):
def callable(instance, filename):
return '/' + filename
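        # upload_to callables are only invoked at save time, so system checks
        # can't validate the paths they return.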
class Model(models.Model):
field = models.FileField(upload_to=callable)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [])
@isolate_apps('invalid_models_tests')
class FilePathFieldTests(SimpleTestCase):
def test_forbidden_files_and_folders(self):
class Model(models.Model):
field = models.FilePathField(allow_files=False, allow_folders=False)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
obj=field,
id='fields.E140',
),
])
@isolate_apps('invalid_models_tests')
class GenericIPAddressFieldTests(SimpleTestCase):
def test_non_nullable_blank(self):
class Model(models.Model):
field = models.GenericIPAddressField(null=False, blank=True)
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
('GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.'),
obj=field,
id='fields.E150',
),
])
@isolate_apps('invalid_models_tests')
class ImageFieldTests(SimpleTestCase):
def test_pillow_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
pillow_installed = False
else:
pillow_installed = True
class Model(models.Model):
field = models.ImageField(upload_to='somewhere')
field = Model._meta.get_field('field')
errors = field.check()
expected = [] if pillow_installed else [
Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.org/project/Pillow/ '
'or run command "pip install Pillow".'),
obj=field,
id='fields.E210',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class IntegerFieldTests(SimpleTestCase):
def test_max_length_warning(self):
class Model(models.Model):
integer = models.IntegerField(max_length=2)
biginteger = models.BigIntegerField(max_length=2)
smallinteger = models.SmallIntegerField(max_length=2)
positiveinteger = models.PositiveIntegerField(max_length=2)
positivesmallinteger = models.PositiveSmallIntegerField(max_length=2)
for field in Model._meta.get_fields():
if field.auto_created:
continue
with self.subTest(name=field.name):
self.assertEqual(field.check(), [
DjangoWarning(
"'max_length' is ignored when used with %s." % field.__class__.__name__,
hint="Remove 'max_length' from field",
obj=field,
id='fields.W122',
)
])
@isolate_apps('invalid_models_tests')
class TimeFieldTests(SimpleTestCase):
maxDiff = None
def test_fix_default_value(self):
class Model(models.Model):
field_dt = models.TimeField(default=now())
field_t = models.TimeField(default=now().time())
            field_now = models.TimeField(default=now)
field_dt = Model._meta.get_field('field_dt')
field_t = Model._meta.get_field('field_t')
field_now = Model._meta.get_field('field_now')
errors = field_dt.check()
errors.extend(field_t.check())
errors.extend(field_now.check()) # doesn't raise a warning
self.assertEqual(errors, [
DjangoWarning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=field_dt,
id='fields.W161',
),
DjangoWarning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=field_t,
id='fields.W161',
)
])
@override_settings(USE_TZ=True)
def test_fix_default_value_tz(self):
self.test_fix_default_value()
@isolate_apps('invalid_models_tests')
class TextFieldTests(TestCase):
@skipIfDBFeature('supports_index_on_text_field')
def test_max_length_warning(self):
class Model(models.Model):
value = models.TextField(db_index=True)
field = Model._meta.get_field('value')
field_type = field.db_type(connection)
self.assertEqual(field.check(), [
DjangoWarning(
'%s does not support a database index on %s columns.'
% (connection.display_name, field_type),
hint=(
"An index won't be created. Silence this warning if you "
"don't care about it."
),
obj=field,
id='fields.W162',
)
])
import asyncore
import base64
import mimetypes
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from email import charset, message_from_binary_file, message_from_bytes
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr
from io import StringIO
from smtplib import SMTP, SMTPAuthenticationError, SMTPException
from ssl import SSLError
from django.core import mail
from django.core.mail import (
EmailMessage, EmailMultiAlternatives, mail_admins, mail_managers,
send_mail, send_mass_mail,
)
from django.core.mail.backends import console, dummy, filebased, locmem, smtp
from django.core.mail.message import BadHeaderError, sanitize_address
from django.test import SimpleTestCase, override_settings
from django.test.utils import requires_tz_support
from django.utils.translation import gettext_lazy
class HeadersCheckMixin:
def assertMessageHasHeaders(self, message, headers):
"""
Asserts that the `message` has all `headers`.
        message: can be an instance of an email.Message subclass or bytes
        with the contents of an email message.
headers: should be a set of (header-name, header-value) tuples.
"""
if isinstance(message, bytes):
message = message_from_bytes(message)
msg_headers = set(message.items())
        self.assertTrue(
            headers.issubset(msg_headers),
            msg='Message is missing the following headers: %s' % (headers - msg_headers),
        )
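# A usage sketch for the mixin above (hypothetical call sites, for
# illustration only): the assertion accepts either a parsed
# email.message.Message or the raw bytes of a message, so both of these
# forms are equivalent:
#
#     self.assertMessageHasHeaders(email.message(), {('Subject', 'Subject')})
#     self.assertMessageHasHeaders(email.message().as_bytes(), {('Subject', 'Subject')})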
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def get_decoded_attachments(self, django_message):
"""
        Encode the specified django.core.mail.message.EmailMessage, then decode
        it using Python's email.parser module, and return a list of
        (filename, content, mimetype) tuples, one per attachment.
"""
msg_bytes = django_message.message().as_bytes()
email_message = message_from_bytes(msg_bytes)
def iter_attachments():
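            # walk() iterates over every MIME part of the message depth-first;
            # attachments are the parts whose Content-Disposition is "attachment".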
for i in email_message.walk():
if i.get_content_disposition() == 'attachment':
filename = i.get_filename()
content = i.get_payload(decode=True)
mimetype = i.get_content_type()
yield filename, content, mimetype
return list(iter_attachments())
def test_ascii(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected]')
def test_multiple_recipients(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected], [email protected]')
def test_header_omitted_for_no_to_recipients(self):
message = EmailMessage('Subject', 'Content', '[email protected]', cc=['[email protected]']).message()
self.assertNotIn('To', message)
def test_recipients_with_empty_strings(self):
"""
        Empty strings in various recipient arguments are always stripped from
        the final recipient list.
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]', ''],
cc=['[email protected]', ''],
bcc=['', '[email protected]'],
reply_to=['', None],
)
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]']
)
def test_cc(self):
"""Regression test for #7722"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
message = email.message()
self.assertEqual(message['Cc'], '[email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]'])
# Test multiple CC with multiple To
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'],
cc=['[email protected]', '[email protected]']
)
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]', '[email protected]']
)
# Testing with Bcc
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'],
cc=['[email protected]', '[email protected]'], bcc=['[email protected]']
)
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]']
)
def test_cc_headers(self):
message = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
cc=['[email protected]'], headers={'Cc': '[email protected]'},
).message()
self.assertEqual(message['Cc'], '[email protected]')
def test_cc_in_headers_only(self):
message = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'Cc': '[email protected]'},
).message()
self.assertEqual(message['Cc'], '[email protected]')
def test_reply_to(self):
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
reply_to=['[email protected]'],
)
message = email.message()
self.assertEqual(message['Reply-To'], '[email protected]')
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
reply_to=['[email protected]', '[email protected]']
)
message = email.message()
self.assertEqual(message['Reply-To'], '[email protected], [email protected]')
def test_recipients_as_tuple(self):
email = EmailMessage(
'Subject', 'Content', '[email protected]', ('[email protected]', '[email protected]'),
cc=('[email protected]', '[email protected]'), bcc=('[email protected]',)
)
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]']
)
def test_recipients_as_string(self):
with self.assertRaisesMessage(TypeError, '"to" argument must be a list or tuple'):
EmailMessage(to='[email protected]')
with self.assertRaisesMessage(TypeError, '"cc" argument must be a list or tuple'):
EmailMessage(cc='[email protected]')
with self.assertRaisesMessage(TypeError, '"bcc" argument must be a list or tuple'):
EmailMessage(bcc='[email protected]')
with self.assertRaisesMessage(TypeError, '"reply_to" argument must be a list or tuple'):
EmailMessage(reply_to='[email protected]')
def test_header_injection(self):
email = EmailMessage('Subject\nInjection Test', 'Content', '[email protected]', ['[email protected]'])
with self.assertRaises(BadHeaderError):
email.message()
email = EmailMessage(
gettext_lazy('Subject\nInjection Test'), 'Content', '[email protected]', ['[email protected]']
)
with self.assertRaises(BadHeaderError):
email.message()
def test_space_continuation(self):
"""
Test for space continuation character in long (ASCII) subject headers (#7747)
"""
email = EmailMessage(
'Long subject lines that get wrapped should contain a space '
'continuation character to get expected behavior in Outlook and Thunderbird',
'Content', '[email protected]', ['[email protected]']
)
message = email.message()
self.assertEqual(
message['Subject'].encode(),
b'Long subject lines that get wrapped should contain a space continuation\n'
b' character to get expected behavior in Outlook and Thunderbird'
)
def test_message_header_overrides(self):
"""
Specifying dates or message-ids in the extra headers overrides the
default values (#9233)
"""
headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
email = EmailMessage('subject', 'content', '[email protected]', ['[email protected]'], headers=headers)
self.assertMessageHasHeaders(email.message(), {
('Content-Transfer-Encoding', '7bit'),
('Content-Type', 'text/plain; charset="utf-8"'),
('From', '[email protected]'),
('MIME-Version', '1.0'),
('Message-ID', 'foo'),
('Subject', 'subject'),
('To', '[email protected]'),
('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
})
def test_from_header(self):
"""
Make sure we can manually set the From header (#9214)
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_to_header(self):
"""
Make sure we can manually set the To header (#17444)
"""
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'],
headers={'To': '[email protected]'})
message = email.message()
self.assertEqual(message['To'], '[email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
# If we don't set the To header manually, it should default to the `to` argument to the constructor
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['To'], '[email protected], [email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
def test_to_in_headers_only(self):
message = EmailMessage(
'Subject', 'Content', '[email protected]',
headers={'To': '[email protected]'},
).message()
self.assertEqual(message['To'], '[email protected]')
def test_reply_to_header(self):
"""
Specifying 'Reply-To' in headers should override reply_to.
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
reply_to=['[email protected]'], headers={'Reply-To': '[email protected]'},
)
message = email.message()
self.assertEqual(message['Reply-To'], '[email protected]')
def test_reply_to_in_headers_only(self):
message = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'Reply-To': '[email protected]'},
).message()
self.assertEqual(message['Reply-To'], '[email protected]')
def test_multiple_message_call(self):
"""
Regression for #13259 - Make sure that headers are not changed when
calling EmailMessage.message()
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
message = email.message()
self.assertEqual(message['From'], '[email protected]')
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_unicode_address_header(self):
"""
Regression for #11144 - When a to/from/cc header contains unicode,
make sure the email addresses are parsed correctly (especially with
regards to commas)
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]',
['"Firstname Sürname" <[email protected]>', '[email protected]'],
)
self.assertEqual(
email.message()['To'],
'=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>, [email protected]'
)
email = EmailMessage(
'Subject', 'Content', '[email protected]',
['"Sürname, Firstname" <[email protected]>', '[email protected]'],
)
self.assertEqual(
email.message()['To'],
'=?utf-8?q?S=C3=BCrname=2C_Firstname?= <[email protected]>, [email protected]'
)
def test_unicode_headers(self):
email = EmailMessage(
'Gżegżółka', 'Content', '[email protected]', ['[email protected]'],
headers={
'Sender': '"Firstname Sürname" <[email protected]>',
'Comments': 'My Sürname is non-ASCII',
},
)
message = email.message()
self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>')
self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
def test_safe_mime_multipart(self):
"""
Make sure headers can be set with a different encoding than utf-8 in
SafeMIMEMultipart as well
"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
from_email, to = '[email protected]', '"Sürname, Firstname" <[email protected]>'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.encoding = 'iso-8859-1'
self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <[email protected]>')
self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
def test_safe_mime_multipart_with_attachments(self):
"""
EmailMultiAlternatives includes alternatives if the body is empty and
it has attachments.
"""
msg = EmailMultiAlternatives(body='')
html_content = '<p>This is <strong>html</strong></p>'
msg.attach_alternative(html_content, 'text/html')
msg.attach('example.txt', 'Text file content', 'text/plain')
self.assertIn(html_content, msg.message().as_string())
def test_none_body(self):
msg = EmailMessage('subject', None, '[email protected]', ['[email protected]'])
self.assertEqual(msg.body, '')
self.assertEqual(msg.message().get_payload(), '')
def test_encoding(self):
"""
Regression for #12791 - Encode body correctly with other encodings
than utf-8
"""
email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', '[email protected]', ['[email protected]'])
email.encoding = 'iso-8859-1'
message = email.message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]')})
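        # With iso-8859-1 and quoted-printable, 'ü' (byte 0xFC) is encoded as '=FC'.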
self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
# Make sure MIME attachments also works correctly with other encodings than utf-8
text_content = 'Firstname Sürname is a great guy.'
html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
msg = EmailMultiAlternatives('Subject', text_content, '[email protected]', ['[email protected]'])
msg.encoding = 'iso-8859-1'
msg.attach_alternative(html_content, "text/html")
payload0 = msg.message().get_payload(0)
self.assertMessageHasHeaders(payload0, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
payload1 = msg.message().get_payload(1)
self.assertMessageHasHeaders(payload1, {
('MIME-Version', '1.0'),
('Content-Type', 'text/html; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(
payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>')
)
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), 'multipart/mixed')
self.assertEqual(message.get_default_type(), 'text/plain')
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
self.assertEqual(payload[1].get_content_type(), 'application/pdf')
def test_attachments_two_tuple(self):
msg = EmailMessage(attachments=[('filename1', 'content1')])
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'filename1')
self.assertEqual(content, b'content1')
self.assertEqual(mimetype, 'application/octet-stream')
def test_attachments_MIMEText(self):
txt = MIMEText('content1')
msg = EmailMessage(attachments=[txt])
payload = msg.message().get_payload()
self.assertEqual(payload[0], txt)
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
content = 'This is the message.'
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
def test_attach_file(self):
"""
Test attaching a file against different mimetypes and make sure that
a file will be attached and sent properly even if an invalid mimetype
is specified.
"""
files = (
# filename, actual mimetype
('file.txt', 'text/plain'),
('file.png', 'image/png'),
('file_txt', None),
('file_png', None),
('file_txt.png', 'image/png'),
('file_png.txt', 'text/plain'),
('file.eml', 'message/rfc822'),
)
test_mimetypes = ['text/plain', 'image/png', None]
for basename, real_mimetype in files:
for mimetype in test_mimetypes:
email = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype)
self.assertEqual(email.attachments, [])
file_path = os.path.join(os.path.dirname(__file__), 'attachments', basename)
email.attach_file(file_path, mimetype=mimetype)
self.assertEqual(len(email.attachments), 1)
self.assertIn(basename, email.attachments[0])
msgs_sent_num = email.send()
self.assertEqual(msgs_sent_num, 1)
def test_attach_text_as_bytes(self):
msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
msg.attach('file.txt', b'file content')
sent_num = msg.send()
self.assertEqual(sent_num, 1)
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
self.assertEqual(content, b'file content')
self.assertEqual(mimetype, 'text/plain')
def test_attach_utf8_text_as_bytes(self):
"""
Non-ASCII characters encoded as valid UTF-8 are correctly transported
and decoded.
"""
msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
msg.attach('file.txt', b'\xc3\xa4') # UTF-8 encoded a umlaut.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
self.assertEqual(content, b'\xc3\xa4')
self.assertEqual(mimetype, 'text/plain')
def test_attach_non_utf8_text_as_bytes(self):
"""
Binary data that can't be decoded as UTF-8 overrides the MIME type
instead of decoding the data.
"""
msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
msg.attach('file.txt', b'\xff') # Invalid UTF-8.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
# Content should be passed through unmodified.
self.assertEqual(content, b'\xff')
self.assertEqual(mimetype, 'application/octet-stream')
def test_dummy_backend(self):
"""
        Make sure that the dummy backend returns the correct number of sent
        messages.
"""
connection = dummy.EmailBackend()
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
        Make sure that get_connection() accepts arbitrary keyword arguments
        that might be used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.locmem.EmailBackend'),
locmem.EmailBackend
)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.console.EmailBackend'),
console.EmailBackend
)
with tempfile.TemporaryDirectory() as tmp_dir:
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir),
filebased.EmailBackend
)
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_connection_arg(self):
"""Test connection argument to send_mail(), et. al."""
mail.outbox = []
# Send using non-default connection
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, 'Subject')
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mass_mail([
('Subject1', 'Content1', '[email protected]', ['[email protected]']),
('Subject2', 'Content2', '[email protected]', ['[email protected]']),
], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_admins('Admin message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_managers('Manager message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage(
'Subject', 'From the future', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
self.assertNotIn(b'>From the future', email.message().as_bytes())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage(
'Subject', 'UTF-8 encoded body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
self.assertIn(b'Content-Transfer-Encoding: 7bit', msg.message().as_bytes())
# Ticket #11212
# Shouldn't use quoted printable, should detect it can represent content with 7 bit data
msg = EmailMessage(
'Subject', 'Body with only ASCII characters.', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 7bit', s)
# Shouldn't use quoted printable, should detect it can represent content with 8 bit data
msg = EmailMessage(
'Subject', 'Body with latin characters: àáä.', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
s = msg.message().as_string()
self.assertIn('Content-Transfer-Encoding: 8bit', s)
msg = EmailMessage(
'Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', '[email protected]',
['[email protected]'], headers={'From': '[email protected]'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
s = msg.message().as_string()
self.assertIn('Content-Transfer-Encoding: 8bit', s)
def test_dont_base64_encode_message_rfc822(self):
# Ticket #18967
# Shouldn't use base64 encoding for a child EmailMessage attachment.
# Create a child message first
child_msg = EmailMessage(
'Child Subject', 'Some body of child message', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
child_s = child_msg.message().as_string()
# Now create a parent
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
# Attach to parent as a string
parent_msg.attach(content=child_s, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn('Child Subject', parent_s)
# Feature test: try attaching email.Message object directly to the mail.
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn('Child Subject', parent_s)
# Feature test: try attaching Django's EmailMessage object directly to the mail.
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
parent_msg.attach(content=child_msg, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn('Child Subject', parent_s)
def test_custom_utf8_encoding(self):
"""A UTF-8 charset with a custom body encoding is respected."""
body = 'Body with latin characters: àáä.'
msg = EmailMessage('Subject', body, '[email protected]', ['[email protected]'])
encoding = charset.Charset('utf-8')
encoding.body_encoding = charset.QP
msg.encoding = encoding
message = msg.message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', 'quoted-printable'),
})
self.assertEqual(message.get_payload(), encoding.body_encode(body))
def test_sanitize_address(self):
"""
Email addresses are properly sanitized.
"""
# Simple ASCII address - string form
self.assertEqual(sanitize_address('[email protected]', 'ascii'), '[email protected]')
self.assertEqual(sanitize_address('[email protected]', 'utf-8'), '[email protected]')
# Simple ASCII address - tuple form
self.assertEqual(
sanitize_address(('A name', '[email protected]'), 'ascii'),
'A name <[email protected]>'
)
self.assertEqual(
sanitize_address(('A name', '[email protected]'), 'utf-8'),
'=?utf-8?q?A_name?= <[email protected]>'
)
        # Unicode characters are supported per RFC 6532.
self.assertEqual(
sanitize_address('tó@example.com', 'utf-8'),
'[email protected]'
)
self.assertEqual(
sanitize_address(('Tó Example', 'tó@example.com'), 'utf-8'),
'=?utf-8?q?T=C3=B3_Example?= <[email protected]>'
)
@requires_tz_support
class MailTimeZoneTests(SimpleTestCase):
@override_settings(EMAIL_USE_LOCALTIME=False, USE_TZ=True, TIME_ZONE='Africa/Algiers')
def test_date_header_utc(self):
"""
EMAIL_USE_LOCALTIME=False creates a datetime in UTC.
"""
email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]'])
self.assertTrue(email.message()['Date'].endswith('-0000'))
@override_settings(EMAIL_USE_LOCALTIME=True, USE_TZ=True, TIME_ZONE='Africa/Algiers')
def test_date_header_localtime(self):
"""
EMAIL_USE_LOCALTIME=True creates a datetime in the local time zone.
"""
email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]'])
self.assertTrue(email.message()['Date'].endswith('+0100')) # Africa/Algiers is UTC+1
class PythonGlobalState(SimpleTestCase):
"""
    Tests for #12422 -- Django's charset smarts (#2472/#11212) for utf-8 text
    parts shouldn't pollute the global Python email package charset registry
    when django.core.mail.message is imported.
"""
def test_utf8(self):
txt = MIMEText('UTF-8 encoded body', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_7bit(self):
txt = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_8bit_latin(self):
txt = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_8bit_non_latin(self):
txt = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
class BaseEmailBackendTests(HeadersCheckMixin):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
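            # Compare just the prefixes with assertEqual so a failure shows a
            # readable diff of the mismatching text.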
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests must provide a get_mailbox_content() method')
def flush_mailbox(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests may require a flush_mailbox() method')
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(
len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [m.as_string() for m in mailbox])
)
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "[email protected]")
self.assertEqual(message.get_all("to"), ["[email protected]"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(message.get_payload(decode=True).decode(), 'Je t\'aime très fort')
def test_send_long_lines(self):
"""
Email line length is limited to 998 chars by the RFC:
https://tools.ietf.org/html/rfc5322#section-2.1.1
        Message bodies containing longer lines are converted to Quoted-Printable
to avoid having to insert newlines, which could be hairy to do properly.
"""
# Unencoded body length is < 998 (840) but > 998 when utf-8 encoded.
email = EmailMessage('Subject', 'В южных морях ' * 60, '[email protected]', ['[email protected]'])
email.send()
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', 'quoted-printable'),
})
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', '[email protected]', ['[email protected]'])
email2 = EmailMessage('Subject', 'Content2', '[email protected]', ['[email protected]'])
# send_messages() may take a list or an iterator.
emails_lists = ([email1, email2], iter((email1, email2)))
for emails_list in emails_lists:
num_sent = mail.get_connection().send_messages(emails_list)
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), 'Content1')
self.assertEqual(messages[1].get_payload(), 'Content2')
self.flush_mailbox()
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <[email protected]>',
["[email protected]"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>")
def test_plaintext_send_mail(self):
"""
        Test send_mail without the html_message argument (regression test for
        the addition of the html_message parameter to send_mail()).
"""
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'])
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message.get_content_type(), 'text/plain')
def test_html_send_mail(self):
"""Test html_message argument to send_mail"""
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(MANAGERS=[('nobody', '[email protected]')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', '[email protected]')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(gettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(gettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=[], MANAGERS=[])
def test_empty_admins(self):
"""
mail_admins/mail_managers doesn't connect to the mail server
if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]'),
('Cc', '[email protected]')})
self.assertIn('\nDate: ', message.as_string())
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.flush_mailbox()
m = EmailMessage('Subject', 'Content', 'from@öäü.com', ['to@öäü.com'], cc=['cc@öäü.com'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.assertEqual(message.get('cc'), '[email protected]')
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), "tester")
self.assertEqual(message.get('to'), "django")
def test_lazy_addresses(self):
"""
Email sending should support lazy email addresses (#24416).
"""
_ = gettext_lazy
self.assertTrue(send_mail('Subject', 'Content', _('tester'), [_('django')]))
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'django')
self.flush_mailbox()
m = EmailMessage(
'Subject', 'Content', _('tester'), [_('to1'), _('to2')],
cc=[_('cc1'), _('cc2')],
bcc=[_('bcc')],
reply_to=[_('reply')],
)
self.assertEqual(m.recipients(), ['to1', 'to2', 'cc1', 'cc2', 'bcc'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'to1, to2')
self.assertEqual(message.get('cc'), 'cc1, cc2')
self.assertEqual(message.get('Reply-To'), 'reply')
def test_close_connection(self):
"""
Connection can be closed (even when not explicitly opened)
"""
conn = mail.get_connection(username='', password='')
conn.close()
def test_use_as_contextmanager(self):
"""
        The connection can be used as a context manager.
"""
opened = [False]
closed = [False]
conn = mail.get_connection(username='', password='')
def open():
opened[0] = True
conn.open = open
def close():
closed[0] = True
conn.close = close
with conn as same_conn:
self.assertTrue(opened[0])
self.assertIs(same_conn, conn)
self.assertFalse(closed[0])
self.assertTrue(closed[0])
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.locmem.EmailBackend'
def get_mailbox_content(self):
return [m.message() for m in mail.outbox]
def flush_mailbox(self):
mail.outbox = []
def tearDown(self):
super().tearDown()
mail.outbox = []
def test_locmem_shared_messages(self):
"""
        Make sure that the locmem backend populates the outbox.
"""
connection = locmem.EmailBackend()
connection2 = locmem.EmailBackend()
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
connection.send_messages([email])
connection2.send_messages([email])
self.assertEqual(len(mail.outbox), 2)
def test_validate_multiline_headers(self):
# Ticket #18861 - Validate emails when using the locmem backend
with self.assertRaises(BadHeaderError):
send_mail('Subject\nMultiline', 'Content', '[email protected]', ['[email protected]'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.filebased.EmailBackend'
def setUp(self):
super().setUp()
self.tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp_dir)
self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
self._settings_override.enable()
def tearDown(self):
self._settings_override.disable()
super().tearDown()
def flush_mailbox(self):
for filename in os.listdir(self.tmp_dir):
os.unlink(os.path.join(self.tmp_dir, filename))
def get_mailbox_content(self):
messages = []
for filename in os.listdir(self.tmp_dir):
with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
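                # The filebased backend separates messages in a log file with a
                # line of 79 dashes, so split on that marker to recover them.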
session = fp.read().split(b'\n' + (b'-' * 79) + b'\n')
messages.extend(message_from_bytes(m) for m in session if m)
return messages
def test_file_sessions(self):
"""Make sure opening a connection creates a new file"""
msg = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
connection = mail.get_connection()
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
message = message_from_binary_file(fp)
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
connection2 = mail.get_connection()
connection2.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
msg.connection = mail.get_connection()
self.assertTrue(connection.open())
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
connection.close()
class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.console.EmailBackend'
def setUp(self):
super().setUp()
self.__stdout = sys.stdout
self.stream = sys.stdout = StringIO()
def tearDown(self):
del self.stream
sys.stdout = self.__stdout
del self.__stdout
super().tearDown()
def flush_mailbox(self):
self.stream = sys.stdout = StringIO()
def get_mailbox_content(self):
messages = self.stream.getvalue().split('\n' + ('-' * 79) + '\n')
return [message_from_bytes(m.encode()) for m in messages if m]
def test_console_stream_kwarg(self):
"""
The console backend can be pointed at an arbitrary stream.
"""
s = StringIO()
connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
message = s.getvalue().split('\n' + ('-' * 79) + '\n')[0].encode()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]')})
self.assertIn(b'\nDate: ', message)
class FakeSMTPChannel(smtpd.SMTPChannel):
def collect_incoming_data(self, data):
try:
smtpd.SMTPChannel.collect_incoming_data(self, data)
except UnicodeDecodeError:
# Ignore decode error in SSL/TLS connection tests as the test only
# cares whether the connection attempt was made.
pass
def smtp_AUTH(self, arg):
if arg == 'CRAM-MD5':
# This is only the first part of the login process. But it's enough
# for our tests.
challenge = base64.b64encode(b'somerandomstring13579')
self.push('334 %s' % challenge.decode())
else:
self.push('502 Error: login "%s" not implemented' % arg)
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
"""
Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
"""
channel_class = FakeSMTPChannel
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
smtpd.SMTPServer.__init__(self, *args, decode_data=True, **kwargs)
self._sink = []
self.active = False
self.active_lock = threading.Lock()
self.sink_lock = threading.Lock()
def process_message(self, peer, mailfrom, rcpttos, data):
data = data.encode()
m = message_from_bytes(data)
maddr = parseaddr(m.get('from'))[1]
if mailfrom != maddr:
# According to the spec, mailfrom does not necessarily match the
# From header - this is the case where the local part isn't
# encoded, so try to correct that.
lp, domain = mailfrom.split('@', 1)
lp = Header(lp, 'utf-8').encode()
mailfrom = '@'.join([lp, domain])
if mailfrom != maddr:
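                # smtpd uses a non-None return value from process_message() as
                # the SMTP response, so the client sees this 553 error.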
return "553 '%s' != '%s'" % (mailfrom, maddr)
with self.sink_lock:
self._sink.append(m)
def get_sink(self):
with self.sink_lock:
return self._sink[:]
def flush_sink(self):
with self.sink_lock:
self._sink[:] = []
def start(self):
assert not self.active
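        # run() sets this event once the asyncore loop is live, so start()
        # doesn't return until the server is actually accepting connections.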
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
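        # Poll asyncore in short slices so stop() can flip self.active and
        # have this loop exit promptly.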
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
asyncore.close_all()
def stop(self):
if self.active:
self.active = False
self.join()
class FakeAUTHSMTPConnection(SMTP):
"""
    An SMTP connection that pretends to support the AUTH command. It does
    not, but at least this allows testing the first part of the AUTH process.
def ehlo(self, name=''):
response = SMTP.ehlo(self, name=name)
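        # Advertise AUTH mechanisms the server never offered so smtplib
        # proceeds into the login() handshake.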
self.esmtp_features.update({
'auth': 'CRAM-MD5 PLAIN LOGIN',
})
return response
class SMTPBackendTestsBase(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
cls._settings_override = override_settings(
EMAIL_HOST="127.0.0.1",
EMAIL_PORT=cls.server.socket.getsockname()[1])
cls._settings_override.enable()
cls.server.start()
@classmethod
def tearDownClass(cls):
cls._settings_override.disable()
cls.server.stop()
super().tearDownClass()
class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase):
email_backend = 'django.core.mail.backends.smtp.EmailBackend'
def setUp(self):
super().setUp()
self.server.flush_sink()
def tearDown(self):
self.server.flush_sink()
super().tearDown()
def flush_mailbox(self):
self.server.flush_sink()
def get_mailbox_content(self):
return self.server.get_sink()
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD='not empty password',
)
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, 'not empty username')
self.assertEqual(backend.password, 'not empty password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD='not empty password',
)
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username='username', password='password')
self.assertEqual(backend.username, 'username')
self.assertEqual(backend.password, 'password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD='not empty password',
)
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username='', password='')
self.assertEqual(backend.username, '')
self.assertEqual(backend.password, '')
def test_auth_attempted(self):
"""
        Opening the backend with a non-empty username/password tries
to authenticate against the SMTP server.
"""
backend = smtp.EmailBackend(
username='not empty username', password='not empty password')
with self.assertRaisesMessage(SMTPException, 'SMTP AUTH extension not supported by server.'):
with backend:
pass
def test_server_open(self):
"""
open() returns whether it opened a connection.
"""
backend = smtp.EmailBackend(username='', password='')
self.assertIsNone(backend.connection)
opened = backend.open()
backend.close()
self.assertIs(opened, True)
def test_reopen_connection(self):
backend = smtp.EmailBackend()
# Simulate an already open connection.
backend.connection = True
self.assertIs(backend.open(), False)
def test_server_login(self):
"""
Even if the Python SMTP server doesn't support authentication, the
login process starts and the appropriate exception is raised.
"""
class CustomEmailBackend(smtp.EmailBackend):
connection_class = FakeAUTHSMTPConnection
backend = CustomEmailBackend(username='username', password='password')
with self.assertRaises(SMTPAuthenticationError):
with backend:
pass
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_override_settings(self):
backend = smtp.EmailBackend(use_tls=False)
self.assertFalse(backend.use_tls)
def test_email_tls_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_tls)
def test_ssl_tls_mutually_exclusive(self):
msg = (
'EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set '
'one of those settings to True.'
)
with self.assertRaisesMessage(ValueError, msg):
smtp.EmailBackend(use_ssl=True, use_tls=True)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_override_settings(self):
backend = smtp.EmailBackend(use_ssl=False)
self.assertFalse(backend.use_ssl)
def test_email_ssl_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_ssl)
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_certfile, 'foo')
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_override_settings(self):
backend = smtp.EmailBackend(ssl_certfile='bar')
self.assertEqual(backend.ssl_certfile, 'bar')
def test_email_ssl_certfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_certfile)
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_keyfile, 'foo')
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_override_settings(self):
backend = smtp.EmailBackend(ssl_keyfile='bar')
self.assertEqual(backend.ssl_keyfile, 'bar')
def test_email_ssl_keyfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_keyfile)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_attempts_starttls(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
with self.assertRaisesMessage(SMTPException, 'STARTTLS extension not supported by server.'):
with backend:
pass
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_attempts_ssl_connection(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
with self.assertRaises(SSLError):
with backend:
pass
def test_connection_timeout_default(self):
"""The connection's timeout value is None by default."""
connection = mail.get_connection('django.core.mail.backends.smtp.EmailBackend')
self.assertIsNone(connection.timeout)
def test_connection_timeout_custom(self):
"""The timeout parameter can be customized."""
class MyEmailBackend(smtp.EmailBackend):
def __init__(self, *args, **kwargs):
kwargs.setdefault('timeout', 42)
super().__init__(*args, **kwargs)
myemailbackend = MyEmailBackend()
myemailbackend.open()
self.assertEqual(myemailbackend.timeout, 42)
self.assertEqual(myemailbackend.connection.timeout, 42)
myemailbackend.close()
@override_settings(EMAIL_TIMEOUT=10)
def test_email_timeout_override_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.timeout, 10)
def test_email_msg_uses_crlf(self):
"""#23063 -- RFC-compliant messages are sent over SMTP."""
send = SMTP.send
try:
smtp_messages = []
def mock_send(self, s):
smtp_messages.append(s)
return send(self, s)
SMTP.send = mock_send
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
mail.get_connection().send_messages([email])
# Find the actual message
msg = None
for i, m in enumerate(smtp_messages):
if m[:4] == 'data':
msg = smtp_messages[i + 1]
break
self.assertTrue(msg)
msg = msg.decode()
# The message only contains CRLF and not combinations of CRLF, LF, and CR.
msg = msg.replace('\r\n', '')
self.assertNotIn('\r', msg)
self.assertNotIn('\n', msg)
finally:
SMTP.send = send
def test_send_messages_after_open_failed(self):
"""
send_messages() shouldn't try to send messages if open() raises an
exception after initializing the connection.
"""
backend = smtp.EmailBackend()
# Simulate connection initialization success and a subsequent
# connection exception.
backend.connection = True
backend.open = lambda: None
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
self.assertEqual(backend.send_messages([email]), 0)
def test_send_messages_empty_list(self):
backend = smtp.EmailBackend()
backend.connection = True
self.assertEqual(backend.send_messages([]), 0)
def test_send_messages_zero_sent(self):
"""A message isn't sent if it doesn't have any recipients."""
backend = smtp.EmailBackend()
backend.connection = True
email = EmailMessage('Subject', 'Content', '[email protected]', to=[])
sent = backend.send_messages([email])
self.assertEqual(sent, 0)
class SMTPBackendStoppedServerTests(SMTPBackendTestsBase):
"""
These tests require a separate class, because the FakeSMTPServer is shut
down in setUpClass(), and it cannot be restarted ("RuntimeError: threads
can only be started once").
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.backend = smtp.EmailBackend(username='', password='')
cls.server.stop()
def test_server_stopped(self):
"""
Closing the backend while the SMTP server is stopped doesn't raise an
exception.
"""
self.backend.close()
def test_fail_silently_on_connection_error(self):
"""
A socket connection error is silenced with fail_silently=True.
"""
with self.assertRaises(ConnectionError):
self.backend.open()
self.backend.fail_silently = True
self.backend.open()
|
93c6c7e4fca615179a891ac8313299186a04c325dad203a7a85a204c137749fe | import collections.abc
from datetime import datetime
from math import ceil
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models.functions import Substr
from django.test import TestCase, skipUnlessDBFeature
from .models import (
Article, Author, Game, IsNullWithNoneAsRHS, Player, Season, Tag,
)
class LookupTests(TestCase):
@classmethod
def setUpTestData(cls):
# Create a few Authors.
cls.au1 = Author.objects.create(name='Author 1', alias='a1')
cls.au2 = Author.objects.create(name='Author 2', alias='a2')
# Create a few Articles.
cls.a1 = Article.objects.create(
headline='Article 1',
pub_date=datetime(2005, 7, 26),
author=cls.au1,
slug='a1',
)
cls.a2 = Article.objects.create(
headline='Article 2',
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug='a2',
)
cls.a3 = Article.objects.create(
headline='Article 3',
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug='a3',
)
cls.a4 = Article.objects.create(
headline='Article 4',
pub_date=datetime(2005, 7, 28),
author=cls.au1,
slug='a4',
)
cls.a5 = Article.objects.create(
headline='Article 5',
pub_date=datetime(2005, 8, 1, 9, 0),
author=cls.au2,
slug='a5',
)
cls.a6 = Article.objects.create(
headline='Article 6',
pub_date=datetime(2005, 8, 1, 8, 0),
author=cls.au2,
slug='a6',
)
cls.a7 = Article.objects.create(
headline='Article 7',
pub_date=datetime(2005, 7, 27),
author=cls.au2,
slug='a7',
)
# Create a few Tags.
cls.t1 = Tag.objects.create(name='Tag 1')
cls.t1.articles.add(cls.a1, cls.a2, cls.a3)
cls.t2 = Tag.objects.create(name='Tag 2')
cls.t2.articles.add(cls.a3, cls.a4, cls.a5)
cls.t3 = Tag.objects.create(name='Tag 3')
cls.t3.articles.add(cls.a5, cls.a6, cls.a7)
def test_exists(self):
        # We can use .exists() to check that there are some results.
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
        # An integer value can be queried using a string.
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertQuerysetEqual(
Article.objects.filter(pub_date__startswith='2005'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.abc.Iterator)
self.assertQuerysetEqual(
Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline')
)
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(
Article.objects.in_bulk(),
{
self.a1.id: self.a1,
self.a2.id: self.a2,
self.a3.id: self.a3,
self.a4.id: self.a4,
self.a5.id: self.a5,
self.a6.id: self.a6,
self.a7.id: self.a7,
}
)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
with self.assertRaises(TypeError):
Article.objects.in_bulk(headline__startswith='Blah')
def test_in_bulk_lots_of_ids(self):
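        # in_bulk() splits a large id_list into batches so that each query
        # stays under the backend's parameter limit (max_query_params), e.g.
        # SQLite historically allows at most 999 query parameters.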
test_range = 2000
max_query_params = connection.features.max_query_params
expected_num_queries = ceil(test_range / max_query_params) if max_query_params else 1
Author.objects.bulk_create([Author() for i in range(test_range - Author.objects.count())])
authors = {author.pk: author for author in Author.objects.all()}
with self.assertNumQueries(expected_num_queries):
self.assertEqual(Author.objects.in_bulk(authors), authors)
def test_in_bulk_with_field(self):
self.assertEqual(
Article.objects.in_bulk([self.a1.slug, self.a2.slug, self.a3.slug], field_name='slug'),
{
self.a1.slug: self.a1,
self.a2.slug: self.a2,
self.a3.slug: self.a3,
}
)
def test_in_bulk_non_unique_field(self):
msg = "in_bulk()'s field_name must be a unique field but 'author' isn't."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.in_bulk([self.au1], field_name='author')
def test_values(self):
        # values() returns a list of dictionaries instead of object instances,
        # and you can specify which fields you want to retrieve.
self.assertSequenceEqual(
Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
)
self.assertSequenceEqual(
Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertSequenceEqual(
list(Article.objects.values('id', 'headline').iterator()),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
)
# The values() method works with "extra" fields specified in extra(select).
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertSequenceEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}],
)
# You can specify fields from forward and reverse relations, just like filter().
self.assertSequenceEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
],
)
self.assertSequenceEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
],
)
self.assertSequenceEqual(
(
Author.objects
.values('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
],
)
        # However, a FieldError will be raised if you specify a nonexistent
        # field name in values() (a field that is neither in the model nor in
        # extra(select)).
msg = (
"Cannot resolve keyword 'id_plus_two' into field. Choices are: "
"author, author_id, headline, id, id_plus_one, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_two')
        # If you don't specify field names to values(), all fields are returned.
self.assertSequenceEqual(
Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0),
'slug': 'a5',
}],
)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
self.assertSequenceEqual(
Article.objects.values_list('headline'),
[
('Article 5',),
('Article 6',),
('Article 4',),
('Article 2',),
('Article 3',),
('Article 7',),
('Article 1',),
],
)
self.assertSequenceEqual(
Article.objects.values_list('id').order_by('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
)
self.assertSequenceEqual(
Article.objects.values_list('id', flat=True).order_by('id'),
[self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
)
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
)
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id_plus_one', 'id'),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id)
],
)
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id', 'id_plus_one'),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1)
],
)
args = ('name', 'article__headline', 'article__tag__name')
self.assertSequenceEqual(
Author.objects.values_list(*args).order_by(*args),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
],
)
with self.assertRaises(TypeError):
Article.objects.values_list('id', 'headline', flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_next_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')), '<Article: Article 6>')
self.assertEqual(repr(self.a3.get_next_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a4.get_next_by_pub_date()), '<Article: Article 6>')
with self.assertRaises(Article.DoesNotExist):
self.a5.get_next_by_pub_date()
self.assertEqual(repr(self.a6.get_next_by_pub_date()), '<Article: Article 5>')
self.assertEqual(repr(self.a7.get_next_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a7.get_previous_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a6.get_previous_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a5.get_previous_by_pub_date()), '<Article: Article 6>')
self.assertEqual(repr(self.a4.get_previous_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a3.get_previous_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_previous_by_pub_date()), '<Article: Article 1>')
def test_escaping(self):
        # Underscores, percent signs, and backslashes have special meaning in
        # the underlying SQL code, but Django handles their quoting
        # automatically.
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article_'),
['<Article: Article_ with underscore>']
)
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article%'),
['<Article: Article% with percent sign>']
)
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='\\'),
[r'<Article: Article with \ backslash>']
)
def test_exclude(self):
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
# exclude() is the opposite of filter() when doing lookups:
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.exclude(headline__startswith="Article_"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.exclude(headline="Article 7"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 1>',
]
)
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual(Article.objects.none().iterator(), [])
def test_in(self):
        # Using __in with an empty list should return an empty queryset.
self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
self.assertQuerysetEqual(
Article.objects.exclude(id__in=[]),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
def test_in_different_database(self):
with self.assertRaisesMessage(
ValueError,
"Subqueries aren't allowed across different databases. Force the "
"inner query to be evaluated using `list(inner_query)`."
):
list(Article.objects.filter(id__in=Article.objects.using('other').all()))
def test_in_keeps_value_ordering(self):
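        # str(query) renders the SQL with parameters interpolated (unquoted),
        # which makes the ordering of the IN clause members visible.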
query = Article.objects.filter(slug__in=['a%d' % i for i in range(1, 8)]).values('pk').query
self.assertIn(' IN (a1, a2, a3, a4, a5, a6, a7) ', str(query))
def test_error_messages(self):
        # Programming errors are pointed out with helpful error messages.
with self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'pub_date_year' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag"
):
Article.objects.filter(pub_date_year='2005').count()
def test_unsupported_lookups(self):
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'starts' for CharField or join on the field "
"not permitted, perhaps you meant startswith or istartswith?"
):
Article.objects.filter(headline__starts='Article')
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'is_null' for DateTimeField or join on the field "
"not permitted, perhaps you meant isnull?"
):
Article.objects.filter(pub_date__is_null=True)
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'gobbledygook' for DateTimeField or join on the field "
"not permitted."
):
Article.objects.filter(pub_date__gobbledygook='blahblah')
def test_relation_nested_lookup_error(self):
# An invalid nested lookup on a related field raises a useful error.
msg = 'Related Field got invalid lookup: editor'
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(author__editor__name='James')
msg = 'Related Field got invalid lookup: foo'
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(articles__foo='bar')
def test_regex(self):
# Create some articles with a bit more interesting headlines for testing field lookups:
for a in Article.objects.all():
a.delete()
now = datetime.now()
Article.objects.create(pub_date=now, headline='f')
Article.objects.create(pub_date=now, headline='fo')
Article.objects.create(pub_date=now, headline='foo')
Article.objects.create(pub_date=now, headline='fooo')
Article.objects.create(pub_date=now, headline='hey-Foo')
Article.objects.create(pub_date=now, headline='bar')
Article.objects.create(pub_date=now, headline='AbBa')
Article.objects.create(pub_date=now, headline='baz')
Article.objects.create(pub_date=now, headline='baxZ')
# zero-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo*'),
['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>']
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'fo*'),
[
'<Article: f>',
'<Article: fo>',
'<Article: foo>',
'<Article: fooo>',
'<Article: hey-Foo>',
]
)
# one-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo+'),
['<Article: fo>', '<Article: foo>', '<Article: fooo>']
)
# wildcard
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fooo?'),
['<Article: foo>', '<Article: fooo>']
)
# leading anchor
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^b'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
)
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'), ['<Article: AbBa>'])
# trailing anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'), ['<Article: baz>'])
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'z$'),
['<Article: baxZ>', '<Article: baz>']
)
# character sets
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'ba[rz]'),
['<Article: bar>', '<Article: baz>']
)
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'), ['<Article: baxZ>'])
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'ba[RxZ]'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
)
# and more articles:
Article.objects.create(pub_date=now, headline='foobar')
Article.objects.create(pub_date=now, headline='foobaz')
Article.objects.create(pub_date=now, headline='ooF')
Article.objects.create(pub_date=now, headline='foobarbaz')
Article.objects.create(pub_date=now, headline='zoocarfaz')
Article.objects.create(pub_date=now, headline='barfoobaz')
Article.objects.create(pub_date=now, headline='bazbaRFOO')
# alternation
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
'<Article: ooF>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^foo(f|b)'),
['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>']
)
# greedy matching
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b.*az'),
[
'<Article: barfoobaz>',
'<Article: baz>',
'<Article: bazbaRFOO>',
'<Article: foobarbaz>',
'<Article: foobaz>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'b.*ar'),
[
'<Article: bar>',
'<Article: barfoobaz>',
'<Article: bazbaRFOO>',
'<Article: foobar>',
'<Article: foobarbaz>',
]
)
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
Article.objects.create(pub_date=now, headline='foobar')
Article.objects.create(pub_date=now, headline='foobaz')
Article.objects.create(pub_date=now, headline='ooF')
Article.objects.create(pub_date=now, headline='foobarbaz')
Article.objects.create(pub_date=now, headline='zoocarfaz')
Article.objects.create(pub_date=now, headline='barfoobaz')
Article.objects.create(pub_date=now, headline='bazbaRFOO')
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b(.).*b\1'),
['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>']
)
def test_regex_null(self):
"""
        A regex lookup does not fail on null/None values.
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
def test_regex_non_string(self):
"""
        A regex lookup does not fail on non-string fields.
"""
Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'), ['<Season: 2013>'])
def test_regex_non_ascii(self):
"""
A regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name='\u2660')
Player.objects.get(name__regex='\u2660')
def test_nonfield_lookups(self):
"""
A lookup query containing non-fields raises the proper exception.
"""
msg = "Unsupported lookup 'blahblah' for CharField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah=99)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah__exact=99)
msg = (
"Cannot resolve keyword 'blahblah' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Genuine field names don't collide with built-in lookup types
('year', 'gt', 'range', 'in' etc.) (#11670).
"""
# 'gt' is used as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010]))
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games.set(Game.objects.filter(season__year=2009))
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011]))
johnson = Player.objects.create(name="Johnson")
johnson.games.set(Game.objects.filter(season__year__in=[2011]))
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
# Players who played in 2010
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
# Players who played in 2011
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
def test_chain_date_time_lookups(self):
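        # Date/time transforms (__month, __day, __hour, __minute) can be
        # chained with comparison lookups such as __gt and __lte.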
self.assertQuerysetEqual(
Article.objects.filter(pub_date__month__gt=7),
['<Article: Article 5>', '<Article: Article 6>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__day__gte=27),
['<Article: Article 2>', '<Article: Article 3>',
'<Article: Article 4>', '<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__hour__lt=8),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__minute__lte=0),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 5>', '<Article: Article 6>',
'<Article: Article 7>'],
ordered=False
)
def test_exact_none_transform(self):
"""Transforms are used for __exact=None."""
Season.objects.create(year=1, nulled_text_field='not null')
self.assertFalse(Season.objects.filter(nulled_text_field__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__exact=None))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled=None))
def test_exact_sliced_queryset_limit_one(self):
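        # A queryset sliced to a single row is compiled into a LIMIT 1
        # subquery, so it can be used as the value of an exact lookup.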
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[:1]),
[self.a1, self.a2, self.a3, self.a4]
)
def test_exact_sliced_queryset_limit_one_offset(self):
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[1:2]),
[self.a5, self.a6, self.a7]
)
def test_exact_sliced_queryset_not_limited_to_one(self):
msg = (
'The QuerySet value for an exact lookup must be limited to one '
'result using slicing.'
)
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[:2]))
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[1:]))
def test_custom_field_none_rhs(self):
"""
__exact=value is transformed to __isnull=True if Field.get_prep_value()
converts value to None.
"""
season = Season.objects.create(year=2012, nulled_text_field=None)
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull=True))
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field=''))
def test_pattern_lookups_with_substr(self):
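        # Pattern lookups accept a database expression (here Substr) as the
        # right-hand side, not just a literal string.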
a = Author.objects.create(name='John Smith', alias='Johx')
b = Author.objects.create(name='Rhonda Simpson', alias='sonx')
tests = (
('startswith', [a]),
('istartswith', [a]),
('contains', [a, b]),
('icontains', [a, b]),
('endswith', [b]),
('iendswith', [b]),
)
for lookup, result in tests:
with self.subTest(lookup=lookup):
authors = Author.objects.filter(**{'name__%s' % lookup: Substr('alias', 1, 3)})
self.assertCountEqual(authors, result)
def test_custom_lookup_none_rhs(self):
"""Lookup.can_use_none_as_rhs=True allows None as a lookup value."""
season = Season.objects.create(year=2012, nulled_text_field=None)
query = Season.objects.get_queryset().query
field = query.model._meta.get_field('nulled_text_field')
self.assertIsInstance(query.build_lookup(['isnull_none_rhs'], field, None), IsNullWithNoneAsRHS)
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull_none_rhs=True))
|
97c15510b710f6a09a5e84ed5532bb789a28201862c3bef462621409802fea64 | from django.forms import CharField, Form, Media, MultiWidget, TextInput
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
@override_settings(
STATIC_URL='http://media.example.com/static/',
)
class FormsMediaTestCase(SimpleTestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(
css={'all': ('path/to/css1', '/path/to/css2')},
js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'),
)
self.assertEqual(
str(m),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
self.assertEqual(
repr(m),
"Media(css={'all': ('path/to/css1', '/path/to/css2')}, "
"js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'))"
)
class Foo:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(
str(m3),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
        # Any absolute path will be preserved; relative paths are combined
        # with the value of settings.STATIC_URL.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Media objects can be interrogated by media type
self.assertEqual(
str(w1.media['css']),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">"""
)
self.assertEqual(
str(w1.media['js']),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(
str(w1.media + w2.media + w3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
        # Media addition hasn't affected the original objects.
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super().media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet">
<link href="/other/path" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super().media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(
str(w6.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<link href="/other/path" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(
str(w7.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w8 = MyWidget8()
self.assertEqual(
str(w8.media),
"""<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_from_property(self):
        # If a widget extends another but defines media, it extends the parent
        # widget's media, even if the parent defined its media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(
str(w9.media),
"""<link href="/some/path" type="text/css" media="all" rel="stylesheet">
<link href="/other/path" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w11 = MyWidget11()
self.assertEqual(
str(w11.media),
"""<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w12 = MyWidget12()
self.assertEqual(
str(w12.media),
"""<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
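        # The rendered output groups stylesheets by media type, and the
        # groups appear in sorted order ('print', 'screen', 'screen, print').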
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1', '/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1', '/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(
str(multimedia.media),
"""<link href="/file4" type="text/css" media="print" rel="stylesheet">
<link href="/file3" type="text/css" media="screen" rel="stylesheet">
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet">
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
        # MultiWidgets have a default media definition that gets all the
        # media from the component widgets.
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super().__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(
str(mymulti.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(
str(f1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(
str(f1.media + f2.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(
str(f3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
)
# Media works in templates
self.assertEqual(
Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet">
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet">"""
)
def test_html_safe(self):
media = Media(css={'all': ['/path/to/css']}, js=['/path/to/js'])
self.assertTrue(hasattr(Media, '__html__'))
self.assertEqual(str(media), media.__html__())
def test_merge(self):
test_values = (
(([1, 2], [3, 4]), [1, 2, 3, 4]),
(([1, 2], [2, 3]), [1, 2, 3]),
(([2, 3], [1, 2]), [1, 2, 3]),
(([1, 3], [2, 3]), [1, 2, 3]),
(([1, 2], [1, 3]), [1, 2, 3]),
(([1, 2], [3, 2]), [1, 3, 2]),
)
for (list1, list2), expected in test_values:
with self.subTest(list1=list1, list2=list2):
self.assertEqual(Media.merge(list1, list2), expected)
def test_merge_warning(self):
msg = 'Detected duplicate Media files in an opposite order:\n1\n2'
with self.assertWarnsMessage(RuntimeWarning, msg):
self.assertEqual(Media.merge([1, 2], [2, 1]), [1, 2])
def test_merge_js_three_way(self):
"""
The relative order of scripts is preserved in a three-way merge.
"""
# custom_widget.js doesn't depend on jquery.js.
widget1 = Media(js=['custom_widget.js'])
widget2 = Media(js=['jquery.js', 'uses_jquery.js'])
form_media = widget1 + widget2
# The relative ordering of custom_widget.js and jquery.js has been
# established (though nothing required it).
self.assertEqual(form_media._js, ['custom_widget.js', 'jquery.js', 'uses_jquery.js'])
# The inline also uses custom_widget.js. This time, it's at the end.
inline_media = Media(js=['jquery.js', 'also_jquery.js']) + Media(js=['custom_widget.js'])
merged = form_media + inline_media
self.assertEqual(merged._js, ['custom_widget.js', 'jquery.js', 'uses_jquery.js', 'also_jquery.js'])
def test_merge_css_three_way(self):
widget1 = Media(css={'screen': ['a.css']})
widget2 = Media(css={'screen': ['b.css']})
widget3 = Media(css={'all': ['c.css']})
form1 = widget1 + widget2
form2 = widget2 + widget1
# form1 and form2 have a.css and b.css in different order...
self.assertEqual(form1._css, {'screen': ['a.css', 'b.css']})
self.assertEqual(form2._css, {'screen': ['b.css', 'a.css']})
# ...but merging succeeds as the relative ordering of a.css and b.css
# was never specified.
merged = widget3 + form1 + form2
self.assertEqual(merged._css, {'screen': ['a.css', 'b.css'], 'all': ['c.css']})
|
2cac75556f319cd4d7c44da5ce05c0b10196749643028130e32cbf6581637236 | import unittest
from django.db import connection
from django.db.models.fields import BooleanField, NullBooleanField
from django.db.utils import DatabaseError
from django.test import TransactionTestCase
from ..models import Square
@unittest.skipUnless(connection.vendor == 'oracle', 'Oracle tests')
class Tests(unittest.TestCase):
def test_quote_name(self):
"""'%' chars are escaped for query execution."""
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
"""A stored procedure can be called through a cursor wrapper."""
with connection.cursor() as cursor:
cursor.callproc('DBMS_SESSION.SET_IDENTIFIER', ['_django_testing!'])
def test_cursor_var(self):
"""Cursor variables can be passed as query parameters."""
with connection.cursor() as cursor:
var = cursor.var(str)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_client_encoding(self):
"""Client encoding is set correctly."""
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, 'UTF-8')
self.assertEqual(connection.connection.nencoding, 'UTF-8')
def test_order_of_nls_parameters(self):
"""
An 'almost right' datetime works with configured NLS parameters
(#18465).
"""
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# The query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
def test_boolean_constraints(self):
"""Boolean fields have check constraints on their values."""
for field in (BooleanField(), NullBooleanField(), BooleanField(null=True)):
with self.subTest(field=field):
field.set_attributes_from_name('is_nice')
self.assertIn('"IS_NICE" IN (0,1)', field.db_check(connection))
@unittest.skipUnless(connection.vendor == 'oracle', 'Oracle tests')
class TransactionalTests(TransactionTestCase):
available_apps = ['backends']
def test_hidden_no_data_found_exception(self):
# "ORA-1403: no data found" exception is hidden by Oracle OCI library
# when an INSERT statement is used with a RETURNING clause (see #28859).
with connection.cursor() as cursor:
# Create trigger that raises "ORA-1403: no data found".
cursor.execute("""
CREATE OR REPLACE TRIGGER "TRG_NO_DATA_FOUND"
AFTER INSERT ON "BACKENDS_SQUARE"
FOR EACH ROW
BEGIN
RAISE NO_DATA_FOUND;
END;
""")
try:
with self.assertRaisesMessage(DatabaseError, (
'The database did not return a new row id. Probably "ORA-1403: '
'no data found" was raised internally but was hidden by the '
'Oracle OCI library (see https://code.djangoproject.com/ticket/28859).'
)):
Square.objects.create(root=2, square=4)
finally:
with connection.cursor() as cursor:
cursor.execute('DROP TRIGGER "TRG_NO_DATA_FOUND"')
def test_password_with_at_sign(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = 'p@ssword'
try:
self.assertIn('/\\"p@ssword\\"@', connection._connect_string())
with self.assertRaises(DatabaseError) as context:
connection.cursor()
# Database exception: "ORA-01017: invalid username/password" is
# expected.
self.assertIn('ORA-01017', context.exception.args[0].message)
finally:
connection.settings_dict['PASSWORD'] = old_password
|
3302840c7d221fa5269108f149aac91eedaeb9608d9e75ee14a6963a1b1fa91d | import json
import mimetypes
import os
import re
import sys
from copy import copy
from functools import partial
from http import HTTPStatus
from importlib import import_module
from io import BytesIO
from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
# Structured suffix spec: https://tools.ietf.org/html/rfc6838#section-4.2.8
JSON_CONTENT_TYPE_RE = re.compile(r'^application\/(.+\+)?json')
class RedirectCycleError(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload:
"""
A wrapper around BytesIO that restricts what can be read, since data from
the network can't be rewound and can't be read beyond its content length.
This makes sure that views can't do anything under the test client that
wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after it's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
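# Hedged usage sketch (illustrative, not part of the original module):
#
#     payload = FakePayload(b'hello')
#     assert len(payload) == 5
#     assert payload.read(2) == b'he'
#     payload.write(b'!')  # ValueError: can't write after a read has started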
def closing_iterator_wrapper(iterable, close):
try:
yield from iterable
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most Web servers by removing the content of
responses for HEAD requests, 1xx, 204, and 304 responses. Ensure
compliance with RFC 7230, section 3.3.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
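# Hedged example: a HEAD request, or a 1xx/204/304 response, run through
# this helper comes back with an empty body, e.g.:
#
#     response = conditional_content_removal(head_request, response)
#     # response.content == b''  (head_request is an illustrative name)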
class ClientHandler(BaseHandler):
"""
An HTTP handler that can be used for testing purposes. Use the WSGI
interface to compose requests, but return the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# Sneaky little hack so that we can easily get around CsrfViewMiddleware.
# This makes life easier and is probably required for backwards
# compatibility with external tests against admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most Web servers.
conditional_content_removal(request, response)
# Attach the originating request to the response so that it can be
# retrieved later.
response.wsgi_request = request
# Emulate a WSGI server by calling the close method on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
if 'context' not in store:
store['context'] = ContextList()
store['context'].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if value is None:
raise TypeError(
'Cannot encode None as POST data. Did you mean to pass an '
'empty string or omit the value?'
)
elif is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
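# Hedged example using the module-level BOUNDARY constant:
#
#     body = encode_multipart(BOUNDARY, {'name': 'value'})
#     # body is the raw bytes of a multipart form, suitable for posting
#     # with content type MULTIPART_CONTENT.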
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, 'name') and isinstance(file.name, str)
filename = os.path.basename(file.name) if file_has_string_name else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
filename = filename or key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
class RequestFactory:
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, *, json_encoder=DjangoJSONEncoder, **defaults):
self.json_encoder = json_encoder
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See https://www.python.org/dev/peps/pep-3333/#environ-variables
return {
'HTTP_COOKIE': '; '.join(sorted(
'%s=%s' % (morsel.key, morsel.coded_value)
for morsel in self.cookies.values()
)),
'PATH_INFO': '/',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
**self.defaults,
**request,
}
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _encode_json(self, data, content_type):
"""
Return encoded JSON if data is a dict, list, or tuple and content_type
is application/json.
"""
should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance(data, (dict, list, tuple))
return json.dumps(data, cls=self.json_encoder) if should_encode else data
def _get_path(self, parsed):
path = parsed.path
# If there are parameters, add them
if parsed.params:
path += ";" + parsed.params
path = unquote_to_bytes(path)
# Replace the behavior where non-ASCII values in the WSGI environ are
# arbitrarily decoded with ISO-8859-1.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode('iso-8859-1')
def get(self, path, data=None, secure=False, **extra):
"""Construct a GET request."""
data = {} if data is None else data
return self.generic('GET', path, secure=secure, **{
'QUERY_STRING': urlencode(data, doseq=True),
**extra,
})
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"""Construct a POST request."""
data = self._encode_json({} if data is None else data, content_type)
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"""Construct a HEAD request."""
data = {} if data is None else data
return self.generic('HEAD', path, secure=secure, **{
'QUERY_STRING': urlencode(data, doseq=True),
**extra,
})
def trace(self, path, secure=False, **extra):
"""Construct a TRACE request."""
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PUT request."""
data = self._encode_json(data, content_type)
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PATCH request."""
data = self._encode_json(data, content_type)
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a DELETE request."""
data = self._encode_json(data, content_type)
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': method,
'SERVER_PORT': '443' if secure else '80',
'wsgi.url_scheme': 'https' if secure else 'http',
}
if data:
r.update({
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': content_type,
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
# WSGI requires latin-1 encoded strings. See get_path_info().
query_string = parsed[4].encode().decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
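# Hedged sketch: the verb-specific helpers above all funnel into generic().
# An explicit call might look like:
#
#     rf = RequestFactory()
#     request = rf.generic('GET', '/search/?q=django', secure=True)
#     # request.META['QUERY_STRING'] == 'q=django'
#     # request.META['wsgi.url_scheme'] == 'https'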
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, raise_request_exception=True, **defaults):
super().__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.raise_request_exception = raise_request_exception
self.exc_info = None
def store_exc_info(self, **kwargs):
"""Store exceptions when they are generated by a view."""
self.exc_info = sys.exc_info()
@property
def session(self):
"""Return the current session variables."""
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
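# Hedged usage sketch: mutate the session through a local reference and
# save it, since each property access may build a fresh SessionStore:
#
#     session = client.session
#     session['key'] = 'value'
#     session.save()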
def request(self, **request):
"""
The master request method. Compose the environment dictionary and pass
to the handler, return the result of the handler. Assume defaults for
the query environment, which can be overridden using the arguments to
the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
response.exc_info = self.exc_info
if self.exc_info:
_, exc_value, _ = self.exc_info
self.exc_info = None
if self.raise_request_exception:
raise exc_value
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using GET."""
response = super().get(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""Request a response from the server using POST."""
response = super().post(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using HEAD."""
response = super().head(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Request a response from the server using OPTIONS."""
response = super().options(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PUT."""
response = super().put(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PATCH."""
response = super().patch(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a DELETE request to the server."""
response = super().delete(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""Send a TRACE request to the server."""
response = super().trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def login(self, **credentials):
"""
Set the Client to appear as if it has successfully logged into a site.
Return True if login is possible; False if the provided credentials
are incorrect.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if user:
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
def get_backend():
from django.contrib.auth import load_backend
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
if hasattr(backend, 'get_user'):
return backend_path
if backend is None:
backend = get_backend()
user.backend = backend
self._login(user, backend)
def _login(self, user, backend=None):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""Log out the user by removing the cookies and session object."""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if not hasattr(response, '_json'):
if not JSON_CONTENT_TYPE_RE.match(response.get('Content-Type')):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
response._json = json.loads(response.content.decode(), **extra)
return response._json
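# Hedged example: response.json (bound in request() above) parses lazily
# and caches the result, provided the response has a JSON content type:
#
#     data = client.get('/api/').json()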
def _handle_redirects(self, response, data='', content_type='', **extra):
"""
Follow any redirects by requesting responses from the server using GET.
"""
response.redirect_chain = []
redirect_status_codes = (
HTTPStatus.MOVED_PERMANENTLY,
HTTPStatus.FOUND,
HTTPStatus.SEE_OTHER,
HTTPStatus.TEMPORARY_REDIRECT,
HTTPStatus.PERMANENT_REDIRECT,
)
while response.status_code in redirect_status_codes:
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
# Prepend the request path to handle relative path redirects
path = url.path
if not path.startswith('/'):
path = urljoin(response.request['PATH_INFO'], path)
if response.status_code in (HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT):
# Preserve request method post-redirect for 307/308 responses.
request_method = getattr(self, response.request['REQUEST_METHOD'].lower())
else:
request_method = self.get
data = QueryDict(url.query)
content_type = None
response = request_method(path, data=data, content_type=content_type, follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
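# Hedged example of follow=True, which routes through _handle_redirects():
#
#     response = client.get('/old/', follow=True)
#     # response.redirect_chain is a list of (url, status_code) pairs.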
|
ce1e933fcc84a389a04aedc37bcb7270c3714672f87121be8b639e3bbc359e89 | import itertools
import json
import os
import re
from urllib.parse import unquote
from django.apps import apps
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.template import Context, Engine
from django.urls import translate_url
from django.utils.formats import get_format
from django.utils.http import is_safe_url
from django.utils.translation import (
LANGUAGE_SESSION_KEY, check_for_language, get_language,
)
from django.utils.translation.trans_real import DjangoTranslation
from django.views.generic import View
LANGUAGE_QUERY_PARAMETER = 'language'
def set_language(request):
"""
Redirect to a given URL while setting the chosen language in the session
(if enabled) and in a cookie. The URL and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if ((next or not request.is_ajax()) and
not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure())):
next = request.META.get('HTTP_REFERER')
next = next and unquote(next) # HTTP_REFERER may be encoded.
if not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure()):
next = '/'
response = HttpResponseRedirect(next) if next else HttpResponse(status=204)
if request.method == 'POST':
lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)
if lang_code and check_for_language(lang_code):
if next:
next_trans = translate_url(next, lang_code)
if next_trans != next:
response = HttpResponseRedirect(next_trans)
if hasattr(request, 'session'):
# Storing the language in the session is deprecated.
# (RemovedInDjango40Warning)
request.session[LANGUAGE_SESSION_KEY] = lang_code
response.set_cookie(
settings.LANGUAGE_COOKIE_NAME, lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN,
)
return response
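# Hedged wiring sketch: set_language is usually exposed through the stock
# i18n URLconf rather than referenced directly, e.g.:
#
#     # urls.py (illustrative)
#     from django.urls import include, path
#     urlpatterns = [path('i18n/', include('django.conf.urls.i18n'))]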
def get_formats():
"""Return all formats strings required for i18n to work."""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
return {attr: get_format(attr) for attr in FORMAT_SETTINGS}
js_catalog_template = r"""
{% autoescape off %}
(function(globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function(n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };
{% endif %}
/* gettext library */
django.catalog = django.catalog || {};
{% if catalog_str %}
var newcatalog = {{ catalog_str }};
for (var key in newcatalog) {
django.catalog[key] = newcatalog[key];
}
{% endif %}
if (!django.jsi18n_initialized) {
django.gettext = function(msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function(singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value.constructor === Array ? value[django.pluralidx(count)] : value;
}
};
django.gettext_noop = function(msgid) { return msgid; };
django.pgettext = function(context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function(context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
django.interpolate = function(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function(format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
django.jsi18n_initialized = true;
}
}(this));
{% endautoescape %}
"""
class JavaScriptCatalog(View):
"""
Return the selected language catalog as a JavaScript library.
Receive the list of packages to check for translations in the `packages`
kwarg either from the extra dictionary passed to the url() function or as a
plus-sign delimited string from the request. Default is 'django.conf'.
You can override the gettext domain for this view, but usually you don't
want to do that as JavaScript messages go to the djangojs domain. This
might be needed if you deliver your JavaScript source from Django templates.
"""
domain = 'djangojs'
packages = None
def get(self, request, *args, **kwargs):
locale = get_language()
domain = kwargs.get('domain', self.domain)
# If packages are not provided, default to all installed packages, as
# DjangoTranslation without localedirs harvests them all.
packages = kwargs.get('packages', '')
packages = packages.split('+') if packages else self.packages
paths = self.get_paths(packages) if packages else None
self.translation = DjangoTranslation(locale, domain=domain, localedirs=paths)
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def get_paths(self, packages):
allowable_packages = {app_config.name: app_config for app_config in apps.get_app_configs()}
app_configs = [allowable_packages[p] for p in packages if p in allowable_packages]
if len(app_configs) < len(packages):
excluded = [p for p in packages if p not in allowable_packages]
raise ValueError(
'Invalid package(s) provided to JavaScriptCatalog: %s' % ','.join(excluded)
)
# paths of requested packages
return [os.path.join(app.path, 'locale') for app in app_configs]
@property
def _num_plurals(self):
"""
Return the number of plurals for this catalog language, or 2 if no
plural string is available.
"""
match = re.search(r'nplurals=\s*(\d+)', self._plural_string or '')
if match:
return int(match.groups()[0])
return 2
@property
def _plural_string(self):
"""
Return the plural string (including nplurals) for this catalog language,
or None if no plural string is available.
"""
if '' in self.translation._catalog:
for line in self.translation._catalog[''].split('\n'):
if line.startswith('Plural-Forms:'):
return line.split(':', 1)[1].strip()
return None
def get_plural(self):
plural = self._plural_string
if plural is not None:
# This should be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 :
# n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
return plural
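# Hedged example: given the catalog metadata line
#     Plural-Forms: nplurals=2; plural=(n != 1);
# _plural_string is "nplurals=2; plural=(n != 1);", _num_plurals is 2,
# and get_plural() returns "(n != 1)".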
def get_catalog(self):
pdict = {}
num_plurals = self._num_plurals
catalog = {}
trans_cat = self.translation._catalog
trans_fallback_cat = self.translation._fallback._catalog if self.translation._fallback else {}
seen_keys = set()
for key, value in itertools.chain(trans_cat.items(), trans_fallback_cat.items()):
if key == '' or key in seen_keys:
continue
if isinstance(key, str):
catalog[key] = value
elif isinstance(key, tuple):
msgid, cnt = key
pdict.setdefault(msgid, {})[cnt] = value
else:
raise TypeError(key)
seen_keys.add(key)
for k, v in pdict.items():
catalog[k] = [v.get(i, '') for i in range(num_plurals)]
return catalog
def get_context_data(self, **kwargs):
return {
'catalog': self.get_catalog(),
'formats': get_formats(),
'plural': self.get_plural(),
}
def render_to_response(self, context, **response_kwargs):
def indent(s):
return s.replace('\n', '\n ')
template = Engine().from_string(js_catalog_template)
context['catalog_str'] = indent(
json.dumps(context['catalog'], sort_keys=True, indent=2)
) if context['catalog'] else None
context['formats_str'] = indent(json.dumps(context['formats'], sort_keys=True, indent=2))
return HttpResponse(template.render(Context(context)), 'text/javascript; charset="utf-8"')
class JSONCatalog(JavaScriptCatalog):
"""
Return the selected language catalog as a JSON object.
Receive the same parameters as JavaScriptCatalog and return a response
with a JSON object of the following format:
{
"catalog": {
# Translations catalog
},
"formats": {
# Language formats for date, time, etc.
},
"plural": '...' # Expression for plural forms, or null.
}
"""
def render_to_response(self, context, **response_kwargs):
return JsonResponse(context)
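# Hedged wiring sketch (URL pattern and package name are illustrative):
#
#     from django.urls import path
#     from django.views.i18n import JavaScriptCatalog
#
#     urlpatterns = [
#         path('jsi18n/', JavaScriptCatalog.as_view(packages=['your_app']),
#              name='javascript-catalog'),
#     ]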
|
1fea5c6793deb078a451cda32f02f5675519a59465d0ed1c36be9fab03e71b73 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import base64
import binascii
import cgi
import collections
from urllib.parse import unquote
from django.conf import settings
from django.core.exceptions import (
RequestDataTooBig, SuspiciousMultipartForm, TooManyFieldsSent,
)
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser:
"""
An RFC 2388 multipart/form-data parser.
``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the
uploaded data.
:encoding:
The encoding with which to treat the incoming data.
"""
# Content-Type should contain multipart and the boundary information.
content_type = META.get('CONTENT_TYPE', '')
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary.decode())
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, str):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Return a tuple containing the POST and FILES dictionary, respectively.
"""
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# The HTTP spec says that Content-Length >= 0 is valid, so handle
# content-length == 0 before continuing.
if self._content_length == 0:
return QueryDict(encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(
self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding,
)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict(mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
# Number of bytes that have been read.
num_bytes_read = 0
# To count the number of keys in the request.
num_post_keys = 0
# To limit the amount of data read from the request.
read_size = None
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_str(field_name, encoding, errors='replace')
if item_type == FIELD:
# Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
num_post_keys += 1
if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None and
settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys):
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
# Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read(size=read_size)
num_bytes_read += len(raw_data)
try:
data = base64.b64decode(raw_data)
except binascii.Error:
data = raw_data
else:
data = field_stream.read(size=read_size)
num_bytes_read += len(data)
# Add two here to make the check consistent with the
# x-www-form-urlencoded check that includes '&='.
num_bytes_read += len(field_name) + 2
if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
self._post.appendlist(field_name, force_str(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if file_name:
file_name = force_str(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
if not file_name:
continue
content_type, content_type_extra = meta_data.get('content-type', ('', {}))
content_type = content_type.strip()
charset = content_type_extra.get('charset')
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(
field_name, file_name, content_type,
content_length, charset, content_type_extra,
)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always decode base64 chunks by multiple of 4,
# ignoring whitespace.
stripped_chunk = b"".join(chunk.split())
remaining = len(stripped_chunk) % 4
while remaining != 0:
over_chunk = field_stream.read(4 - remaining)
stripped_chunk += b"".join(over_chunk.split())
remaining = len(stripped_chunk) % 4
try:
chunk = base64.b64decode(stripped_chunk)
except Exception as exc:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data.") from exc
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk, counters[i])
counters[i] += chunk_length
if chunk is None:
# Don't continue if the chunk received by
# the handler is None.
break
except SkipFile:
self._close_files()
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD nor a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
self._close_files()
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
# any() short-circuits if a handler's upload_complete() returns a value.
any(handler.upload_complete() for handler in handlers)
self._post._mutable = False
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signaling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_str(old_field_name, self._encoding, errors='replace'), file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\") + 1:].strip()
def _close_files(self):
# Free up all file handles.
# FIXME: this currently assumes that upload handlers store the file as 'file'
# We should document that... (Maybe add handler.free_file to complement new_file)
for handler in self._upload_handlers:
if hasattr(handler, 'file'):
handler.file.close()
class LazyStream:
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterator that yields a bytestring each time it is
advanced.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = self._remaining if size is None else size
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
# Otherwise, do some bookkeeping to return exactly enough of the
# stream, stashing any extra content we get from the producer.
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
try:
chunk = next(self)
except StopIteration:
return
else:
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = b''.join(parts())
return out
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
Return whatever chunk is conveniently returned from the iterator.
Useful to avoid unnecessary bookkeeping if performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replace the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Place bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = bytes + self._leftover
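# Hedged sketch of the read/unget interplay:
#
#     stream = LazyStream(iter([b'abcdef']))
#     stream.read(4)       # b'abcd'
#     stream.unget(b'cd')  # rewind two bytes
#     stream.read(4)       # b'cdef'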
def _update_unget_history(self, num_bytes):
"""
Update the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([
current_number for current_number in self._unget_history
if current_number == num_bytes
])
if number_equal > 40:
raise SuspiciousMultipartForm(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
class ChunkIter:
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, yield chunks of read operations from that object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter:
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter:
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
Future calls to next() after the boundary has been located will raise
StopIteration.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Read a single byte to make sure the stream isn't already exhausted,
# then push it back so iteration starts from the beginning.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data):
"""
Find a multipart boundary in data.
Should no boundary exist in the data, return None. Otherwise, return
a tuple containing the indices of the following:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = data.find(self._boundary)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end - 1)
if data[last:last + 1] == b'\n':
end -= 1
last = max(0, end - 1)
if data[last:last + 1] == b'\r':
end -= 1
return end, next
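# Hedged example: on an instance whose boundary is b'--XX',
#
#     self._find_boundary(b'data\r\n--XXrest')  # -> (4, 10)
#
# The payload ends at index 4 (the preceding CRLF is backed over) and the
# next encapsulation starts at index 10, just past the boundary bytes.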
def exhaust(stream_or_iterable):
"""Exhaust an iterator or stream."""
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
collections.deque(iterator, maxlen=0) # consume iterator quickly.
def parse_boundary_stream(stream, max_header_size):
"""
Parse one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the index of the start of these four bytes, so we'll
# need to consume them later to prevent them from polluting the
# payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# No header was found, so mark this fact and pass the stream on
# verbatim.
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser:
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
"""
Parse the header into a key-value.
Input (line): bytes, output: str for key/name, bytes for values which
will be decoded later.
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
has_encoding = False
name = p[:i].strip().lower().decode('ascii')
if name.endswith('*'):
# Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
# http://tools.ietf.org/html/rfc2231#section-4
name = name[:-1]
if p.count(b"'") == 2:
has_encoding = True
value = p[i + 1:].strip()
if has_encoding:
encoding, lang, value = value.split(b"'")
value = unquote(value.decode(), encoding=encoding.decode())
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
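# Hedged example:
#
#     parse_header(b'content-disposition: form-data; name="f"; filename="x.txt"')
#     # -> ('content-disposition: form-data', {'name': b'f', 'filename': b'x.txt'})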
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
|
be96cc69b4c82a5bd9a2c86278e70741564ae91c07ebec446f7799a3a86f7507 | """
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one relation:
``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
   the descriptor is concerned. The constraint is checked upstream (uniqueness
   validation in forms) or downstream (unique indexes in the database).
2. Related instance on the forward side of a one-to-one
relation: ``ForwardOneToOneDescriptor``.
It avoids querying the database when accessing the parent link field in
a multi-table inheritance scenario.
3. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
4. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
5. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
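As a rough usage sketch, assuming the ``Parent``/``Child`` example above::

    child = Child.objects.get(pk=1)
    child.parent                 # ForwardManyToOneDescriptor: one instance
    child.parent.children.all()  # ReverseManyToOneDescriptor: a manager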
"""
from django.core.exceptions import FieldError
from django.db import connections, router, transaction
from django.db.models import Q, signals
from django.db.models.query import QuerySet
from django.utils.functional import cached_property
class ForwardManyToOneDescriptor:
"""
Accessor to the related object on the forward side of a many-to-one or
one-to-one (via ForwardOneToOneDescriptor subclass) relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
"""
def __init__(self, field_with_rel):
self.field = field_with_rel
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `self.field.model` might
# still be a string model reference.
return type(
'RelatedObjectDoesNotExist',
(self.field.remote_field.model.DoesNotExist, AttributeError), {
'__module__': self.field.model.__module__,
'__qualname__': '%s.%s.RelatedObjectDoesNotExist' % (
self.field.model.__qualname__,
self.field.name,
),
}
)
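    # A usage sketch: the exception subclasses both the related model's
    # DoesNotExist and AttributeError, so either can be caught, e.g.:
    #
    #     try:
    #         child.parent
    #     except Child.parent.RelatedObjectDoesNotExist:
    #         ...  # raised when no parent is set and the field isn't null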
def is_cached(self, instance):
return self.field.is_cached(instance)
def get_queryset(self, **hints):
return self.field.remote_field.model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
remote_field = self.field.remote_field
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: {instance_attr(inst)[0] for inst in instances}}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not remote_field.multiple:
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
remote_field.set_cached_value(rel_obj, instance)
return queryset, rel_obj_attr, instance_attr, True, self.field.get_cache_name(), False
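    # Note on the 6-tuple returned above: prefetch_related() unpacks it as
    # (queryset, rel_obj_attr, instance_attr, single, cache_name, is_descriptor);
    # single=True here because a forward many-to-one resolves to one object.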
def get_object(self, instance):
qs = self.get_queryset(instance=instance)
# Assuming the database enforces foreign keys, this won't fail.
return qs.get(self.field.get_reverse_related_filter(instance))
def __get__(self, instance, cls=None):
"""
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the reverse accessor (ReverseOneToOneDescriptor).
try:
rel_obj = self.field.get_cached_value(instance)
except KeyError:
has_value = None not in self.field.get_local_related_value(instance)
ancestor_link = instance._meta.get_ancestor_link(self.field.model) if has_value else None
if ancestor_link and ancestor_link.is_cached(instance):
# An ancestor link will exist if this field is defined on a
# multi-table inheritance parent of the instance's class.
ancestor = ancestor_link.get_cached_value(instance)
# The value might be cached on an ancestor if the instance
# originated from walking down the inheritance chain.
rel_obj = self.field.get_cached_value(ancestor, default=None)
else:
rel_obj = None
if rel_obj is None and has_value:
rel_obj = self.get_object(instance)
remote_field = self.field.remote_field
# If this is a one-to-one relation, set the reverse accessor
# cache on the related object to the current instance to avoid
# an extra SQL query if it's accessed later on.
if not remote_field.multiple:
remote_field.set_cached_value(rel_obj, instance)
self.field.set_cached_value(instance, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign
"""
# An object must be an instance of the related class.
if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
remote_field = self.field.remote_field
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = self.field.get_cached_value(instance, default=None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
remote_field.set_cached_value(related, None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.field.set_cached_value(instance, value)
# If this is a one-to-one relation, set the reverse accessor cache on
# the related object to the current instance to avoid an extra SQL
# query if it's accessed later on.
if value is not None and not remote_field.multiple:
remote_field.set_cached_value(value, instance)
def __reduce__(self):
"""
Pickling should return the instance attached by self.field on the
model, not a new copy of that descriptor. Use getattr() to retrieve
the instance directly from the model.
"""
return getattr, (self.field.model, self.field.name)
class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor):
"""
Accessor to the related object on the forward side of a one-to-one relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance.
"""
def get_object(self, instance):
if self.field.remote_field.parent_link:
deferred = instance.get_deferred_fields()
# Because it's a parent link, all the data is available in the
# instance, so populate the parent model with this data.
rel_model = self.field.remote_field.model
fields = [field.attname for field in rel_model._meta.concrete_fields]
# If any of the related model's fields are deferred, fallback to
# fetching all fields from the related model. This avoids a query
# on the related model for every deferred field.
if not any(field in fields for field in deferred):
kwargs = {field: getattr(instance, field) for field in fields}
obj = rel_model(**kwargs)
obj._state.adding = instance._state.adding
obj._state.db = instance._state.db
return obj
return super().get_object(instance)
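    # A sketch of the parent-link fast path above (hypothetical MTI models)::
    #
    #     class Place(Model):
    #         name = CharField(max_length=50)
    #     class Restaurant(Place):   # multi-table inheritance
    #         serves_pizza = BooleanField(default=False)
    #
    # Accessing ``restaurant.place_ptr`` then builds the Place from data
    # already loaded on the Restaurant instance instead of querying again,
    # unless some parent fields were deferred.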
def __set__(self, instance, value):
super().__set__(instance, value)
# If the primary key is a link to a parent model and a parent instance
# is being set, update the value of the inherited pk(s).
if self.field.primary_key and self.field.remote_field.parent_link:
opts = instance._meta
# Inherited primary key fields from this object's base classes.
inherited_pk_fields = [
field for field in opts.concrete_fields
if field.primary_key and field.remote_field
]
for field in inherited_pk_fields:
rel_model_pk_name = field.remote_field.model._meta.pk.attname
raw_value = getattr(value, rel_model_pk_name) if value is not None else None
setattr(instance, rel_model_pk_name, raw_value)
class ReverseOneToOneDescriptor:
"""
Accessor to the related object on the reverse side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``Place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
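    A rough usage sketch::

        place = Place.objects.get(pk=1)
        try:
            place.restaurant    # the single related instance, or raises
        except Place.restaurant.RelatedObjectDoesNotExist:
            pass                # no Restaurant points at this Place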
"""
def __init__(self, related):
# Following the example above, `related` is an instance of OneToOneRel
# which represents the reverse restaurant field (place.restaurant).
self.related = related
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ForwardManyToOneDescriptor`.
return type(
'RelatedObjectDoesNotExist',
(self.related.related_model.DoesNotExist, AttributeError), {
'__module__': self.related.model.__module__,
'__qualname__': '%s.%s.RelatedObjectDoesNotExist' % (
self.related.model.__qualname__,
self.related.name,
)
},
)
def is_cached(self, instance):
return self.related.is_cached(instance)
def get_queryset(self, **hints):
return self.related.related_model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.related.field.get_local_related_value
instance_attr = self.related.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
self.related.field.set_cached_value(rel_obj, instance)
return queryset, rel_obj_attr, instance_attr, True, self.related.get_cache_name(), False
def __get__(self, instance, cls=None):
"""
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the forward accessor (ForwardManyToOneDescriptor).
try:
rel_obj = self.related.get_cached_value(instance)
except KeyError:
related_pk = instance.pk
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
# Set the forward accessor cache on the related object to
# the current instance to avoid an extra SQL query if it's
# accessed later on.
self.related.field.set_cached_value(rel_obj, instance)
self.related.set_cached_value(instance, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
# The similarity of the code below to the code in
# ForwardManyToOneDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
if value is None:
# Update the cached related instance (if any) & clear the cache.
# Following the example above, this would be the cached
# ``restaurant`` instance (if any).
rel_obj = self.related.get_cached_value(instance, default=None)
if rel_obj is not None:
# Remove the ``restaurant`` instance from the ``place``
# instance cache.
self.related.delete_cached_value(instance)
# Set the ``place`` field on the ``restaurant``
# instance to None.
setattr(rel_obj, self.related.field.name, None)
elif not isinstance(value, self.related.related_model):
# An object must be an instance of the related class.
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
else:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.related.set_cached_value(instance, value)
# Set the forward accessor cache on the related object to the current
# instance to avoid an extra SQL query if it's accessed later on.
self.related.field.set_cached_value(value, instance)
def __reduce__(self):
# Same purpose as ForwardManyToOneDescriptor.__reduce__().
return getattr, (self.related.model, self.related.name)
class ReverseManyToOneDescriptor:
"""
Accessor to the related objects manager on the reverse side of a
many-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``Parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_reverse_many_to_one_manager()`` defined below.
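    A rough usage sketch (``name`` is a hypothetical field)::

        parent.children.all()              # queryset filtered to this parent
        parent.children.add(child)         # sets child.parent and saves it
        parent.children.create(name='x')   # creates with parent pre-filled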
"""
def __init__(self, rel):
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model
return create_reverse_many_to_one_manager(
related_model._default_manager.__class__,
self.rel,
)
def __get__(self, instance, cls=None):
"""
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused)
"""
if instance is None:
return self
return self.related_manager_cls(instance)
def _get_set_deprecation_msg_params(self):
return (
'reverse side of a related set',
self.rel.get_accessor_name(),
)
def __set__(self, instance, value):
raise TypeError(
'Direct assignment to the %s is prohibited. Use %s.set() instead.'
% self._get_set_deprecation_msg_params(),
)
def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
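    For instance, a specific manager can be selected per access (a sketch;
    ``archived`` is a hypothetical manager name on the related model)::

        parent.children(manager='archived').all()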
"""
class RelatedManager(superclass):
def __init__(self, instance):
super().__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return queryset.none()
if self.field.many_to_one:
            # Guard against field-like objects such as GenericRelation that
            # abuse create_reverse_many_to_one_manager() with reverse
            # one-to-many relationships, which would otherwise break known
            # related objects assignment.
try:
target_field = self.field.target_field
except FieldError:
# The relationship has multiple target fields. Use a tuple
# for related object id.
rel_obj_id = tuple([
getattr(self.instance, target_field.attname)
for target_field in self.field.get_path_info()[-1].target_fields
])
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.field.remote_field.get_cache_name())
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.remote_field.get_cache_name()]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.remote_field.get_cache_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name, False
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj,
))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.field.name: self.instance,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel.field.null:
def remove(self, *objs, bulk=True):
if not objs:
return
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear(bulk=bulk)
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
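        # A behavioral sketch of set() for a nullable FK: given existing
        # children {a, b}, parent.children.set([b, c]) removes only ``a`` and
        # adds only ``c``; ``b`` is left untouched (no needless writes).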
return RelatedManager
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the forward and reverse sides of
a many-to-many relation.
In the example::
class Pizza(Model):
toppings = ManyToManyField(Topping, related_name='pizzas')
``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor``
instances.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
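    A rough usage sketch::

        pizza.toppings.add(topping)   # forward side
        topping.pizzas.all()          # reverse side, same descriptor class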
"""
def __init__(self, rel, reverse=False):
super().__init__(rel)
self.reverse = reverse
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.rel.through
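    # For example, ``Book.authors.through`` (per the comment above) is the
    # intermediate model and can be queried directly, e.g. a sketch:
    #
    #     Book.authors.through.objects.filter(book=book)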
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model if self.reverse else self.rel.model
return create_forward_many_to_many_manager(
related_model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
def _get_set_deprecation_msg_params(self):
return (
'%s side of a many-to-many set' % ('reverse' if self.reverse else 'forward'),
self.rel.get_accessor_name() if self.reverse else self.field.name,
)
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
    Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
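    For instance (a sketch; ``membership_date`` is a hypothetical field on a
    custom ``through`` model)::

        pizza.toppings.add(topping, through_defaults={'membership_date': today})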
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.pk_field_names[self.source_field_name]))
            # Even if this relation is not to pk, we still require a pk value.
            # The expectation is that the instance has already been saved to
            # the DB, although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
False,
)
@property
def constrained_target(self):
            # If the through relation's target field's foreign key integrity
            # is enforced, the query can be performed solely against the
            # through table as the INNER JOIN against the target table is
            # unnecessary.
if not self.target_field.db_constraint:
return None
db = router.db_for_read(self.through, instance=self.instance)
if not connections[db].features.supports_foreign_keys:
return None
hints = {'instance': self.instance}
manager = self.through._base_manager.db_manager(db, hints=hints)
filters = {self.source_field_name: self.instance.pk}
# Nullable target rows must be excluded as well as they would have
# been filtered out from an INNER JOIN.
if self.target_field.null:
filters['%s__isnull' % self.target_field_name] = False
return manager.filter(**filters)
def exists(self):
constrained_target = self.constrained_target
return constrained_target.exists() if constrained_target else super().exists()
def count(self):
constrained_target = self.constrained_target
return constrained_target.count() if constrained_target else super().count()
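        # A sketch of the optimization above: when FK constraints are enforced,
        # pizza.toppings.count() can be answered from the through table alone
        # (no JOIN against the target table); otherwise it falls back to the
        # regular queryset (e.g. on MySQL MyISAM, which doesn't enforce FKs).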
def add(self, *objs, through_defaults=None):
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(
self.source_field_name, self.target_field_name, *objs,
through_defaults=through_defaults,
)
# If this is a symmetrical m2m relation to self, add the mirror
# entry in the m2m table. `through_defaults` aren't used here
# because of the system check error fields.E332: Many-to-many
# fields with intermediate tables must not be symmetrical.
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(
sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super().get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
clear.alters_data = True
def set(self, objs, *, clear=False, through_defaults=None):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, through_defaults=through_defaults)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (
self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj
)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs, through_defaults=through_defaults)
set.alters_data = True
def create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj, through_defaults=through_defaults)
return new_obj
create.alters_data = True
def get_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
update_or_create.alters_data = True
def _get_target_ids(self, target_field_name, objs):
"""
Return the set of ids of `objs` that the target field references.
"""
from django.db.models import Model
target_ids = set()
target_field = self.through._meta.get_field(target_field_name)
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", '
'value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
target_id = target_field.get_foreign_related_value(obj)[0]
if target_id is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
target_ids.add(target_id)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
target_ids.add(obj)
return target_ids
def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids):
"""
            Return the subset of `target_ids` that aren't already assigned to
            this relationship.
"""
vals = self.through._default_manager.using(db).values_list(
target_field_name, flat=True
).filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: target_ids,
})
return target_ids.difference(vals)
def _get_add_plan(self, db, source_field_name):
"""
            Return a boolean triple describing how the add should be performed.
            The first element is whether conflicts can be ignored (it is passed
            to bulk_create() as ignore_conflicts), the second whether
            m2m_changed signals must be sent, and the third whether the fast
            path of a single bulk insertion with conflicts ignored (and no
            signals) can be taken.
"""
# Conflicts can be ignored when the intermediary model is
# auto-created as the only possible collision is on the
# (source_id, target_id) tuple. The same assertion doesn't hold for
# user-defined intermediary models as they could have other fields
# causing conflicts which must be surfaced.
can_ignore_conflicts = (
connections[db].features.supports_ignore_conflicts and
self.through._meta.auto_created is not False
)
            # Don't send the signal when inserting duplicate data rows
            # for symmetrical reverse entries.
must_send_signals = (self.reverse or source_field_name == self.source_field_name) and (
signals.m2m_changed.has_listeners(self.through)
)
# Fast addition through bulk insertion can only be performed
# if no m2m_changed listeners are connected for self.through
# as they require the added set of ids to be provided via
# pk_set.
return can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and not must_send_signals)
def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None):
            # source_field_name: the PK fieldname in the join table for the source object
            # target_field_name: the PK fieldname in the join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
through_defaults = through_defaults or {}
# If there aren't any objects, there is nothing to do.
if objs:
target_ids = self._get_target_ids(target_field_name, objs)
db = router.db_for_write(self.through, instance=self.instance)
can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan(db, source_field_name)
if can_fast_add:
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: target_id,
})
for target_id in target_ids
], ignore_conflicts=True)
return
missing_target_ids = self._get_missing_target_ids(
source_field_name, target_field_name, db, target_ids
)
with transaction.atomic(using=db, savepoint=False):
if must_send_signals:
signals.m2m_changed.send(
sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=missing_target_ids, using=db,
)
# Add the ones that aren't there already.
self.through._default_manager.using(db).bulk_create([
self.through(**through_defaults, **{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: target_id,
})
for target_id in missing_target_ids
], ignore_conflicts=can_ignore_conflicts)
if must_send_signals:
signals.m2m_changed.send(
sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=missing_target_ids, using=db,
)
def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK colname in the join table for the source object
            # target_field_name: the PK colname in the join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(
sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
target_model_qs = super().get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.target_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
return ManyRelatedManager