# ======================================================================
# django/db/models/functions/window.py (Django ~2.0/2.1 sources)
# ======================================================================
from django.db.models import FloatField, Func, IntegerField

__all__ = [
    'CumeDist', 'DenseRank', 'FirstValue', 'Lag', 'LastValue', 'Lead',
    'NthValue', 'Ntile', 'PercentRank', 'Rank', 'RowNumber',
]


class CumeDist(Func):
    function = 'CUME_DIST'
    name = 'CumeDist'
    output_field = FloatField()
    window_compatible = True


class DenseRank(Func):
    function = 'DENSE_RANK'
    name = 'DenseRank'
    output_field = IntegerField()
    window_compatible = True


class FirstValue(Func):
    arity = 1
    function = 'FIRST_VALUE'
    name = 'FirstValue'
    window_compatible = True


class LagLeadFunction(Func):
    window_compatible = True

    def __init__(self, expression, offset=1, default=None, **extra):
        if expression is None:
            raise ValueError(
                '%s requires a non-null source expression.' %
                self.__class__.__name__
            )
        if offset is None or offset <= 0:
            raise ValueError(
                '%s requires a positive integer for the offset.' %
                self.__class__.__name__
            )
        args = (expression, offset)
        if default is not None:
            args += (default,)
        super().__init__(*args, **extra)

    def _resolve_output_field(self):
        sources = self.get_source_expressions()
        return sources[0].output_field


class Lag(LagLeadFunction):
    function = 'LAG'
    name = 'Lag'


class LastValue(Func):
    arity = 1
    function = 'LAST_VALUE'
    name = 'LastValue'
    window_compatible = True


class Lead(LagLeadFunction):
    function = 'LEAD'
    name = 'Lead'


class NthValue(Func):
    function = 'NTH_VALUE'
    name = 'NthValue'
    window_compatible = True

    def __init__(self, expression, nth=1, **extra):
        if expression is None:
            raise ValueError(
                '%s requires a non-null source expression.' %
                self.__class__.__name__
            )
        if nth is None or nth <= 0:
            raise ValueError(
                '%s requires a positive integer for nth.' %
                self.__class__.__name__
            )
        super().__init__(expression, nth, **extra)

    def _resolve_output_field(self):
        sources = self.get_source_expressions()
        return sources[0].output_field


class Ntile(Func):
    function = 'NTILE'
    name = 'Ntile'
    output_field = IntegerField()
    window_compatible = True

    def __init__(self, num_buckets=1, **extra):
        if num_buckets <= 0:
            raise ValueError('num_buckets must be greater than 0.')
        super().__init__(num_buckets, **extra)


class PercentRank(Func):
    function = 'PERCENT_RANK'
    name = 'PercentRank'
    output_field = FloatField()
    window_compatible = True


class Rank(Func):
    function = 'RANK'
    name = 'Rank'
    output_field = IntegerField()
    window_compatible = True


class RowNumber(Func):
    function = 'ROW_NUMBER'
    name = 'RowNumber'
    output_field = IntegerField()
    window_compatible = True
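
# ----------------------------------------------------------------------
# Usage sketch (not part of the module above): these functions must be
# wrapped in a Window() expression when annotating a queryset. Window,
# F, and Lag are real Django 2.0+ APIs; the `Sale` model and its fields
# are hypothetical.
# ----------------------------------------------------------------------
from django.db.models import F, Window
from django.db.models.functions import Lag

prior_sales = Sale.objects.annotate(  # Sale is a hypothetical model
    previous_amount=Window(
        expression=Lag('amount', offset=1, default=0),  # LAG(amount, 1, 0)
        partition_by=[F('region')],
        order_by=F('sold_at').asc(),
    ),
)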
# ======================================================================
# django/db/models/functions/datetime.py
# ======================================================================
from datetime import datetime

from django.conf import settings
from django.db.models import (
    DateField, DateTimeField, DurationField, Field, Func, IntegerField,
    TimeField, Transform, fields,
)
from django.db.models.lookups import (
    YearExact, YearGt, YearGte, YearLt, YearLte,
)
from django.utils import timezone


class TimezoneMixin:
    tzinfo = None

    def get_tzname(self):
        # Timezone conversions must happen to the input datetime *before*
        # applying a function. 2015-12-31 23:00:00 -02:00 is stored in the
        # database as 2016-01-01 01:00:00 +00:00. Any results should be
        # based on the input datetime not the stored datetime.
        tzname = None
        if settings.USE_TZ:
            if self.tzinfo is None:
                tzname = timezone.get_current_timezone_name()
            else:
                tzname = timezone._get_timezone_name(self.tzinfo)
        return tzname


class Extract(TimezoneMixin, Transform):
    lookup_name = None
    output_field = IntegerField()

    def __init__(self, expression, lookup_name=None, tzinfo=None, **extra):
        if self.lookup_name is None:
            self.lookup_name = lookup_name
        if self.lookup_name is None:
            raise ValueError('lookup_name must be provided')
        self.tzinfo = tzinfo
        super().__init__(expression, **extra)

    def as_sql(self, compiler, connection):
        sql, params = compiler.compile(self.lhs)
        lhs_output_field = self.lhs.output_field
        if isinstance(lhs_output_field, DateTimeField):
            tzname = self.get_tzname()
            sql = connection.ops.datetime_extract_sql(self.lookup_name, sql, tzname)
        elif isinstance(lhs_output_field, DateField):
            sql = connection.ops.date_extract_sql(self.lookup_name, sql)
        elif isinstance(lhs_output_field, TimeField):
            sql = connection.ops.time_extract_sql(self.lookup_name, sql)
        elif isinstance(lhs_output_field, DurationField):
            if not connection.features.has_native_duration_field:
                raise ValueError('Extract requires native DurationField database support.')
            sql = connection.ops.time_extract_sql(self.lookup_name, sql)
        else:
            # resolve_expression has already validated the output_field so this
            # assert should never be hit.
            assert False, "Tried to Extract from an invalid type."
        return sql, params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        field = copy.lhs.output_field
        if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)):
            raise ValueError(
                'Extract input expression must be DateField, DateTimeField, '
                'TimeField, or DurationField.'
            )
        # Passing dates to functions expecting datetimes is most likely a mistake.
        if type(field) == DateField and copy.lookup_name in ('hour', 'minute', 'second'):
            raise ValueError(
                "Cannot extract time component '%s' from DateField '%s'." %
                (copy.lookup_name, field.name)
            )
        return copy


class ExtractYear(Extract):
    lookup_name = 'year'


class ExtractMonth(Extract):
    lookup_name = 'month'


class ExtractDay(Extract):
    lookup_name = 'day'


class ExtractWeek(Extract):
    """
    Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first day of
    the week.
    """
    lookup_name = 'week'


class ExtractWeekDay(Extract):
    """
    Return Sunday=1 through Saturday=7.

    To replicate this in Python: (mydatetime.isoweekday() % 7) + 1
    """
    lookup_name = 'week_day'


class ExtractQuarter(Extract):
    lookup_name = 'quarter'


class ExtractHour(Extract):
    lookup_name = 'hour'


class ExtractMinute(Extract):
    lookup_name = 'minute'


class ExtractSecond(Extract):
    lookup_name = 'second'


DateField.register_lookup(ExtractYear)
DateField.register_lookup(ExtractMonth)
DateField.register_lookup(ExtractDay)
DateField.register_lookup(ExtractWeekDay)
DateField.register_lookup(ExtractWeek)
DateField.register_lookup(ExtractQuarter)

TimeField.register_lookup(ExtractHour)
TimeField.register_lookup(ExtractMinute)
TimeField.register_lookup(ExtractSecond)

DateTimeField.register_lookup(ExtractHour)
DateTimeField.register_lookup(ExtractMinute)
DateTimeField.register_lookup(ExtractSecond)

ExtractYear.register_lookup(YearExact)
ExtractYear.register_lookup(YearGt)
ExtractYear.register_lookup(YearGte)
ExtractYear.register_lookup(YearLt)
ExtractYear.register_lookup(YearLte)


class Now(Func):
    template = 'CURRENT_TIMESTAMP'
    output_field = fields.DateTimeField()

    def as_postgresql(self, compiler, connection):
        # PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the
        # transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with
        # other databases.
        return self.as_sql(compiler, connection, template='STATEMENT_TIMESTAMP()')


class TruncBase(TimezoneMixin, Transform):
    kind = None
    tzinfo = None

    def __init__(self, expression, output_field=None, tzinfo=None, **extra):
        self.tzinfo = tzinfo
        super().__init__(expression, output_field=output_field, **extra)

    def as_sql(self, compiler, connection):
        inner_sql, inner_params = compiler.compile(self.lhs)
        # Escape any params because trunc_sql will format the string.
        inner_sql = inner_sql.replace('%s', '%%s')
        if isinstance(self.output_field, DateTimeField):
            tzname = self.get_tzname()
            sql = connection.ops.datetime_trunc_sql(self.kind, inner_sql, tzname)
        elif isinstance(self.output_field, DateField):
            sql = connection.ops.date_trunc_sql(self.kind, inner_sql)
        elif isinstance(self.output_field, TimeField):
            sql = connection.ops.time_trunc_sql(self.kind, inner_sql)
        else:
            raise ValueError('Trunc only valid on DateField, TimeField, or DateTimeField.')
        return sql, inner_params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        field = copy.lhs.output_field
        # DateTimeField is a subclass of DateField so this works for both.
        assert isinstance(field, (DateField, TimeField)), (
            "%r isn't a DateField, TimeField, or DateTimeField." % field.name
        )
        # If self.output_field was None, then accessing the field will trigger
        # the resolver to assign it to self.lhs.output_field.
        if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)):
            raise ValueError('output_field must be either DateField, TimeField, or DateTimeField')
        # Passing dates or times to functions expecting datetimes is most
        # likely a mistake.
        class_output_field = self.__class__.output_field if isinstance(self.__class__.output_field, Field) else None
        output_field = class_output_field or copy.output_field
        has_explicit_output_field = class_output_field or field.__class__ is not copy.output_field.__class__
        if type(field) == DateField and (
                isinstance(output_field, DateTimeField) or
                copy.kind in ('hour', 'minute', 'second', 'time')):
            raise ValueError("Cannot truncate DateField '%s' to %s." % (
                field.name,
                output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField'
            ))
        elif isinstance(field, TimeField) and (
                isinstance(output_field, DateTimeField) or
                copy.kind in ('year', 'quarter', 'month', 'week', 'day', 'date')):
            raise ValueError("Cannot truncate TimeField '%s' to %s." % (
                field.name,
                output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField'
            ))
        return copy

    def convert_value(self, value, expression, connection):
        if isinstance(self.output_field, DateTimeField):
            if settings.USE_TZ:
                if value is None:
                    raise ValueError(
                        "Database returned an invalid datetime value. "
                        "Are time zone definitions for your database installed?"
                    )
                value = value.replace(tzinfo=None)
                value = timezone.make_aware(value, self.tzinfo)
        elif isinstance(value, datetime):
            if isinstance(self.output_field, DateField):
                value = value.date()
            elif isinstance(self.output_field, TimeField):
                value = value.time()
        return value


class Trunc(TruncBase):

    def __init__(self, expression, kind, output_field=None, tzinfo=None, **extra):
        self.kind = kind
        super().__init__(expression, output_field=output_field, tzinfo=tzinfo, **extra)


class TruncYear(TruncBase):
    kind = 'year'


class TruncQuarter(TruncBase):
    kind = 'quarter'


class TruncMonth(TruncBase):
    kind = 'month'


class TruncWeek(TruncBase):
    """Truncate to midnight on the Monday of the week."""
    kind = 'week'


class TruncDay(TruncBase):
    kind = 'day'


class TruncDate(TruncBase):
    kind = 'date'
    lookup_name = 'date'
    output_field = DateField()

    def as_sql(self, compiler, connection):
        # Cast to date rather than truncate to date.
        lhs, lhs_params = compiler.compile(self.lhs)
        tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
        sql = connection.ops.datetime_cast_date_sql(lhs, tzname)
        return sql, lhs_params


class TruncTime(TruncBase):
    kind = 'time'
    lookup_name = 'time'
    output_field = TimeField()

    def as_sql(self, compiler, connection):
        # Cast to time rather than truncate to time.
        lhs, lhs_params = compiler.compile(self.lhs)
        tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
        sql = connection.ops.datetime_cast_time_sql(lhs, tzname)
        return sql, lhs_params


class TruncHour(TruncBase):
    kind = 'hour'


class TruncMinute(TruncBase):
    kind = 'minute'


class TruncSecond(TruncBase):
    kind = 'second'


DateTimeField.register_lookup(TruncDate)
DateTimeField.register_lookup(TruncTime)
# ======================================================================
# django/db/models/sql/query.py (excerpt; truncated mid-file)
# ======================================================================
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import functools from collections import Counter, OrderedDict, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import ( EmptyResultSet, FieldDoesNotExist, FieldError, ) from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import Col, Ref from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import ( INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE, ) from django.db.models.sql.datastructures import ( BaseTable, Empty, Join, MultiJoin, ) from django.db.models.sql.where import ( AND, OR, ExtraWhere, NothingNode, WhereNode, ) from django.utils.encoding import force_text from django.utils.functional import cached_property from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] def get_field_names_from_opts(opts): return set(chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() )) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( 'JoinInfo', ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function') ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=None): self.params = params or () self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.column_name_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): return dict if isinstance(self.params, Mapping) else tuple def __str__(self): return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. 
params_type = self.params_type adapter = connection.ops.adapt_unknown_value if params_type is tuple: params = tuple(adapter(val) for val in self.params) elif params_type is dict: params = {key: adapter(val) for key, val in self.params.items()} else: raise RuntimeError("Unexpected params type: %s" % params_type) self.cursor = connection.cursor() self.cursor.execute(self.sql, params) class Query: """A single SQL query.""" alias_prefix = 'T' subq_aliases = frozenset([alias_prefix]) compiler = 'SQLCompiler' def __init__(self, model, where=WhereNode): self.model = model self.alias_refcount = {} # alias_map is the most important data structure regarding joins. # It's used for recording which joins exist in the query and what # types they are. The key is the alias of the joined table (possibly # the table name) and the value is a Join-like object (see # sql.datastructures.Join for more information). self.alias_map = OrderedDict() # Sometimes the query contains references to aliases in outer queries (as # a result of split_exclude). Correct alias quoting needs to know these # aliases too. self.external_aliases = set() self.table_map = {} # Maps table names to list of aliases. self.default_cols = True self.default_ordering = True self.standard_ordering = True self.used_aliases = set() self.filter_is_sticky = False self.subquery = False # SQL-related attributes # Select and related select clauses are expressions to use in the # SELECT clause of the query. # The select is used for cases where we want to set up the select # clause to contain other than default fields (values(), subqueries...) # Note that annotations go to annotations dictionary. self.select = () self.where = where() self.where_class = where # The group_by attribute can have one of the following forms: # - None: no group by at all in the query # - A tuple of expressions: group by (at least) those expressions. # String refs are also allowed for now. # - True: group by all select fields of the model # See compiler.get_group_by() for details. self.group_by = None self.order_by = () self.low_mark, self.high_mark = 0, None # Used for offset/limit self.distinct = False self.distinct_fields = () self.select_for_update = False self.select_for_update_nowait = False self.select_for_update_skip_locked = False self.select_for_update_of = () self.select_related = False # Arbitrary limit for select_related to prevents infinite recursion. self.max_depth = 5 # Holds the selects defined by a call to values() or values_list() # excluding annotation_select and extra_select. self.values_select = () # SQL annotation-related attributes # The _annotations will be an OrderedDict when used. Due to the cost # of creating OrderedDict this attribute is created lazily (in # self.annotations property). self._annotations = None # Maps alias -> Annotation Expression self.annotation_select_mask = None self._annotation_select_cache = None # Set combination attributes self.combinator = None self.combinator_all = False self.combined_queries = () # These are for extensions. The contents are more or less appended # verbatim to the appropriate clause. # The _extra attribute is an OrderedDict, lazily created similarly to # .annotations self._extra = None # Maps col_alias -> (col_sql, params). self.extra_select_mask = None self._extra_select_cache = None self.extra_tables = () self.extra_order_by = () # A tuple that is a set of model field names and either True, if these # are the fields to defer, or False if these are the only fields to # load. 
self.deferred_loading = (frozenset(), True) self._filtered_relations = {} self.explain_query = False self.explain_format = None self.explain_options = {} @property def extra(self): if self._extra is None: self._extra = OrderedDict() return self._extra @property def annotations(self): if self._annotations is None: self._annotations = OrderedDict() return self._annotations @property def has_select_fields(self): return bool(self.select or self.annotation_select_mask or self.extra_select_mask) @cached_property def base_table(self): for alias in self.alias_map: return alias def __str__(self): """ Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ sql, params = self.sql_with_params() return sql % params def sql_with_params(self): """ Return the query as an SQL string and the parameters that will be substituted into the query. """ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() def __deepcopy__(self, memo): """Limit the amount of work when a Query is deepcopied.""" result = self.clone() memo[id(self)] = result return result def _prepare(self, field): return self def get_compiler(self, using=None, connection=None): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] return connection.ops.compiler(self.compiler)(self, connection, using) def get_meta(self): """ Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses. """ return self.model._meta def clone(self): """ Return a copy of the current Query. A lightweight alternative to to deepcopy(). """ obj = Empty() obj.__class__ = self.__class__ # Copy references to everything. obj.__dict__ = self.__dict__.copy() # Clone attributes that can't use shallow copy. obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.external_aliases = self.external_aliases.copy() obj.table_map = self.table_map.copy() obj.where = self.where.clone() obj._annotations = self._annotations.copy() if self._annotations is not None else None if self.annotation_select_mask is None: obj.annotation_select_mask = None else: obj.annotation_select_mask = self.annotation_select_mask.copy() # _annotation_select_cache cannot be copied, as doing so breaks the # (necessary) state in which both annotations and # _annotation_select_cache point to the same underlying objects. # It will get re-populated in the cloned queryset the next time it's # used. obj._annotation_select_cache = None obj._extra = self._extra.copy() if self._extra is not None else None if self.extra_select_mask is None: obj.extra_select_mask = None else: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is None: obj._extra_select_cache = None else: obj._extra_select_cache = self._extra_select_cache.copy() if 'subq_aliases' in self.__dict__: obj.subq_aliases = self.subq_aliases.copy() obj.used_aliases = self.used_aliases.copy() obj._filtered_relations = self._filtered_relations.copy() # Clear the cached_property try: del obj.base_table except AttributeError: pass return obj def chain(self, klass=None): """ Return a copy of the current Query that's ready for another operation. The klass argument changes the type of the Query, e.g. UpdateQuery. 
""" obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, '_setup_query'): obj._setup_query() return obj def relabeled_clone(self, change_map): clone = self.clone() clone.change_aliases(change_map) return clone def rewrite_cols(self, annotation, col_cnt): # We must make sure the inner query has the referred columns in it. # If we are aggregating over an annotation, then Django uses Ref() # instances to note this. However, if we are annotating over a column # of a related model, then it might be that column isn't part of the # SELECT clause of the inner query, and we must manually make sure # the column is selected. An example case is: # .aggregate(Sum('author__awards')) # Resolving this expression results in a join to author, but there # is no guarantee the awards column of author is in the select clause # of the query. Thus we must manually add the column to the inner # query. orig_exprs = annotation.get_source_expressions() new_exprs = [] for expr in orig_exprs: # FIXME: These conditions are fairly arbitrary. Identify a better # method of having expressions decide which code path they should # take. if isinstance(expr, Ref): # Its already a Ref to subquery (see resolve_ref() for # details) new_exprs.append(expr) elif isinstance(expr, (WhereNode, Lookup)): # Decompose the subexpressions further. The code here is # copied from the else clause, but this condition must appear # before the contains_aggregate/is_summary condition below. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) elif isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary): # Reference to column. Make sure the referenced column # is selected. col_cnt += 1 col_alias = '__col%d' % col_cnt self.annotations[col_alias] = expr self.append_annotation_mask([col_alias]) new_exprs.append(Ref(col_alias, expr)) else: # Some other expression not referencing database values # directly. Its subexpression might contain Cols. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) annotation.set_source_expressions(new_exprs) return annotation, col_cnt def get_aggregation(self, using, added_aggregate_names): """ Return the dictionary with the values of the existing aggregations. """ if not self.annotation_select: return {} has_limit = self.low_mark != 0 or self.high_mark is not None has_existing_annotations = any( annotation for alias, annotation in self.annotations.items() if alias not in added_aggregate_names ) # Decide if we need to use a subquery. # # Existing annotations would cause incorrect results as get_aggregation() # must produce just one result and thus must not use GROUP BY. But we # aren't smart enough to remove the existing annotations from the # query, so those would force us to use GROUP BY. # # If the query has limit or distinct, or uses set operations, then # those operations must be done in a subquery so that the query # aggregates on the limit and/or distinct results instead of applying # the distinct and limit after the aggregation. 
if (isinstance(self.group_by, tuple) or has_limit or has_existing_annotations or self.distinct or self.combinator): from django.db.models.sql.subqueries import AggregateQuery outer_query = AggregateQuery(self.model) inner_query = self.clone() inner_query.select_for_update = False inner_query.select_related = False if not has_limit and not self.distinct_fields: # Queries with distinct_fields need ordering and when a limit # is applied we must take the slice from the ordered query. # Otherwise no need for ordering. inner_query.clear_ordering(True) if not inner_query.distinct: # If the inner query uses default select and it has some # aggregate annotations, then we must make sure the inner # query is grouped by the main model's primary key. However, # clearing the select clause can alter results if distinct is # used. if inner_query.default_cols and has_existing_annotations: inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),) inner_query.default_cols = False relabels = {t: 'subquery' for t in inner_query.alias_map} relabels[None] = 'subquery' # Remove any aggregates marked for reduction from the subquery # and move them to the outer AggregateQuery. col_cnt = 0 for alias, expression in list(inner_query.annotation_select.items()): if expression.is_summary: expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt) outer_query.annotations[alias] = expression.relabeled_clone(relabels) del inner_query.annotations[alias] # Make sure the annotation_select wont use cached results. inner_query.set_annotation_mask(inner_query.annotation_select_mask) if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask: # In case of Model.objects[0:3].count(), there would be no # field selected in the inner query, yet we must use a subquery. # So, make sure at least one field is selected. inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),) try: outer_query.add_subquery(inner_query, using) except EmptyResultSet: return { alias: None for alias in outer_query.annotation_select } else: outer_query = self self.select = () self.default_cols = False self._extra = {} outer_query.clear_ordering(True) outer_query.clear_limits() outer_query.select_for_update = False outer_query.select_related = False compiler = outer_query.get_compiler(using) result = compiler.execute_sql(SINGLE) if result is None: result = [None] * len(outer_query.annotation_select) converters = compiler.get_converters(outer_query.annotation_select.values()) result = next(compiler.apply_converters((result,), converters)) return dict(zip(outer_query.annotation_select, result)) def get_count(self, using): """ Perform a COUNT() query using the current filter constraints. 
""" obj = self.clone() obj.add_annotation(Count('*'), alias='__count', is_summary=True) number = obj.get_aggregation(using, ['__count'])['__count'] if number is None: number = 0 return number def has_filters(self): return self.where def has_results(self, using): q = self.clone() if not q.distinct: if q.group_by is True: q.add_fields((f.attname for f in self.model._meta.concrete_fields), False) q.set_group_by() q.clear_select_clause() q.clear_ordering(True) q.set_limits(high=1) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() q.explain_query = True q.explain_format = format q.explain_options = options compiler = q.get_compiler(using=using) return '\n'.join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ assert self.model == rhs.model, \ "Cannot combine queries on two different base models." assert self.can_filter(), \ "Cannot combine queries once a slice has been taken." assert self.distinct == rhs.distinct, \ "Cannot combine a unique query with a non-unique query." assert self.distinct_fields == rhs.distinct_fields, \ "Cannot combine queries with different distinct fields." # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = (connector == AND) # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) # Base table must be present in the query - this is the same # table on both sides. self.get_initial_alias() joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. 
self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Now relabel a copy of the rhs where-clause and add it to the current # one. w = rhs.where.clone() w.relabel_aliases(change_map) self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. if rhs.select: self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) else: self.select = () if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self._extra and rhs._extra: raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.") self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. self.order_by = rhs.order_by or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def deferred_to_data(self, target, callback): """ Convert the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place. The "target" parameter is the instance that is populated (in place). The "callback" is a function that is called whenever a (model, field) pair need to be added to "target". It accepts three parameters: "target", and the model and list of fields being added for that model. """ field_names, defer = self.deferred_loading if not field_names: return orig_opts = self.get_meta() seen = {} must_include = {orig_opts.concrete_model: {orig_opts.pk}} for field_name in field_names: parts = field_name.split(LOOKUP_SEP) cur_model = self.model._meta.concrete_model opts = orig_opts for name in parts[:-1]: old_model = cur_model if name in self._filtered_relations: name = self._filtered_relations[name].relation_name source = opts.get_field(name) if is_reverse_o2o(source): cur_model = source.related_model else: cur_model = source.remote_field.model opts = cur_model._meta # Even if we're "just passing through" this model, we must add # both the current model's pk and the related reference field # (if it's not a reverse relation) to the things we select. if not is_reverse_o2o(source): must_include[old_model].add(source) add_to_dict(must_include, cur_model, opts.pk) field = opts.get_field(parts[-1]) is_reverse_object = field.auto_created and not field.concrete model = field.related_model if is_reverse_object else field.model model = model._meta.concrete_model if model == opts.model: model = cur_model if not is_reverse_o2o(field): add_to_dict(seen, model, field) if defer: # We need to load all fields for each model, except those that # appear in "seen" (for all models that appear in "seen"). The only # slight complexity here is handling fields that exist on parent # models. 
workset = {} for model, values in seen.items(): for field in model._meta.local_fields: if field not in values: m = field.model._meta.concrete_model add_to_dict(workset, m, field) for model, values in must_include.items(): # If we haven't included a model in workset, we don't add the # corresponding must_include fields for that model, since an # empty set means "include all fields". That's why there's no # "else" branch here. if model in workset: workset[model].update(values) for model, values in workset.items(): callback(target, model, values) else: for model, values in must_include.items(): if model in seen: seen[model].update(values) else: # As we've passed through this model, but not explicitly # included any fields, we have to make sure it's mentioned # so that only the "must include" fields are pulled in. seen[model] = values # Now ensure that every model in the inheritance chain is mentioned # in the parent list. Again, it must be mentioned to ensure that # only "must include" fields are pulled in. for model in orig_opts.get_parent_list(): seen.setdefault(model, set()) for model, values in seen.items(): callback(target, model, values) def table_alias(self, table_name, create=False, filtered_relation=None): """ Return a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused. """ alias_list = self.table_map.get(table_name) if not create and alias_list: alias = alias_list[0] self.alias_refcount[alias] += 1 return alias, False # Create a new alias for this table. if alias_list: alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1) alias_list.append(alias) else: # The first occurrence of a table uses the table name directly. alias = filtered_relation.alias if filtered_relation is not None else table_name self.table_map[table_name] = [alias] self.alias_refcount[alias] = 1 return alias, True def ref_alias(self, alias): """Increases the reference count for this alias.""" self.alias_refcount[alias] += 1 def unref_alias(self, alias, amount=1): """Decreases the reference count for this alias.""" self.alias_refcount[alias] -= amount def promote_joins(self, aliases): """ Promote recursively the join type of given aliases and its children to an outer join. If 'unconditional' is False, only promote the join if it is nullable or the parent join is an outer join. The children promotion is done to avoid join chains that contain a LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted, then we must also promote b->c automatically, or otherwise the promotion of a->b doesn't actually change anything in the query results. """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type is None: # This is the base table (first FROM entry) - this table # isn't really joined at all in the query, so we should not # alter its join type. continue # Only the first alias (skipped above) should have None join_type assert self.alias_map[alias].join_type is not None parent_alias = self.alias_map[alias].parent_alias parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER already_louter = self.alias_map[alias].join_type == LOUTER if ((self.alias_map[alias].nullable or parent_louter) and not already_louter): self.alias_map[alias] = self.alias_map[alias].promote() # Join type of 'alias' changed, so re-examine all aliases that # refer to this one. 
aliases.extend( join for join in self.alias_map if self.alias_map[join].parent_alias == alias and join not in aliases ) def demote_joins(self, aliases): """ Change join type from LOUTER to INNER for all joins in aliases. Similarly to promote_joins(), this method must ensure no join chains containing first an outer, then an inner join are generated. If we are demoting b->c join in chain a LOUTER b LOUTER c then we must demote a->b automatically, or otherwise the demotion of b->c doesn't actually change anything in the query results. . """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type == LOUTER: self.alias_map[alias] = self.alias_map[alias].demote() parent_alias = self.alias_map[alias].parent_alias if self.alias_map[parent_alias].join_type == INNER: aliases.append(parent_alias) def reset_refcounts(self, to_counts): """ Reset reference counts for aliases so that they match the value passed in `to_counts`. """ for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount) def change_aliases(self, change_map): """ Change the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause. """ assert set(change_map).isdisjoint(change_map.values()) # 1. Update references in "select" (normal columns plus aliases), # "group by" and "where". self.where.relabel_aliases(change_map) if isinstance(self.group_by, tuple): self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by]) self.select = tuple([col.relabeled_clone(change_map) for col in self.select]) self._annotations = self._annotations and OrderedDict( (key, col.relabeled_clone(change_map)) for key, col in self._annotations.items() ) # 2. Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = {change_map.get(alias, alias) for alias in self.external_aliases} def bump_prefix(self, outer_query): """ Change the alias prefix to the next letter in the alphabet in a way that the outer query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. """ def prefix_gen(): """ Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ... """ alphabet = ascii_uppercase prefix = chr(ord(self.alias_prefix) + 1) yield prefix for n in count(1): seq = alphabet[alphabet.index(prefix):] if prefix else alphabet for s in product(seq, repeat=n): yield ''.join(s) prefix = None if self.alias_prefix != outer_query.alias_prefix: # No clashes between self and outer query should be possible. 
return local_recursion_limit = 127 # explicitly avoid infinite loop for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RuntimeError( 'Maximum recursion depth exceeded: too many subqueries.' ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases) change_map = OrderedDict() for pos, alias in enumerate(self.alias_map): new_alias = '%s%d' % (self.alias_prefix, pos) change_map[alias] = new_alias self.change_aliases(change_map) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) else: alias = self.join(BaseTable(self.get_meta().db_table, None)) return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. """ return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a sql.datastructures.BaseTable or Join. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join, with_filtered_relation=False) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: self.ref_alias(reuse_aliases[0]) return reuse_aliases[0] # No reuse is possible, so we need a new alias. alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. 
""" if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def add_annotation(self, annotation, alias, is_summary=False): """Add a single annotation expression to the Query.""" annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None, summarize=is_summary) self.append_annotation_mask([alias]) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. clone.bump_prefix(query) clone.subquery = True # It's safe to drop ordering if the queryset isn't using slicing, # distinct(*fields) or select_for_update(). if (self.low_mark == 0 and self.high_mark is None and not self.distinct_fields and not self.select_for_update): clone.clear_ordering(True) return clone def as_sql(self, compiler, connection): return self.get_compiler(connection=connection).as_sql() def resolve_lookup_value(self, value, can_reuse, allow_joins): if hasattr(value, 'resolve_expression'): value = value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. for sub_value in value: if hasattr(sub_value, 'resolve_expression'): sub_value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self._annotations: expression, expression_lookups = refs_expression(lookup_splitted, self.annotations) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model %s".' % (lookup, self.get_meta().model.__name__) ) return lookup_parts, field_parts, False def check_query_object_type(self, value, opts, field): """ Check whether the object passed while querying is of the correct type. If not, raise a ValueError specifying the wrong object. """ if hasattr(value, '_meta'): if not check_rel_lookup_compatibility(value._meta.model, opts, field): raise ValueError( 'Cannot query "%s": Must be "%s" instance.' % (value, opts.object_name)) def check_related_objects(self, field, value, opts): """Check the type of object passed to query relations.""" if field.is_relation: # Check that the field and the queryset use the same model in a # query like .filter(author=Author.objects.all()). For example, the # opts would be Author's (from the author field) and value.model # would be Author.objects.all() queryset's .model (Author also). # The field is the related field on the lhs side. 
if (isinstance(value, Query) and not value.has_select_fields and not check_rel_lookup_compatibility(value.model, opts, field)): raise ValueError( 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' % (value.model._meta.object_name, opts.object_name) ) elif hasattr(value, '_meta'): self.check_query_object_type(value, opts, field) elif hasattr(value, '__iter__'): for v in value: self.check_query_object_type(v, opts, field) def build_lookup(self, lookups, lhs, rhs): """ Try to extract transforms and lookup from given lhs. The lhs value is something that works like SQLExpression. The rhs value is what the lookup is going to compare against. The lookups is a list of names to extract using get_lookup() and get_transform(). """ # __exact is the default lookup if one isn't given. lookups = lookups or ['exact'] for name in lookups[:-1]: lhs = self.try_transform(lhs, name) # First try get_lookup() so that the lookup takes precedence if the lhs # supports both transform and lookup for the name. lookup_name = lookups[-1] lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: if lhs.field.is_relation: raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name)) # A lookup wasn't found. Try to interpret the name as a transform # and do an Exact lookup against it. lhs = self.try_transform(lhs, lookup_name) lookup_name = 'exact' lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value unless the lookup supports it. if lookup.rhs is None and not lookup.can_use_none_as_rhs: if lookup_name not in ('exact', 'iexact'): raise ValueError("Cannot use None as a query value") return lhs.get_lookup('isnull')(lhs, True) # For Oracle '' is equivalent to null. The check must be done at this # stage because join promotion can't be done in the compiler. Using # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. # A similar thing is done in is_nullable(), too. if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and lookup_name == 'exact' and lookup.rhs == ''): return lhs.get_lookup('isnull')(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. """ transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted." % (name, lhs.output_field.__class__.__name__)) def build_filter(self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. 
If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. """ if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if not getattr(reffed_expression, 'filterable', True): raise NotSupportedError( reffed_expression.__class__.__name__ + ' is disallowed in ' 'the filter clause.' ) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins) used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)} clause = self.where_class() if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) clause.add(condition, AND) return clause, [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: # No support for transforms for relational fields num_lookups = len(lookups) if num_lookups > 1: raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0])) if len(targets) == 1: col = targets[0].get_col(alias, join_info.final_field) else: col = MultiColSource(alias, targets, join_info.targets, join_info.final_field) else: col = targets[0].get_col(alias, join_info.final_field) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause.add(condition, AND) require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None: require_outer = True if (lookup_type != 'isnull' and ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER)): # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). 
lookup_class = targets[0].get_lookup('isnull') clause.add(lookup_class(targets[0].get_col(alias, join_info.targets[0]), False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_clause): self.add_q(Q(**{filter_clause[0]: filter_clause[1]})) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. """ # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK. existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER} clause, _ = self._add_q(q_object, self.used_aliases) if clause: self.where.add(clause, AND) self.demote_joins(existing_inner) def _add_q(self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True): """Add a Q-object to the current filter.""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated) for child in q_object.children: if isinstance(child, Node): child_clause, needed_inner = self._add_q( child, used_aliases, branch_negated, current_negated, allow_joins, split_subq) joinpromoter.add_votes(needed_inner) else: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, ) joinpromoter.add_votes(needed_inner) if child_clause: target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False): """Add a FilteredRelation object to the current filter.""" connector = q_object.connector current_negated ^= q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) for child in q_object.children: if isinstance(child, Node): child_clause = self.build_filtered_relation_q( child, reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, ) else: child_clause, _ = self.build_filter( child, can_reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, allow_joins=True, split_subq=False, reuse_with_filtered_relation=True, ) target_clause.add(child_clause, connector) return target_clause def add_filtered_relation(self, filtered_relation, alias): filtered_relation.alias = alias lookups = dict(get_children_from_q(filtered_relation.condition)) for lookup in chain((filtered_relation.relation_name,), lookups): lookup_parts, field_parts, _ = self.solve_lookup_type(lookup) shift = 2 if not lookup_parts else 1 if len(field_parts) > (shift + len(lookup_parts)): raise ValueError( "FilteredRelation's condition doesn't support nested " "relations (got %r)." 
                    % lookup
                )
        self._filtered_relations[filtered_relation.alias] = filtered_relation

    def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
        """
        Walk the list of names and turn them into PathInfo tuples. A single
        name in 'names' can generate multiple PathInfos (m2m, for example).

        'names' is the path of names to travel, 'opts' is the model Options we
        start the name resolving from, 'allow_many' is as for setup_joins().
        If fail_on_missing is set to True, then a name that can't be resolved
        will generate a FieldError.

        Return a list of PathInfo tuples. In addition return the final field
        (the last used join field) and target (which is a field guaranteed to
        contain the same value as the final field). Finally, return those names
        that weren't found (which are likely transforms and the final lookup).
        """
        path, names_with_path = [], []
        for pos, name in enumerate(names):
            cur_names_with_path = (name, [])
            if name == 'pk':
                name = opts.pk.name

            field = None
            filtered_relation = None
            try:
                field = opts.get_field(name)
            except FieldDoesNotExist:
                if name in self.annotation_select:
                    field = self.annotation_select[name].output_field
                elif name in self._filtered_relations and pos == 0:
                    filtered_relation = self._filtered_relations[name]
                    field = opts.get_field(filtered_relation.relation_name)
            if field is not None:
                # Fields that contain one-to-many relations with a generic
                # model (like a GenericForeignKey) cannot generate reverse
                # relations and therefore cannot be used for reverse querying.
                if field.is_relation and not field.related_model:
                    raise FieldError(
                        "Field %r does not generate an automatic reverse "
                        "relation and therefore cannot be used for reverse "
                        "querying. If it is a GenericForeignKey, consider "
                        "adding a GenericRelation." % name
                    )
                try:
                    model = field.model._meta.concrete_model
                except AttributeError:
                    # QuerySet.annotate() may introduce fields that aren't
                    # attached to a model.
                    model = None
            else:
                # We didn't find the current field, so move position back
                # one step.
                pos -= 1
                if pos == -1 or fail_on_missing:
                    field_names = list(get_field_names_from_opts(opts))
                    available = sorted(
                        field_names + list(self.annotation_select) +
                        list(self._filtered_relations)
                    )
                    raise FieldError("Cannot resolve keyword '%s' into field. "
                                     "Choices are: %s" % (name, ", ".join(available)))
                break
            # Check if we need any joins for concrete inheritance cases (the
            # field lives in parent, but we are currently in one of its
            # children)
            if model is not opts.model:
                path_to_parent = opts.get_path_to_parent(model)
                if path_to_parent:
                    path.extend(path_to_parent)
                    cur_names_with_path[1].extend(path_to_parent)
                    opts = path_to_parent[-1].to_opts
            if hasattr(field, 'get_path_info'):
                pathinfos = field.get_path_info(filtered_relation)
                if not allow_many:
                    for inner_pos, p in enumerate(pathinfos):
                        if p.m2m:
                            cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
                            names_with_path.append(cur_names_with_path)
                            raise MultiJoin(pos + 1, names_with_path)
                last = pathinfos[-1]
                path.extend(pathinfos)
                final_field = last.join_field
                opts = last.to_opts
                targets = last.target_fields
                cur_names_with_path[1].extend(pathinfos)
                names_with_path.append(cur_names_with_path)
            else:
                # Local non-relational field.
                final_field = field
                targets = (field,)
                if fail_on_missing and pos + 1 != len(names):
                    raise FieldError(
                        "Cannot resolve keyword %r into field. Join on '%s'"
                        " not permitted."
                        % (names[pos + 1], name))
                break
        return path, final_field, targets, names[pos + 1:]

    def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True,
                    reuse_with_filtered_relation=False):
        """
        Compute the necessary table joins for the passage through the fields
        given in 'names'. 'opts' is the Options class for the current model
        (which gives the table we are starting from), 'alias' is the alias for
        the table to start the joining from.

        The 'can_reuse' defines the reverse foreign key joins we can reuse. It
        can be None in which case all joins are reusable or a set of aliases
        that can be reused. Note that non-reverse foreign keys are always
        reusable when using setup_joins().

        If 'reuse_with_filtered_relation' is True, then aliases in 'can_reuse'
        may also be reused for joins that carry a filtered relation.

        If 'allow_many' is False, then any reverse foreign key seen will
        generate a MultiJoin exception.

        Return the final field involved in the joins, the target field (used
        for any 'where' constraint), the final 'opts' value, the joins, the
        field path traveled to generate the joins, and a transform function
        that takes a field and alias and is equivalent to `field.get_col(alias)`
        in the simple case but wraps field transforms if they were included in
        names.

        The target field is the field containing the concrete value. Final
        field can be something different, for example foreign key pointing to
        that value. Final field is needed for example in some value
        conversions (convert 'obj' in fk__id=obj to pk val using the foreign
        key field for example).
        """
        joins = [alias]
        # The transform can't be applied yet, as joins must be trimmed later.
        # To avoid making every caller of this method look up transforms
        # directly, compute transforms here and create a partial that converts
        # fields to the appropriate wrapped version.

        def final_transformer(field, alias):
            return field.get_col(alias)

        # Try resolving all the names as fields first. If there's an error,
        # treat trailing names as lookups until a field can be resolved.
        last_field_exception = None
        for pivot in range(len(names), 0, -1):
            try:
                path, final_field, targets, rest = self.names_to_path(
                    names[:pivot], opts, allow_many, fail_on_missing=True,
                )
            except FieldError as exc:
                if pivot == 1:
                    # The first item cannot be a lookup, so it's safe
                    # to raise the field error here.
                    raise
                else:
                    last_field_exception = exc
            else:
                # The transforms are the remaining items that couldn't be
                # resolved into fields.
                transforms = names[pivot:]
                break
        for name in transforms:
            def transform(field, alias, *, name, previous):
                try:
                    wrapped = previous(field, alias)
                    return self.try_transform(wrapped, name)
                except FieldError:
                    # FieldError is raised if the transform doesn't exist.
                    if isinstance(final_field, Field) and last_field_exception:
                        raise last_field_exception
                    else:
                        raise
            final_transformer = functools.partial(transform, name=name, previous=final_transformer)
        # Then, add the path to the query's joins. Note that we can't trim
        # joins at this stage - we will need the information about join type
        # of the trimmed joins.
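        # A hypothetical walk-through (the names are assumptions, not from
        # this module): for names = ['author', 'name', 'lower'], the pivot
        # loop above first tries all three names, then retries with
        # ['author', 'name'], which resolves; the leftover 'lower' becomes a
        # transform, so final_transformer(field, alias) returns the 'lower'
        # transform wrapped around field.get_col(alias) instead of the bare
        # column.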
for join in path: if join.filtered_relation: filtered_relation = join.filtered_relation.clone() table_alias = filtered_relation.alias else: filtered_relation = None table_alias = None opts = join.to_opts if join.direct: nullable = self.is_nullable(join.join_field) else: nullable = True connection = Join( opts.db_table, alias, table_alias, INNER, join.join_field, nullable, filtered_relation=filtered_relation, ) reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None alias = self.join( connection, reuse=reuse, reuse_with_filtered_relation=reuse_with_filtered_relation, ) joins.append(alias) if filtered_relation: filtered_relation.path = joins[:] return JoinInfo(final_field, targets, opts, joins, path, final_transformer) def trim_joins(self, targets, joins, path): """ The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join. """ joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets} targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False): if not allow_joins and LOOKUP_SEP in name: raise FieldError("Joined field references are not permitted in this query") if name in self.annotations: if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. return Ref(name, self.annotation_select[name]) else: return self.annotations[name] else: field_list = name.split(LOOKUP_SEP) join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse) targets, _, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if len(targets) > 1: raise FieldError("Referencing multicolumn fields with F() objects " "isn't supported") if reuse is not None: reuse.update(join_list) col = targets[0].get_col(join_list[-1], join_info.targets[0]) return col def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT (pk IN (SELECT parent_id FROM thetable WHERE name = 'foo' AND parent_id IS NOT NULL)) It might be worth it to consider using WHERE NOT EXISTS as that has saner null handling, and is easier for the backend's optimizer to handle. """ # Generate the inner query. 
query = Query(self.model) query.add_filter(filter_expr) query.clear_ordering(True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) # Add extra check to make sure the selected field will not be null # since we are adding an IN <subquery> clause. This prevents the # database from tripping over IN (...,NULL,...) selects and returning # nothing col = query.select[0] select_field = col.target alias = col.alias if self.is_nullable(select_field): lookup_class = select_field.get_lookup('isnull') lookup = lookup_class(select_field.get_col(alias), False) query.where.add(lookup, AND) if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases.add(alias) condition, needed_inner = self.build_filter( ('%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.low_mark and self.high_mark is None def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). 
        Some queryset types completely replace any existing list of select
        columns.
        """
        self.select = ()
        self.values_select = ()

    def set_select(self, cols):
        self.default_cols = False
        self.select = tuple(cols)

    def add_distinct_fields(self, *field_names):
        """
        Add and resolve the given fields to the query's "distinct on" clause.
        """
        self.distinct_fields = field_names
        self.distinct = True

    def add_fields(self, field_names, allow_m2m=True):
        """
        Add the given (model) fields to the select set. Add the field names in
        the order specified.
        """
        alias = self.get_initial_alias()
        opts = self.get_meta()

        try:
            cols = []
            for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use outer join.
                join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
                targets, final_alias, joins = self.trim_joins(
                    join_info.targets,
                    join_info.joins,
                    join_info.path,
                )
                for target in targets:
                    cols.append(join_info.transform_function(target, final_alias))
            if cols:
                self.set_select(cols)
        except MultiJoin:
            raise FieldError("Invalid field name: '%s'" % name)
        except FieldError:
            if LOOKUP_SEP in name:
                # For lookups spanning over relationships, show the error
                # from the model on which the lookup failed.
                raise
            else:
                names = sorted(
                    list(get_field_names_from_opts(opts)) + list(self.extra) +
                    list(self.annotation_select) +
                    list(self._filtered_relations)
                )
                raise FieldError("Cannot resolve keyword %r into field. "
                                 "Choices are: %s" % (name, ", ".join(names)))

    def add_ordering(self, *ordering):
        """
        Add items from the 'ordering' sequence to the query's "order by"
        clause. These items are either field names (not column names) --
        possibly with a direction prefix ('-' or '?') -- or OrderBy
        expressions.

        If 'ordering' is empty, clear all ordering from the query.
        """
        errors = []
        for item in ordering:
            if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):
                errors.append(item)
            if getattr(item, 'contains_aggregate', False):
                raise FieldError(
                    'Using an aggregate in order_by() without also including '
                    'it in annotate() is not allowed: %s' % item
                )
        if errors:
            raise FieldError('Invalid order_by arguments: %s' % errors)
        if ordering:
            self.order_by += ordering
        else:
            self.default_ordering = False

    def clear_ordering(self, force_empty):
        """
        Remove any ordering settings. If 'force_empty' is True, there will be
        no ordering in the resulting query (not even the model's default).
        """
        self.order_by = ()
        self.extra_order_by = ()
        if force_empty:
            self.default_ordering = False

    def set_group_by(self):
        """
        Expand the GROUP BY clause required by the query.

        This will usually be the set of all non-aggregate fields in the
        return data. If the database backend supports grouping by the
        primary key, and the query would be equivalent, the optimization
        will be made automatically.
        """
        group_by = list(self.select)
        if self.annotation_select:
            for annotation in self.annotation_select.values():
                for col in annotation.get_group_by_cols():
                    group_by.append(col)
        self.group_by = tuple(group_by)

    def add_select_related(self, fields):
        """
        Set up the select_related data structure so that we only select
        certain related models (as opposed to all models, when
        self.select_related=True).
""" if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = OrderedDict() if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = force_text(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != '%': entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) # This is order preserving, since self.extra_select is an OrderedDict. self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.deferred_loading = existing.difference(field_names), False def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.deferred_loading = frozenset(field_names), False def get_loaded_field_names(self): """ If any fields are marked to be deferred, return a dictionary mapping models to a set of names in those fields that will be loaded. If a model is not in the returned dictionary, none of its fields are deferred. If no fields are marked for deferral, return an empty dictionary. 
""" # We cache this because we call this function multiple times # (compiler.fill_related_selections, query.iterator) try: return self._loaded_field_names_cache except AttributeError: collection = {} self.deferred_to_data(collection, self.get_loaded_field_names_cb) self._loaded_field_names_cache = collection return collection def get_loaded_field_names_cb(self, target, model, fields): """Callback used by get_deferred_field_names().""" target[model] = {f.attname for f in fields} def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() if self.group_by is True: self.add_fields((f.attname for f in self.model._meta.concrete_fields), False) self.set_group_by() self.clear_select_fields() if fields: field_names = [] extra_names = [] annotation_names = [] if not self._extra and not self._annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. field_names = list(fields) else: self.default_cols = False for f in fields: if f in self.extra_select: extra_names.append(f) elif f in self.annotation_select: annotation_names.append(f) else: field_names.append(f) self.set_extra_mask(extra_names) self.set_annotation_mask(annotation_names) else: field_names = [f.attname for f in self.model._meta.concrete_fields] self.values_select = tuple(field_names) self.add_fields(field_names, True) @property def annotation_select(self): """ Return the OrderedDict of aggregate columns that are not masked and should be used in the SELECT clause. Cache this result for performance. """ if self._annotation_select_cache is not None: return self._annotation_select_cache elif not self._annotations: return {} elif self.annotation_select_mask is not None: self._annotation_select_cache = OrderedDict( (k, v) for k, v in self.annotations.items() if k in self.annotation_select_mask ) return self._annotation_select_cache else: return self.annotations @property def extra_select(self): if self._extra_select_cache is not None: return self._extra_select_cache if not self._extra: return {} elif self.extra_select_mask is not None: self._extra_select_cache = OrderedDict( (k, v) for k, v in self.extra.items() if k in self.extra_select_mask ) return self._extra_select_cache else: return self.extra def trim_start(self, names_with_path): """ Trim joins from the start of the join path. The candidates for trim are the PathInfos in names_with_path structure that are m2m joins. Also set the select column so the start matches the join. This method is meant to be used for generating the subquery joins & cols in split_exclude(). Return a lookup usable for doing outerq.filter(lookup=self) and a boolean indicating if the joins in the prefix contain a LEFT OUTER join. 
_""" all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) contains_louter = False # Trim and operate only on tables that were generated for # the lookup part of the query. That is, avoid trimming # joins generated for F() expressions. lookup_tables = [ t for t in self.alias_map if t in self._lookup_joins or t == self.base_table ] for trimmed_paths, path in enumerate(all_paths): if path.m2m: break if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER: contains_louter = True alias = lookup_tables[trimmed_paths] self.unref_alias(alias) # The path.join_field is a Rel, lets get the other side's field join_field = path.join_field.field # Build the filter prefix. paths_in_prefix = trimmed_paths trimmed_prefix = [] for name, path in names_with_path: if paths_in_prefix - len(path) < 0: break trimmed_prefix.append(name) paths_in_prefix -= len(path) trimmed_prefix.append( join_field.foreign_related_fields[0].name) trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) # Lets still see if we can trim the first join from the inner query # (that is, self). We can't do this for LEFT JOINs because we would # miss those rows that have nothing on the outer side. if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != LOUTER: select_fields = [r[0] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths + 1] self.unref_alias(lookup_tables[trimmed_paths]) extra_restriction = join_field.get_extra_restriction( self.where_class, None, lookup_tables[trimmed_paths + 1]) if extra_restriction: self.where.add(extra_restriction, AND) else: # TODO: It might be possible to trim more joins from the start of the # inner query if it happens to have a longer join chain containing the # values in select_fields. Lets punt this one for now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a Join instead of a BaseTable reference. # But the first entry in the query's FROM clause must not be a JOIN. for table in self.alias_map: if self.alias_refcount[table] > 0: self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table) break self.set_select([f.get_col(select_alias) for f in select_fields]) return trimmed_prefix, contains_louter def is_nullable(self, field): """ Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable. """ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have # (nor should it have) knowledge of which connection is going to be # used. The proper fix would be to defer all decisions where # is_nullable() is needed to the compiler stage, but that is not easy # to do currently. return ( connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed ) or field.null def get_order_dir(field, default='ASC'): """ Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort. The '-' prefix always sorts the opposite way. """ dirn = ORDER_DIR[default] if field[0] == '-': return field[1:], dirn[1] return field, dirn[0] def add_to_dict(data, key, value): """ Add "value" to the set of values for "key", whether or not "key" already exists. 
""" if key in data: data[key].add(value) else: data[key] = {value} def is_reverse_o2o(field): """ Check if the given field is reverse-o2o. The field is expected to be some sort of relation field or related object. """ return field.is_relation and field.one_to_one and not field.concrete class JoinPromoter: """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.votes = Counter() def add_votes(self, votes): """ Add single vote per item to self.votes. Parameter can be any iterable. """ self.votes.update(votes) def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query. """ to_promote = set() to_demote = set() # The effective_connector is used so that NOT (a AND b) is treated # similarly to (a OR b) for join promotion. for table, votes in self.votes.items(): # We must use outer joins in OR case when the join isn't contained # in all of the joins. Otherwise the INNER JOIN itself could remove # valid results. Consider the case where a model with rel_a and # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, # if rel_a join doesn't produce any results is null (for example # reverse foreign key or null value in direct foreign key), and # there is a matching row in rel_b with col=2, then an INNER join # to rel_a would remove a valid match from the query. So, we need # to promote any existing INNER to LOUTER (it is possible this # promotion in turn will be demoted later on). if self.effective_connector == 'OR' and votes < self.num_children: to_promote.add(table) # If connector is AND and there is a filter that can match only # when there is a joinable row, then use INNER. For example, in # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL # as join output, then the col=1 or col=2 can't match (as # NULL=anything is always false). # For the OR case, if all children voted for a join to be inner, # then we can use INNER for the join. For example: # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) # then if rel_a doesn't produce any rows, the whole condition # can't match. Hence we can safely use INNER join. if self.effective_connector == 'AND' or ( self.effective_connector == 'OR' and votes == self.num_children): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). The demotion is OK, if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use INNER # join. # Note that in this example we could just as well have the __gte # clause and the OR clause swapped. 
Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
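# A minimal sketch (illustrative only, not part of the API above) of how the
# voting resolves for Q(rel_a__col=1) | Q(rel_b__col=2), assuming each child
# filter touches only its own join alias:
#
#     promoter = JoinPromoter(OR, num_children=2, negated=False)
#     promoter.add_votes(['rel_a'])  # votes from the first child
#     promoter.add_votes(['rel_b'])  # votes from the second child
#
# Each alias ends up with 1 vote out of 2 children, so update_join_types()
# promotes both joins to LOUTER. Had both children referenced 'rel_a', its
# 2 == 2 votes would have let the join be demoted back to INNER.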
a0581f1beaec7e02425231ede29801c5c0b3b8e069930d050b6b8fde6bbc6071
import collections import functools import re import warnings from itertools import chain from django.core.exceptions import EmptyResultSet, FieldError from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import OrderBy, Random, RawSQL, Ref from django.db.models.query_utils import QueryWrapper, select_related_descend from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE, ) from django.db.models.sql.query import Query, get_order_dir from django.db.transaction import TransactionManagementError from django.db.utils import DatabaseError, NotSupportedError from django.utils.deprecation import RemovedInDjango30Warning from django.utils.inspect import func_supports_parameter FORCE = object() class SQLCompiler: def __init__(self, query, connection, using): self.query = query self.connection = connection self.using = using self.quote_cache = {'*': '*'} # The select, klass_info, and annotations are needed by QuerySet.iterator() # these are set as a side-effect of executing the query. Note that we calculate # separately a list of extra select columns needed for grammatical correctness # of the query, but these columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)') def setup_query(self): if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select() self.col_count = len(self.select) def pre_sql_setup(self): """ Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. """ self.setup_query() order_by = self.get_order_by() self.where, self.having = self.query.where.split_having() extra_select = self.get_extra_select(order_by, self.select) self.has_extra_select = bool(extra_select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Return a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". """ # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. 
        # The query.group_by is either None (no GROUP BY at all), True
        # (group by select fields), or a list of expressions to be added
        # to the group by.
        if self.query.group_by is None:
            return []
        expressions = []
        if self.query.group_by is not True:
            # If the group by is set to a list (by .values() call most likely),
            # then we need to add everything in it to the GROUP BY clause.
            # Backwards compatibility hack for setting query.group_by. Remove
            # when we have a public API way of forcing the GROUP BY clause.
            # Convert string references to expressions.
            for expr in self.query.group_by:
                if not hasattr(expr, 'as_sql'):
                    expressions.append(self.query.resolve_ref(expr))
                else:
                    expressions.append(expr)
        # Note that even if the group_by is set, it is only the minimal
        # set to group by. So, we need to add cols in select, order_by, and
        # having into the select in any case.
        for expr, _, _ in select:
            cols = expr.get_group_by_cols()
            for col in cols:
                expressions.append(col)
        for expr, (sql, params, is_ref) in order_by:
            # Skip references to the select clause, as all expressions in the
            # select clause are already part of the group by.
            if not expr.contains_aggregate and not is_ref:
                expressions.extend(expr.get_source_expressions())
        having_group_by = self.having.get_group_by_cols() if self.having else ()
        for expr in having_group_by:
            expressions.append(expr)
        result = []
        seen = set()
        expressions = self.collapse_group_by(expressions, having_group_by)

        for expr in expressions:
            sql, params = self.compile(expr)
            if (sql, tuple(params)) not in seen:
                result.append((sql, params))
                seen.add((sql, tuple(params)))
        return result

    def collapse_group_by(self, expressions, having):
        # If the DB can group by primary key, then group by the primary key of
        # query's main model. Note that for PostgreSQL the GROUP BY clause must
        # include the primary key of every table, but for MySQL it is enough to
        # have the main table's primary key.
        if self.connection.features.allows_group_by_pk:
            # Determine if the main model's primary key is in the query.
            pk = None
            for expr in expressions:
                # Is this a reference to query's base table primary key? If the
                # expression isn't a Col-like, then skip the expression.
                if (getattr(expr, 'target', None) == self.query.model._meta.pk and
                        getattr(expr, 'alias', None) == self.query.base_table):
                    pk = expr
                    break
            # If the main model's primary key is in the query, group by that
            # field, HAVING expressions, and expressions associated with tables
            # that don't have a primary key included in the grouped columns.
            if pk:
                pk_aliases = {
                    expr.alias for expr in expressions
                    if hasattr(expr, 'target') and expr.target.primary_key
                }
                expressions = [pk] + [
                    expr for expr in expressions
                    if expr in having or (
                        getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases
                    )
                ]
        elif self.connection.features.allows_group_by_selected_pks:
            # Filter out all expressions associated with a table's primary key
            # present in the grouped columns. This is done by identifying all
            # tables that have their primary key included in the grouped
            # columns and removing non-primary key columns referring to them.
            # Unmanaged models are excluded because they could be representing
            # database views on which the optimization might not be allowed.
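            # A hypothetical illustration (table and column names are
            # assumptions): given grouped columns author.id, author.name and
            # book.title, author.id is a selected primary key of a managed
            # model, so author.name can be dropped and the clause collapses
            # to GROUP BY author.id, book.title.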
            pks = {
                expr for expr in expressions
                if hasattr(expr, 'target') and expr.target.primary_key and expr.target.model._meta.managed
            }
            aliases = {expr.alias for expr in pks}
            expressions = [
                expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
            ]
        return expressions

    def get_select(self):
        """
        Return three values:
        - a list of 3-tuples of (expression, (sql, params), alias)
        - a klass_info structure,
        - a dictionary of annotations

        The (sql, params) is what the expression will produce, and alias is
        the "AS alias" for the column (possibly None).

        The klass_info structure contains the following information:
        - The base model of the query.
        - Which columns for that model are present in the query (by position
          of the select clause).
        - related_klass_infos: [f, klass_info] to descend into

        'annotations' is a dictionary of {'attname': column position} values.
        """
        select = []
        klass_info = None
        annotations = {}
        select_idx = 0
        for alias, (sql, params) in self.query.extra_select.items():
            annotations[alias] = select_idx
            select.append((RawSQL(sql, params), alias))
            select_idx += 1
        assert not (self.query.select and self.query.default_cols)
        if self.query.default_cols:
            cols = self.get_default_columns()
        else:
            # self.query.select is a special case. These columns never go to
            # any model.
            cols = self.query.select
        if cols:
            select_list = []
            for col in cols:
                select_list.append(select_idx)
                select.append((col, None))
                select_idx += 1
            klass_info = {
                'model': self.query.model,
                'select_fields': select_list,
            }
        for alias, annotation in self.query.annotation_select.items():
            annotations[alias] = select_idx
            select.append((annotation, alias))
            select_idx += 1

        if self.query.select_related:
            related_klass_infos = self.get_related_selections(select)
            klass_info['related_klass_infos'] = related_klass_infos

            def get_select_from_parent(klass_info):
                for ki in klass_info['related_klass_infos']:
                    if ki['from_parent']:
                        ki['select_fields'] = (klass_info['select_fields'] +
                                               ki['select_fields'])
                    get_select_from_parent(ki)
            get_select_from_parent(klass_info)

        ret = []
        for col, alias in select:
            try:
                sql, params = self.compile(col, select_format=True)
            except EmptyResultSet:
                # Select a predicate that's always False.
                sql, params = '0', ()
            ret.append((col, (sql, params), alias))
        return ret, klass_info, annotations

    def get_order_by(self):
        """
        Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for
        the ORDER BY clause.

        The order_by clause can alter the select clause (for example it
        can add aliases to clauses that do not yet have one, or it can
        add totally new select clauses).
""" if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by else: ordering = (self.query.order_by or self.query.get_meta().ordering or []) if self.query.standard_ordering: asc, desc = ORDER_DIR['ASC'] else: asc, desc = ORDER_DIR['DESC'] order_by = [] for field in ordering: if hasattr(field, 'resolve_expression'): if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field.reverse_ordering() order_by.append((field, False)) continue if field == '?': # random order_by.append((OrderBy(Random()), False)) continue col, order = get_order_dir(field, asc) descending = order == 'DESC' if col in self.query.annotation_select: # Reference to expression in SELECT clause order_by.append(( OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending), True)) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT clause order_by.append(( OrderBy(self.query.annotations[col], descending=descending), False)) continue if '.' in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split('.', 1) order_by.append(( OrderBy( RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []), descending=descending ), False)) continue if not self.query._extra or col not in self.query._extra: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. order_by.extend(self.find_ordering_name( field, self.query.get_meta(), default_order=asc)) else: if col not in self.query.extra_select: order_by.append(( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False)) else: order_by.append(( OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True)) result = [] seen = set() for expr, is_ref in order_by: if self.query.combinator: src = expr.get_source_expressions()[0] # Relabel order by columns to raw numbers if this is a combined # query; necessary since the columns can't be referenced by the # fully qualified name and the simple column names may collide. for idx, (sel_expr, _, col_alias) in enumerate(self.select): if is_ref and col_alias == src.refs: src = src.source elif col_alias: continue if src == sel_expr: expr.set_source_expressions([RawSQL('%d' % (idx + 1), ())]) break else: raise DatabaseError('ORDER BY term does not match any column in the result set.') resolved = expr.resolve_expression( self.query, allow_joins=True, reuse=None) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql).group(1) if (without_ordering, tuple(params)) in seen: continue seen.add((without_ordering, tuple(params))) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql).group(1) if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. 
This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ((name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( name in self.query.external_aliases and name not in self.query.table_map)): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node, select_format=False): vendor_impl = getattr(node, 'as_' + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) if select_format is FORCE or (select_format and not self.query.subquery): return node.output_field.select_format(self, sql, params) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection) for query in self.query.combined_queries if not query.is_empty() ] if not features.supports_slicing_ordering_in_compound: for query, compiler in zip(self.query.combined_queries, compilers): if query.low_mark or query.high_mark: raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.') if compiler.get_order_by(): raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.') parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query.set_values((*self.query.values_select, *self.query.annotation_select)) parts += (compiler.as_sql(),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. if combinator == 'union' or (combinator == 'difference' and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == 'union': combinator_sql += ' ALL' braces = '({})' if features.supports_slicing_ordering_in_compound else '{}' sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts)) result = [' {} '.format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup() for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark) combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, 'supports_select_{}'.format(combinator)): raise NotSupportedError('{} is not supported on this database backend.'.format(combinator)) result, params = self.get_combinator_sql(combinator, self.query.combinator_all) else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). 
from_, f_params = self.get_from_clause() where, w_params = self.compile(self.where) if self.where is not None else ("", []) having, h_params = self.compile(self.having) if self.having is not None else ("", []) result = ['SELECT'] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] col_idx = 1 for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)) elif with_col_aliases: s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx) col_idx += 1 params.extend(s_params) out_cols.append(s_sql) result += [', '.join(out_cols), 'FROM', *from_] params.extend(f_params) if self.query.select_for_update and self.connection.features.has_select_for_update: if self.connection.get_autocommit(): raise TransactionManagementError('select_for_update cannot be used outside of a transaction.') if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit: raise NotSupportedError( 'LIMIT/OFFSET is not supported with ' 'select_for_update on this database backend.' ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of # If it's a NOWAIT/SKIP LOCKED/OF query but the backend # doesn't support it, raise NotSupportedError to prevent a # possible deadlock. if nowait and not self.connection.features.has_select_for_update_nowait: raise NotSupportedError('NOWAIT is not supported on this database backend.') elif skip_locked and not self.connection.features.has_select_for_update_skip_locked: raise NotSupportedError('SKIP LOCKED is not supported on this database backend.') elif of and not self.connection.features.has_select_for_update_of: raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.') for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), ) if for_update_part and self.connection.features.for_update_after_from: result.append(for_update_part) if where: result.append('WHERE %s' % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError('annotate() + distinct(fields) is not implemented.') order_by = order_by or self.connection.ops.force_no_ordering() result.append('GROUP BY %s' % ', '.join(grouping)) if having: result.append('HAVING %s' % having) params.extend(h_params) if self.query.explain_query: result.insert(0, self.connection.ops.explain_query_prefix( self.query.explain_format, **self.query.explain_options )) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) result.append('ORDER BY %s' % ', '.join(ordering)) if with_limit_offset: result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark)) if for_update_part and not self.connection.features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering expressions # to be selected as well. 
Wrap the query in another
                # subquery to exclude the extraneous selects.
                sub_selects = []
                sub_params = []
                for index, (select, _, alias) in enumerate(self.select, start=1):
                    if not alias and with_col_aliases:
                        alias = 'col%d' % index
                    if alias:
                        sub_selects.append("%s.%s" % (
                            self.connection.ops.quote_name('subquery'),
                            self.connection.ops.quote_name(alias),
                        ))
                    else:
                        select_clone = select.relabeled_clone({select.alias: 'subquery'})
                        subselect, subparams = select_clone.as_sql(self, self.connection)
                        sub_selects.append(subselect)
                        sub_params.extend(subparams)
                return 'SELECT %s FROM (%s) subquery' % (
                    ', '.join(sub_selects),
                    ' '.join(result),
                ), tuple(sub_params + params)

            return ' '.join(result), tuple(params)
        finally:
            # Finally do cleanup - get rid of the joins we created above.
            self.query.reset_refcounts(refcounts_before)

    def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
        """
        Compute the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.

        Return a list of column expressions (the result of field.get_col()),
        one for each concrete field that should be loaded, suitable for
        inclusion in the SELECT clause.
        """
        result = []
        if opts is None:
            opts = self.query.get_meta()
        only_load = self.deferred_to_columns()
        start_alias = start_alias or self.query.get_initial_alias()
        # The 'seen_models' is used to optimize checking the needed parent
        # alias for a given field. This also includes None -> start_alias to
        # be used by local fields.
        seen_models = {None: start_alias}

        for field in opts.concrete_fields:
            model = field.model._meta.concrete_model
            # A proxy model will have a different model and concrete_model. We
            # will assign None if the field belongs to this model.
            if model == opts.model:
                model = None
            if from_parent and model is not None and issubclass(
                    from_parent._meta.concrete_model, model._meta.concrete_model):
                # Avoid loading data for already loaded parents.
                # We end up here in the case select_related() resolution
                # proceeds from parent model to child model. In that case the
                # parent model data is already present in the SELECT clause,
                # and we want to avoid reloading the same data again.
                continue
            if field.model in only_load and field.attname not in only_load[field.model]:
                continue
            alias = self.query.join_parent_model(opts, model, start_alias,
                                                 seen_models)
            column = field.get_col(alias)
            result.append(column)
        return result

    def get_distinct(self):
        """
        Return a quoted list of fields to use in DISTINCT ON part of the
        query.

        This method can alter the tables in the query, and thus it must be
        called before get_from_clause().
""" result = [] params = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(name) else: r, p = self.compile(transform_function(target, alias)) result.append(r) params.append(p) return result, params def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = order == 'DESC' pieces = name.split(LOOKUP_SEP) field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless the attribute name # of the field is specified. if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name: # Firstly, avoid infinite loops. already_seen = already_seen or set() join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins) if join_tuple in already_seen: raise FieldError('Infinite loop caused by ordering.') already_seen.add(join_tuple) results = [] for item in opts.ordering: results.extend(self.find_ordering_name(item, opts, alias, order, already_seen)) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. """ alias = alias or self.query.get_initial_alias() field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias) alias = joins[-1] return field, targets, alias, joins, path, opts, transform_function def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] for alias in tuple(self.query.alias_map): if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). 
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: result.append(', %s' % self.quote_name_unless_alias(alias)) return result, params def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain(direct_choices, reverse_choices, self.query._filtered_relations) related_klass_infos = [] if not restricted and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() only_load = self.query.get_loaded_field_names() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info['related_klass_infos'] = related_klass_infos for f in opts.fields: field_model = f.model._meta.concrete_model fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. 
" "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or '(none)', ) ) else: next = False if not select_related_descend(f, restricted, requested, only_load.get(field_model)): continue klass_info = { 'model': f.remote_field.model, 'field': f, 'reverse': False, 'local_setter': f.set_cached_value, 'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None, 'from_parent': False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins( [f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_klass_infos = self.get_related_selections( select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: if not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins([related_field_name], opts, root_alias) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': f.remote_field.set_cached_value, 'remote_setter': f.set_cached_value, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) for name in list(requested): # Filtered relations work only on the topmost level. 
if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias) model = join_opts.model alias = joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model def local_setter(obj, from_obj): f.remote_field.set_cached_value(from_obj, obj) def remote_setter(obj, from_obj): setattr(from_obj, name, obj) klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': local_setter, 'remote_setter': remote_setter, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( 'Invalid field name(s) given in select_related: %s. ' 'Choices are: %s' % ( ', '.join(invalid_fields), ', '.join(_get_field_choices()) or '(none)', ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield 'self' else: field = klass_info['field'] if klass_info['reverse']: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in klass_info.get('related_klass_infos', []) ) result = [] invalid_names = [] for name in self.query.select_for_update_of: parts = [] if name == 'self' else name.split(LOOKUP_SEP) klass_info = self.klass_info for part in parts: for related_klass_info in klass_info.get('related_klass_infos', []): field = related_klass_info['field'] if related_klass_info['reverse']: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue select_index = klass_info['select_fields'][0] col = self.select[select_index][0] if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( 'Invalid field name(s) given in select_for_update(of=(...)): %s. ' 'Only relational fields followed in the query are allowed. ' 'Choices are: %s.' % ( ', '.join(invalid_names), ', '.join(_get_field_choices()), ) ) return result def deferred_to_columns(self): """ Convert the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Return the dictionary. 
""" columns = {} self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb) return columns def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: convs = [] for conv in (backend_converters + field_converters): if func_supports_parameter(conv, 'context'): warnings.warn( 'Remove the context parameter from %s.%s(). Support for it ' 'will be removed in Django 3.0.' % ( conv.__self__.__class__.__name__, conv.__name__, ), RemovedInDjango30Warning, ) conv = functools.partial(conv, context={}) convs.append(conv) converters[i] = (convs, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size) fields = [s[0] for s in self.select[0:self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ # This is always executed on a query clone, so we can modify self.query self.query.add_extra({'a': 1}, None, None, None, None, None) self.query.set_extra_mask(['a']) return bool(self.execute_sql(SINGLE)) def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """ Run the query against the database and return the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ result_type = result_type or NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == CURSOR: # Give the caller the cursor to process and close. 
return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0:self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count if self.has_extra_select else None, chunk_size, ) if not chunked_fetch and not self.connection.features.can_use_chunked_reads: try: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested. return list(result) finally: # done with the cursor cursor.close() return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = '%s.%s' % (qn(alias), qn2(columns[index])) self.query.where.add( QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND') sql, params = self.as_sql() return 'EXISTS (%s)' % sql, params def explain_query(self): result = list(self.execute_sql()) # Some backends return 1 item tuples with strings, and others return # tuples with integers and strings. Flatten them out into strings. for row in result[0]: if not isinstance(row, str): yield ' '.join(str(c) for c in row) else: yield row class SQLInsertCompiler(SQLCompiler): return_id = False def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, 'as_sql'): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, 'get_placeholder'): # Some fields (e.g. geo fields) need special munging before # they can be inserted. sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = '%s', [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, 'resolve_expression'): value = value.resolve_expression(self.query, allow_joins=False, for_save=True) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' 'can only be used to update, not to insert.' 
% (value, field) ) if value.contains_aggregate: raise FieldError("Aggregate functions are not allowed in this query") if value.contains_over_clause: raise FieldError('Window expressions are not allowed in this query.') else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). qn = self.connection.ops.quote_name opts = self.query.get_meta() result = ['INSERT INTO %s' % qn(opts.db_table)] fields = self.query.fields or [opts.pk] result.append('(%s)' % ', '.join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields] for obj in self.query.objs ] else: # An empty object. value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = (not self.return_id and self.connection.features.has_bulk_insert) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) if self.return_id and self.connection.features.can_return_id_from_insert: if self.connection.features.can_return_ids_from_bulk_insert: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) r_fmt, r_params = self.connection.ops.return_insert_id() # Skip empty r_fmt to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. 
if r_fmt: result.append(r_fmt % col) params += [r_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, return_id=False): assert not ( return_id and len(self.query.objs) != 1 and not self.connection.features.can_return_ids_from_bulk_insert ) self.return_id = return_id with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not return_id: return if self.connection.features.can_return_ids_from_bulk_insert and len(self.query.objs) > 1: return self.connection.ops.fetch_returned_insert_ids(cursor) if self.connection.features.can_return_id_from_insert: assert len(self.query.objs) == 1 return self.connection.ops.fetch_returned_insert_id(cursor) return self.connection.ops.last_insert_id( cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column ) class SQLDeleteCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \ "Can only delete from one table at a time." qn = self.quote_name_unless_alias result = ['DELETE FROM %s' % qn(self.query.base_table)] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(params) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return '', () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, 'resolve_expression'): val = val.resolve_expression(self.query, allow_joins=False, for_save=True) if val.contains_aggregate: raise FieldError("Aggregate functions are not allowed in this query") if val.contains_over_clause: raise FieldError('Window expressions are not allowed in this query.') elif hasattr(val, 'prepare_database_save'): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = '%s' name = field.column if hasattr(val, 'as_sql'): sql, params = self.compile(val) values.append('%s = %s' % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append('%s = %s' % (qn(name), placeholder)) update_params.append(val) else: values.append('%s = NULL' % qn(name)) table = self.query.base_table result = [ 'UPDATE %s SET' % qn(table), ', '.join(values), ] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. 
The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(True) query._extra = {} query.select = [] query.add_fields([query.get_meta().pk.name]) super().pre_sql_setup() must_pre_select = count > 1 and not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.where = self.query.where_class() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) self.query.add_filter(('pk__in', idents)) self.query.related_ids = idents else: # The fast path. Filters and updates in one query. self.query.add_filter(('pk__in', query)) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation, select_format=FORCE) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ', '.join(sql) params = tuple(params) sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery) params = params + self.query.sub_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
""" Code to manage the creation and SQL rendering of 'where' constraints. """ from django.core.exceptions import EmptyResultSet from django.utils import tree from django.utils.functional import cached_property # Connection types AND = 'AND' OR = 'OR' class WhereNode(tree.Node): """ An SQL WHERE clause. The class is tied to the Query class that created it (in order to create the correct SQL). A child is usually an expression producing boolean values. Most likely the expression is a Lookup instance. However, a child could also be any class with as_sql() and either relabeled_clone() method or relabel_aliases() and clone() methods and contains_aggregate attribute. """ default = AND resolved = False conditional = True def split_having(self, negated=False): """ Return two possibly None nodes: one for those parts of self that should be included in the WHERE clause and one for those parts of self that must be included in the HAVING clause. """ if not self.contains_aggregate: return self, None in_negated = negated ^ self.negated # If the effective connector is OR and this node contains an aggregate, # then we need to push the whole branch to HAVING clause. may_need_split = ( (in_negated and self.connector == AND) or (not in_negated and self.connector == OR)) if may_need_split and self.contains_aggregate: return None, self where_parts = [] having_parts = [] for c in self.children: if hasattr(c, 'split_having'): where_part, having_part = c.split_having(in_negated) if where_part is not None: where_parts.append(where_part) if having_part is not None: having_parts.append(having_part) elif c.contains_aggregate: having_parts.append(c) else: where_parts.append(c) having_node = self.__class__(having_parts, self.connector, self.negated) if having_parts else None where_node = self.__class__(where_parts, self.connector, self.negated) if where_parts else None return where_node, having_node def as_sql(self, compiler, connection): """ Return the SQL version of the where clause and the value to be substituted in. Return '', [] if this node matches everything, None, [] if this node is empty, and raise EmptyResultSet if this node can't match anything. """ result = [] result_params = [] if self.connector == AND: full_needed, empty_needed = len(self.children), 1 else: full_needed, empty_needed = 1, len(self.children) for child in self.children: try: sql, params = compiler.compile(child) except EmptyResultSet: empty_needed -= 1 else: if sql: result.append(sql) result_params.extend(params) else: full_needed -= 1 # Check if this node matches nothing or everything. # First check the amount of full nodes and empty nodes # to make this node empty/full. # Now, check if this node is full/empty using the # counts. if empty_needed == 0: if self.negated: return '', [] else: raise EmptyResultSet if full_needed == 0: if self.negated: raise EmptyResultSet else: return '', [] conn = ' %s ' % self.connector sql_string = conn.join(result) if sql_string: if self.negated: # Some backends (Oracle at least) need parentheses # around the inner SQL in the negated case, even if the # inner SQL contains just a single expression. 
                sql_string = 'NOT (%s)' % sql_string
            elif len(result) > 1 or self.resolved:
                sql_string = '(%s)' % sql_string
        return sql_string, result_params

    def get_group_by_cols(self):
        cols = []
        for child in self.children:
            cols.extend(child.get_group_by_cols())
        return cols

    def get_source_expressions(self):
        return self.children[:]

    def set_source_expressions(self, children):
        assert len(children) == len(self.children)
        self.children = children

    def relabel_aliases(self, change_map):
        """
        Relabel the alias values of any children. 'change_map' is a dictionary
        mapping old (current) alias values to the new values.
        """
        for pos, child in enumerate(self.children):
            if hasattr(child, 'relabel_aliases'):
                # For example another WhereNode
                child.relabel_aliases(change_map)
            elif hasattr(child, 'relabeled_clone'):
                self.children[pos] = child.relabeled_clone(change_map)

    def clone(self):
        """
        Create a clone of the tree. Must only be called on root nodes (nodes
        with no parents). Children must be either (Constraint, lookup, value)
        tuples, or objects supporting .clone().
        """
        clone = self.__class__._new_instance(
            children=[], connector=self.connector, negated=self.negated)
        for child in self.children:
            if hasattr(child, 'clone'):
                clone.children.append(child.clone())
            else:
                clone.children.append(child)
        return clone

    def relabeled_clone(self, change_map):
        clone = self.clone()
        clone.relabel_aliases(change_map)
        return clone

    @classmethod
    def _contains_aggregate(cls, obj):
        if isinstance(obj, tree.Node):
            return any(cls._contains_aggregate(c) for c in obj.children)
        return obj.contains_aggregate

    @cached_property
    def contains_aggregate(self):
        return self._contains_aggregate(self)

    @classmethod
    def _contains_over_clause(cls, obj):
        if isinstance(obj, tree.Node):
            return any(cls._contains_over_clause(c) for c in obj.children)
        return obj.contains_over_clause

    @cached_property
    def contains_over_clause(self):
        return self._contains_over_clause(self)

    @property
    def is_summary(self):
        return any(child.is_summary for child in self.children)

    def resolve_expression(self, *args, **kwargs):
        clone = self.clone()
        clone.resolved = True
        return clone


class NothingNode:
    """A node that matches nothing."""
    contains_aggregate = False

    def as_sql(self, compiler=None, connection=None):
        raise EmptyResultSet


class ExtraWhere:
    # The contents are a black box - assume no aggregates are used.
    contains_aggregate = False

    def __init__(self, sqls, params):
        self.sqls = sqls
        self.params = params

    def as_sql(self, compiler=None, connection=None):
        sqls = ["(%s)" % sql for sql in self.sqls]
        return " AND ".join(sqls), list(self.params or ())


class SubqueryConstraint:
    # Even if aggregates would be used in a subquery, the outer query isn't
    # interested in those.
    contains_aggregate = False

    def __init__(self, alias, columns, targets, query_object):
        self.alias = alias
        self.columns = columns
        self.targets = targets
        self.query_object = query_object

    def as_sql(self, compiler, connection):
        query = self.query_object
        query.set_values(self.targets)
        query_compiler = query.get_compiler(connection=connection)
        return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
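# Editor's usage sketch (not part of Django itself): WhereNode trees are
# normally assembled by QuerySet filtering rather than by hand. Assuming a
# hypothetical `Author` model in `myapp`, OR-ing two querysets produces a
# WhereNode whose connector is OR:

def _where_tree_sketch():
    from myapp.models import Author  # hypothetical model

    qs = Author.objects.filter(pk=1) | Author.objects.filter(name='x')
    node = qs.query.where            # a WhereNode
    assert node.connector == 'OR'
    assert not node.contains_aggregate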
from django.core.exceptions import EmptyResultSet from django.db.models.sql.query import * # NOQA from django.db.models.sql.subqueries import * # NOQA from django.db.models.sql.where import AND, OR __all__ = ['Query', 'AND', 'OR', 'EmptyResultSet']
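# Editor's note: this module only re-exports the public pieces of the sql
# package, so callers can write, e.g.:
#
#     from django.db.models.sql import Query, AND, OR
#
# EmptyResultSet is re-exported here as well for backwards compatibility.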
""" Useful auxiliary data structures for query construction. Not useful outside the SQL domain. """ # for backwards-compatibility in Django 1.11 from django.core.exceptions import EmptyResultSet # NOQA: F401 from django.db.models.sql.constants import INNER, LOUTER class MultiJoin(Exception): """ Used by join construction code to indicate the point at which a multi-valued join was attempted (if the caller wants to treat that exceptionally). """ def __init__(self, names_pos, path_with_names): self.level = names_pos # The path travelled, this includes the path to the multijoin. self.names_with_path = path_with_names class Empty: pass class Join: """ Used by sql.Query and sql.SQLCompiler to generate JOIN clauses into the FROM entry. For example, the SQL generated could be LEFT OUTER JOIN "sometable" T1 ON ("othertable"."sometable_id" = "sometable"."id") This class is primarily used in Query.alias_map. All entries in alias_map must be Join compatible by providing the following attributes and methods: - table_name (string) - table_alias (possible alias for the table, can be None) - join_type (can be None for those entries that aren't joined from anything) - parent_alias (which table is this join's parent, can be None similarly to join_type) - as_sql() - relabeled_clone() """ def __init__(self, table_name, parent_alias, table_alias, join_type, join_field, nullable, filtered_relation=None): # Join table self.table_name = table_name self.parent_alias = parent_alias # Note: table_alias is not necessarily known at instantiation time. self.table_alias = table_alias # LOUTER or INNER self.join_type = join_type # A list of 2-tuples to use in the ON clause of the JOIN. # Each 2-tuple will create one join condition in the ON clause. self.join_cols = join_field.get_joining_columns() # Along which field (or ForeignObjectRel in the reverse join case) self.join_field = join_field # Is this join nullabled? self.nullable = nullable self.filtered_relation = filtered_relation def as_sql(self, compiler, connection): """ Generate the full LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params clause for this join. """ join_conditions = [] params = [] qn = compiler.quote_name_unless_alias qn2 = connection.ops.quote_name # Add a join condition for each pair of joining columns. for index, (lhs_col, rhs_col) in enumerate(self.join_cols): join_conditions.append('%s.%s = %s.%s' % ( qn(self.parent_alias), qn2(lhs_col), qn(self.table_alias), qn2(rhs_col), )) # Add a single condition inside parentheses for whatever # get_extra_restriction() returns. extra_cond = self.join_field.get_extra_restriction( compiler.query.where_class, self.table_alias, self.parent_alias) if extra_cond: extra_sql, extra_params = compiler.compile(extra_cond) join_conditions.append('(%s)' % extra_sql) params.extend(extra_params) if self.filtered_relation: extra_sql, extra_params = compiler.compile(self.filtered_relation) if extra_sql: join_conditions.append('(%s)' % extra_sql) params.extend(extra_params) if not join_conditions: # This might be a rel on the other end of an actual declared field. declared_field = getattr(self.join_field, 'field', self.join_field) raise ValueError( "Join generated an empty ON clause. %s did not yield either " "joining columns or extra restrictions." 
% declared_field.__class__ ) on_clause_sql = ' AND '.join(join_conditions) alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias) sql = '%s %s%s ON (%s)' % (self.join_type, qn(self.table_name), alias_str, on_clause_sql) return sql, params def relabeled_clone(self, change_map): new_parent_alias = change_map.get(self.parent_alias, self.parent_alias) new_table_alias = change_map.get(self.table_alias, self.table_alias) if self.filtered_relation is not None: filtered_relation = self.filtered_relation.clone() filtered_relation.path = [change_map.get(p, p) for p in self.filtered_relation.path] else: filtered_relation = None return self.__class__( self.table_name, new_parent_alias, new_table_alias, self.join_type, self.join_field, self.nullable, filtered_relation=filtered_relation, ) def equals(self, other, with_filtered_relation): return ( isinstance(other, self.__class__) and self.table_name == other.table_name and self.parent_alias == other.parent_alias and self.join_field == other.join_field and (not with_filtered_relation or self.filtered_relation == other.filtered_relation) ) def __eq__(self, other): return self.equals(other, with_filtered_relation=True) def demote(self): new = self.relabeled_clone({}) new.join_type = INNER return new def promote(self): new = self.relabeled_clone({}) new.join_type = LOUTER return new class BaseTable: """ The BaseTable class is used for base table references in FROM clause. For example, the SQL "foo" in SELECT * FROM "foo" WHERE somecond could be generated by this class. """ join_type = None parent_alias = None filtered_relation = None def __init__(self, table_name, alias): self.table_name = table_name self.table_alias = alias def as_sql(self, compiler, connection): alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias) base_sql = compiler.quote_name_unless_alias(self.table_name) return base_sql + alias_str, [] def relabeled_clone(self, change_map): return self.__class__(self.table_name, change_map.get(self.table_alias, self.table_alias)) def equals(self, other, with_filtered_relation): return ( isinstance(self, other.__class__) and self.table_name == other.table_name and self.table_alias == other.table_alias )
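# Editor's usage sketch (not part of Django itself): Join and BaseTable
# entries live in Query.alias_map. Assuming a hypothetical `Book` model with
# a ForeignKey to `Author`, compiling a select_related() queryset yields one
# BaseTable entry plus one Join entry (join_type INNER or LOUTER):

def _alias_map_sketch():
    from myapp.models import Book  # hypothetical model

    qs = Book.objects.select_related('author')
    qs.query.get_compiler(using='default').as_sql()  # sets up the joins
    for alias, entry in qs.query.alias_map.items():
        print(alias, type(entry).__name__, entry.join_type)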
""" Constants specific to the SQL storage portion of the ORM. """ import re # Size of each "chunk" for get_iterator calls. # Larger values are slightly faster at the expense of more storage space. GET_ITERATOR_CHUNK_SIZE = 100 # Namedtuples for sql.* internal use. # How many results to expect from a cursor.execute call MULTI = 'multi' SINGLE = 'single' CURSOR = 'cursor' NO_RESULTS = 'no results' ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$') ORDER_DIR = { 'ASC': ('ASC', 'DESC'), 'DESC': ('DESC', 'ASC'), } # SQL join types. INNER = 'INNER JOIN' LOUTER = 'LEFT OUTER JOIN'
""" Query subclasses which provide extra functionality beyond simple data retrieval. """ from django.core.exceptions import FieldError from django.db import connections from django.db.models.query_utils import Q from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS, ) from django.db.models.sql.query import Query __all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery'] class DeleteQuery(Query): """A DELETE SQL query.""" compiler = 'SQLDeleteCompiler' def do_query(self, table, where, using): self.alias_map = {table: self.alias_map[table]} self.where = where cursor = self.get_compiler(using).execute_sql(CURSOR) return cursor.rowcount if cursor else 0 def delete_batch(self, pk_list, using): """ Set up and execute delete queries for all the objects in pk_list. More than one physical query may be executed if there are a lot of values in pk_list. """ # number of objects deleted num_deleted = 0 field = self.get_meta().pk for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.where = self.where_class() self.add_q(Q( **{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]})) num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using) return num_deleted def delete_qs(self, query, using): """ Delete the queryset in one SQL query (if possible). For simple queries this is done by copying the query.query.where to self.query, for complex queries by using subquery. """ innerq = query.query # Make sure the inner query has at least one table in use. innerq.get_initial_alias() # The same for our new query. self.get_initial_alias() innerq_used_tables = tuple([t for t in innerq.alias_map if innerq.alias_refcount[t]]) if not innerq_used_tables or innerq_used_tables == tuple(self.alias_map): # There is only the base table in use in the query. self.where = innerq.where else: pk = query.model._meta.pk if not connections[using].features.update_can_self_select: # We can't do the delete using subquery. values = list(query.values_list('pk', flat=True)) if not values: return 0 return self.delete_batch(values, using) else: innerq.clear_select_clause() innerq.select = [ pk.get_col(self.get_initial_alias()) ] values = innerq self.where = self.where_class() self.add_q(Q(pk__in=values)) cursor = self.get_compiler(using).execute_sql(CURSOR) return cursor.rowcount if cursor else 0 class UpdateQuery(Query): """An UPDATE SQL query.""" compiler = 'SQLUpdateCompiler' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._setup_query() def _setup_query(self): """ Run on initialization and at the end of chaining. Any attributes that would normally be set in __init__() should go here instead. """ self.values = [] self.related_ids = None self.related_updates = {} def clone(self): obj = super().clone() obj.related_updates = self.related_updates.copy() return obj def update_batch(self, pk_list, values, using): self.add_update_values(values) for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.where = self.where_class() self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE])) self.get_compiler(using).execute_sql(NO_RESULTS) def add_update_values(self, values): """ Convert a dictionary of field name to value mappings into an update query. This is the entry point for the public update() method on querysets. 
""" values_seq = [] for name, val in values.items(): field = self.get_meta().get_field(name) direct = not (field.auto_created and not field.concrete) or not field.concrete model = field.model._meta.concrete_model if not direct or (field.is_relation and field.many_to_many): raise FieldError( 'Cannot update model field %r (only non-relations and ' 'foreign keys permitted).' % field ) if model is not self.get_meta().concrete_model: self.add_related_update(model, field, val) continue values_seq.append((field, model, val)) return self.add_update_fields(values_seq) def add_update_fields(self, values_seq): """ Append a sequence of (field, model, value) triples to the internal list that will be used to generate the UPDATE query. Might be more usefully called add_update_targets() to hint at the extra information here. """ for field, model, val in values_seq: if hasattr(val, 'resolve_expression'): # Resolve expressions here so that annotations are no longer needed val = val.resolve_expression(self, allow_joins=False, for_save=True) self.values.append((field, model, val)) def add_related_update(self, model, field, value): """ Add (name, value) to an update query for an ancestor model. Update are coalesced so that only one update query per ancestor is run. """ self.related_updates.setdefault(model, []).append((field, None, value)) def get_related_updates(self): """ Return a list of query objects: one for each update required to an ancestor model. Each query will have the same filtering conditions as the current query but will only update a single table. """ if not self.related_updates: return [] result = [] for model, values in self.related_updates.items(): query = UpdateQuery(model) query.values = values if self.related_ids is not None: query.add_filter(('pk__in', self.related_ids)) result.append(query) return result class InsertQuery(Query): compiler = 'SQLInsertCompiler' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields = [] self.objs = [] def insert_values(self, fields, objs, raw=False): self.fields = fields self.objs = objs self.raw = raw class AggregateQuery(Query): """ Take another query as a parameter to the FROM clause and only select the elements in the provided list. """ compiler = 'SQLAggregateCompiler' def add_subquery(self, query, using): query.subquery = True self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
from django.core import checks from django.db.backends.base.validation import BaseDatabaseValidation class DatabaseValidation(BaseDatabaseValidation): def check_field_type(self, field, field_type): """Oracle doesn't support a database index on some data types.""" errors = [] if field.db_index and field_type.lower() in self.connection._limited_data_types: errors.append( checks.Warning( 'Oracle does not support a database index on %s columns.' % field_type, hint=( "An index won't be created. Silence this warning if " "you don't care about it." ), obj=field, id='fields.W162', ) ) return errors
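# Editor's sketch (not part of Django itself): a hypothetical model that
# would trigger the fields.W162 warning above on this backend, since
# TextField maps to an NCLOB column on Oracle:
#
#     class Note(models.Model):
#         body = models.TextField(db_index=True)  # NCLOB column -> W162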
from django.db.backends.base.features import BaseDatabaseFeatures from django.db.utils import InterfaceError class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () interprets_empty_strings_as_nulls = True uses_savepoints = True has_select_for_update = True has_select_for_update_nowait = True has_select_for_update_skip_locked = True has_select_for_update_of = True select_for_update_of_column = True can_return_id_from_insert = True can_introspect_autofield = True supports_subqueries_in_group_by = False supports_transactions = True supports_timezones = False has_native_duration_field = True can_defer_constraint_checks = True supports_partially_nullable_unique_constraints = False truncates_names = True supports_tablespaces = True supports_sequence_reset = False can_introspect_time_field = False atomic_transactions = False supports_combined_alters = False nulls_order_largest = True requires_literal_defaults = True closed_cursor_error_class = InterfaceError bare_select_suffix = " FROM DUAL" uppercases_column_names = True # select for update with limit can be achieved on Oracle, but not with the current backend. supports_select_for_update_with_limit = False supports_temporal_subtraction = True # Oracle doesn't ignore quoted identifiers case but the current backend # does by uppercasing all identifiers. ignores_table_name_case = True supports_index_on_text_field = False has_case_insensitive_like = False create_test_procedure_without_params_sql = """ CREATE PROCEDURE "TEST_PROCEDURE" AS V_I INTEGER; BEGIN V_I := 1; END; """ create_test_procedure_with_int_param_sql = """ CREATE PROCEDURE "TEST_PROCEDURE" (P_I INTEGER) AS V_I INTEGER; BEGIN V_I := P_I; END; """ supports_callproc_kwargs = True supports_over_clause = True max_query_params = 2**16 - 1
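# Editor's usage sketch (not part of Django itself): feature flags are read
# through connection.features, letting callers guard backend-specific SQL.
# Guarded use of SKIP LOCKED, with a hypothetical `Ticket` model:

def _features_sketch():
    from django.db import connection, transaction
    from myapp.models import Ticket  # hypothetical model

    if connection.features.has_select_for_update_skip_locked:
        with transaction.atomic():
            return Ticket.objects.select_for_update(skip_locked=True).first()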
from collections import namedtuple import cx_Oracle from django.db import models from django.db.backends.base.introspection import ( BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo, ) FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('is_autofield',)) class DatabaseIntrospection(BaseDatabaseIntrospection): # Maps type objects to Django Field types. data_types_reverse = { cx_Oracle.BLOB: 'BinaryField', cx_Oracle.CLOB: 'TextField', cx_Oracle.DATETIME: 'DateField', cx_Oracle.FIXED_CHAR: 'CharField', cx_Oracle.FIXED_NCHAR: 'CharField', cx_Oracle.NATIVE_FLOAT: 'FloatField', cx_Oracle.NCHAR: 'CharField', cx_Oracle.NCLOB: 'TextField', cx_Oracle.NUMBER: 'DecimalField', cx_Oracle.STRING: 'CharField', cx_Oracle.TIMESTAMP: 'DateTimeField', } cache_bust_counter = 1 def get_field_type(self, data_type, description): if data_type == cx_Oracle.NUMBER: precision, scale = description[4:6] if scale == 0: if precision > 11: return 'BigAutoField' if description.is_autofield else 'BigIntegerField' elif precision == 1: return 'BooleanField' elif description.is_autofield: return 'AutoField' else: return 'IntegerField' elif scale == -127: return 'FloatField' return super().get_field_type(data_type, description) def get_table_list(self, cursor): """Return a list of table and view names in the current database.""" cursor.execute("SELECT TABLE_NAME, 't' FROM USER_TABLES UNION ALL " "SELECT VIEW_NAME, 'v' FROM USER_VIEWS") return [TableInfo(row[0].lower(), row[1]) for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): """ Return a description of the table with the DB-API cursor.description interface. """ # user_tab_columns gives data default for columns cursor.execute(""" SELECT column_name, data_default, CASE WHEN char_used IS NULL THEN data_length ELSE char_length END as internal_size, CASE WHEN identity_column = 'YES' THEN 1 ELSE 0 END as is_autofield FROM user_tab_cols WHERE table_name = UPPER(%s)""", [table_name]) field_map = { column: (internal_size, default if default != 'NULL' else None, is_autofield) for column, default, internal_size, is_autofield in cursor.fetchall() } self.cache_bust_counter += 1 cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format( self.connection.ops.quote_name(table_name), self.cache_bust_counter)) description = [] for desc in cursor.description: name = desc[0] internal_size, default, is_autofield = field_map[name] name = name % {} # cx_Oracle, for some reason, doubles percent signs. description.append(FieldInfo( name.lower(), *desc[1:3], internal_size, desc[4] or 0, desc[5] or 0, *desc[6:], default, is_autofield, )) return description def table_name_converter(self, name): """Table name comparison is case insensitive under Oracle.""" return name.lower() def get_sequences(self, cursor, table_name, table_fields=()): cursor.execute(""" SELECT user_tab_identity_cols.sequence_name, user_tab_identity_cols.column_name FROM user_tab_identity_cols, user_constraints, user_cons_columns cols WHERE user_constraints.constraint_name = cols.constraint_name AND user_constraints.table_name = user_tab_identity_cols.table_name AND cols.column_name = user_tab_identity_cols.column_name AND user_constraints.constraint_type = 'P' AND user_tab_identity_cols.table_name = UPPER(%s) """, [table_name]) # Oracle allows only one identity column per table. 
row = cursor.fetchone() if row: return [{'name': row[0].lower(), 'table': table_name, 'column': row[1].lower()}] # To keep backward compatibility for AutoFields that aren't Oracle # identity columns. for f in table_fields: if isinstance(f, models.AutoField): return [{'table': table_name, 'column': f.column}] return [] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. """ table_name = table_name.upper() cursor.execute(""" SELECT ca.column_name, cb.table_name, cb.column_name FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb WHERE user_constraints.table_name = %s AND user_constraints.constraint_name = ca.constraint_name AND user_constraints.r_constraint_name = cb.constraint_name AND ca.position = cb.position""", [table_name]) relations = {} for row in cursor.fetchall(): relations[row[0].lower()] = (row[2].lower(), row[1].lower()) return relations def get_key_columns(self, cursor, table_name): cursor.execute(""" SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column FROM user_constraints c JOIN user_cons_columns ccol ON ccol.constraint_name = c.constraint_name JOIN user_cons_columns rcol ON rcol.constraint_name = c.r_constraint_name WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()]) return [tuple(cell.lower() for cell in row) for row in cursor.fetchall()] def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. """ constraints = {} # Loop over the constraints, getting PKs, uniques, and checks cursor.execute(""" SELECT user_constraints.constraint_name, LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position), CASE user_constraints.constraint_type WHEN 'P' THEN 1 ELSE 0 END AS is_primary_key, CASE WHEN user_constraints.constraint_type IN ('P', 'U') THEN 1 ELSE 0 END AS is_unique, CASE user_constraints.constraint_type WHEN 'C' THEN 1 ELSE 0 END AS is_check_constraint FROM user_constraints LEFT OUTER JOIN user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name WHERE user_constraints.constraint_type = ANY('P', 'U', 'C') AND user_constraints.table_name = UPPER(%s) GROUP BY user_constraints.constraint_name, user_constraints.constraint_type """, [table_name]) for constraint, columns, pk, unique, check in cursor.fetchall(): constraints[constraint] = { 'columns': columns.split(','), 'primary_key': pk, 'unique': unique, 'foreign_key': None, 'check': check, 'index': unique, # All uniques come with an index } # Foreign key constraints cursor.execute(""" SELECT cons.constraint_name, LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position), LOWER(rcols.table_name), LOWER(rcols.column_name) FROM user_constraints cons INNER JOIN user_cons_columns rcols ON rcols.constraint_name = cons.r_constraint_name AND rcols.position = 1 LEFT OUTER JOIN user_cons_columns cols ON cons.constraint_name = cols.constraint_name WHERE cons.constraint_type = 'R' AND cons.table_name = UPPER(%s) GROUP BY cons.constraint_name, rcols.table_name, rcols.column_name """, [table_name]) for constraint, columns, other_table, other_column in cursor.fetchall(): constraints[constraint] = { 'primary_key': False, 'unique': False, 'foreign_key': (other_table, other_column), 'check': False, 'index': False, 'columns': columns.split(','), } # Now get indexes cursor.execute(""" SELECT 
ind.index_name, LOWER(ind.index_type), LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.column_position), LISTAGG(cols.descend, ',') WITHIN GROUP (ORDER BY cols.column_position) FROM user_ind_columns cols, user_indexes ind WHERE cols.table_name = UPPER(%s) AND NOT EXISTS ( SELECT 1 FROM user_constraints cons WHERE ind.index_name = cons.index_name ) AND cols.index_name = ind.index_name GROUP BY ind.index_name, ind.index_type """, [table_name]) for constraint, type_, columns, orders in cursor.fetchall(): constraints[constraint] = { 'primary_key': False, 'unique': False, 'foreign_key': None, 'check': False, 'index': True, 'type': 'idx' if type_ == 'normal' else type_, 'columns': columns.split(','), 'orders': orders.split(','), } return constraints
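# Editor's usage sketch (not part of Django itself): the introspection API
# above is what `inspectdb` drives. Listing tables and their constraints on
# the default connection:

def _introspection_sketch():
    from django.db import connection

    with connection.cursor() as cursor:
        for table in connection.introspection.get_table_list(cursor):
            constraints = connection.introspection.get_constraints(cursor, table.name)
            print(table.name, table.type, sorted(constraints))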
""" Oracle database backend for Django. Requires cx_Oracle: http://cx-oracle.sourceforge.net/ """ import datetime import decimal import os import platform from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.encoding import force_bytes, force_text from django.utils.functional import cached_property def _setup_environment(environ): # Cygwin requires some special voodoo to set the environment variables # properly so that Oracle will see them. if platform.system().upper().startswith('CYGWIN'): try: import ctypes except ImportError as e: raise ImproperlyConfigured("Error loading ctypes: %s; " "the Oracle backend requires ctypes to " "operate correctly under Cygwin." % e) kernel32 = ctypes.CDLL('kernel32') for name, value in environ: kernel32.SetEnvironmentVariableA(name, value) else: os.environ.update(environ) _setup_environment([ # Oracle takes client-side character set encoding from the environment. ('NLS_LANG', '.AL32UTF8'), # This prevents unicode from getting mangled by getting encoded into the # potentially non-unicode database character set. ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'), ]) try: import cx_Oracle as Database except ImportError as e: raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e) # Some of these import cx_Oracle, so import them after checking if it's installed. from .client import DatabaseClient # NOQA isort:skip from .creation import DatabaseCreation # NOQA isort:skip from .features import DatabaseFeatures # NOQA isort:skip from .introspection import DatabaseIntrospection # NOQA isort:skip from .operations import DatabaseOperations # NOQA isort:skip from .schema import DatabaseSchemaEditor # NOQA isort:skip from .utils import Oracle_datetime # NOQA isort:skip from .validation import DatabaseValidation # NOQA isort:skip class _UninitializedOperatorsDescriptor: def __get__(self, instance, cls=None): # If connection.operators is looked up before a connection has been # created, transparently initialize connection.operators to avert an # AttributeError. if instance is None: raise AttributeError("operators not available as class attribute") # Creating a cursor will initialize the operators. instance.cursor().close() return instance.__dict__['operators'] class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'oracle' display_name = 'Oracle' # This dictionary maps Field objects to their associated Oracle column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. # # Any format strings starting with "qn_" are quoted before being used in the # output (the "qn_" prefix is stripped before the lookup is performed. 
data_types = { 'AutoField': 'NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY', 'BigAutoField': 'NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY', 'BinaryField': 'BLOB', 'BooleanField': 'NUMBER(1)', 'CharField': 'NVARCHAR2(%(max_length)s)', 'DateField': 'DATE', 'DateTimeField': 'TIMESTAMP', 'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)', 'DurationField': 'INTERVAL DAY(9) TO SECOND(6)', 'FileField': 'NVARCHAR2(%(max_length)s)', 'FilePathField': 'NVARCHAR2(%(max_length)s)', 'FloatField': 'DOUBLE PRECISION', 'IntegerField': 'NUMBER(11)', 'BigIntegerField': 'NUMBER(19)', 'IPAddressField': 'VARCHAR2(15)', 'GenericIPAddressField': 'VARCHAR2(39)', 'NullBooleanField': 'NUMBER(1)', 'OneToOneField': 'NUMBER(11)', 'PositiveIntegerField': 'NUMBER(11)', 'PositiveSmallIntegerField': 'NUMBER(11)', 'SlugField': 'NVARCHAR2(%(max_length)s)', 'SmallIntegerField': 'NUMBER(11)', 'TextField': 'NCLOB', 'TimeField': 'TIMESTAMP', 'URLField': 'VARCHAR2(%(max_length)s)', 'UUIDField': 'VARCHAR2(32)', } data_type_check_constraints = { 'BooleanField': '%(qn_column)s IN (0,1)', 'NullBooleanField': '%(qn_column)s IN (0,1)', 'PositiveIntegerField': '%(qn_column)s >= 0', 'PositiveSmallIntegerField': '%(qn_column)s >= 0', } # Oracle doesn't support a database index on these columns. _limited_data_types = ('clob', 'nclob', 'blob') operators = _UninitializedOperatorsDescriptor() _standard_operators = { 'exact': '= %s', 'iexact': '= UPPER(%s)', 'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", } _likec_operators = { **_standard_operators, 'contains': "LIKEC %s ESCAPE '\\'", 'icontains': "LIKEC UPPER(%s) ESCAPE '\\'", 'startswith': "LIKEC %s ESCAPE '\\'", 'endswith': "LIKEC %s ESCAPE '\\'", 'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'", 'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" _pattern_ops = { 'contains': "'%%' || {} || '%%'", 'icontains': "'%%' || UPPER({}) || '%%'", 'startswith': "{} || '%%'", 'istartswith': "UPPER({}) || '%%'", 'endswith': "'%%' || {}", 'iendswith': "'%%' || UPPER({})", } _standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)" " ESCAPE TRANSLATE('\\' USING NCHAR_CS)" for k, v in _pattern_ops.items()} _likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'" for k, v in _pattern_ops.items()} Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). 
    client_class = DatabaseClient
    creation_class = DatabaseCreation
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    ops_class = DatabaseOperations
    validation_class = DatabaseValidation

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
        self.features.can_return_id_from_insert = use_returning_into

    def _dsn(self):
        settings_dict = self.settings_dict
        if not settings_dict['HOST'].strip():
            settings_dict['HOST'] = 'localhost'
        if settings_dict['PORT']:
            return Database.makedsn(settings_dict['HOST'], int(settings_dict['PORT']), settings_dict['NAME'])
        return settings_dict['NAME']

    def _connect_string(self):
        return '%s/"%s"@%s' % (self.settings_dict['USER'], self.settings_dict['PASSWORD'], self._dsn())

    def get_connection_params(self):
        conn_params = self.settings_dict['OPTIONS'].copy()
        if 'use_returning_into' in conn_params:
            del conn_params['use_returning_into']
        return conn_params

    def get_new_connection(self, conn_params):
        return Database.connect(
            user=self.settings_dict['USER'],
            password=self.settings_dict['PASSWORD'],
            dsn=self._dsn(),
            **conn_params,
        )

    def init_connection_state(self):
        cursor = self.create_cursor()
        # Set the territory first. The territory overrides NLS_DATE_FORMAT
        # and NLS_TIMESTAMP_FORMAT to the territory default. When all of
        # these are set in single statement it isn't clear what is supposed
        # to happen.
        cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
        # Set Oracle date to ANSI date format. This only needs to execute
        # once when we create a new connection. We also set the Territory
        # to 'AMERICA' which forces Sunday to evaluate to a '1' in
        # TO_CHAR().
        cursor.execute(
            "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
            " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" +
            (" TIME_ZONE = 'UTC'" if settings.USE_TZ else '')
        )
        cursor.close()
        if 'operators' not in self.__dict__:
            # Ticket #14149: Check whether our LIKE implementation will
            # work for this connection or we need to fall back on LIKEC.
            # This check is performed only once per DatabaseWrapper
            # instance per thread, since subsequent connections will use
            # the same settings.
            cursor = self.create_cursor()
            try:
                cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
                               % self._standard_operators['contains'], ['X'])
            except Database.DatabaseError:
                self.operators = self._likec_operators
                self.pattern_ops = self._likec_pattern_ops
            else:
                self.operators = self._standard_operators
                self.pattern_ops = self._standard_pattern_ops
            cursor.close()
        self.connection.stmtcachesize = 20
        # Ensure all changes are preserved even when AUTOCOMMIT is False.
        if not self.get_autocommit():
            self.commit()

    def create_cursor(self, name=None):
        return FormatStylePlaceholderCursor(self.connection)

    def _commit(self):
        if self.connection is not None:
            try:
                return self.connection.commit()
            except Database.DatabaseError as e:
                # cx_Oracle raises a cx_Oracle.DatabaseError exception
                # with the following attributes and values:
                #  code = 2091
                #  message = 'ORA-02091: transaction rolled back
                #            'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
                #               _C00102056) violated - parent key not found'
                # We convert that particular case to our IntegrityError exception
                x = e.args[0]
                if hasattr(x, 'code') and hasattr(x, 'message') \
                        and x.code == 2091 and 'ORA-02291' in x.message:
                    raise utils.IntegrityError(*tuple(e.args))
                raise

    # Oracle doesn't support releasing savepoints.
But we fake them when query # logging is enabled to keep query counts consistent with other backends. def _savepoint_commit(self, sid): if self.queries_logged: self.queries_log.append({ 'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid), 'time': '0.000', }) def _set_autocommit(self, autocommit): with self.wrap_database_errors: self.connection.autocommit = autocommit def check_constraints(self, table_names=None): """ Check constraints by setting them to immediate. Return them to deferred afterward. """ self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE') self.cursor().execute('SET CONSTRAINTS ALL DEFERRED') def is_usable(self): try: self.connection.ping() except Database.Error: return False else: return True @cached_property def oracle_full_version(self): with self.temporary_connection(): return self.connection.version @cached_property def oracle_version(self): try: return int(self.oracle_full_version.split('.')[0]) except ValueError: return None class OracleParam: """ Wrapper object for formatting parameters for Oracle. If the string representation of the value is large enough (greater than 4000 characters) the input size needs to be set as CLOB. Alternatively, if the parameter has an `input_size` attribute, then the value of the `input_size` attribute will be used instead. Otherwise, no input size will be set for the parameter when executing the query. """ def __init__(self, param, cursor, strings_only=False): # With raw SQL queries, datetimes can reach this function # without being converted by DateTimeField.get_db_prep_value. if settings.USE_TZ and (isinstance(param, datetime.datetime) and not isinstance(param, Oracle_datetime)): param = Oracle_datetime.from_datetime(param) string_size = 0 # Oracle doesn't recognize True and False correctly. if param is True: param = 1 elif param is False: param = 0 if hasattr(param, 'bind_parameter'): self.force_bytes = param.bind_parameter(cursor) elif isinstance(param, (Database.Binary, datetime.timedelta)): self.force_bytes = param else: # To transmit to the database, we need Unicode if supported # To get size right, we must consider bytes. self.force_bytes = force_text(param, cursor.charset, strings_only) if isinstance(self.force_bytes, str): # We could optimize by only converting up to 4000 bytes here string_size = len(force_bytes(param, cursor.charset, strings_only)) if hasattr(param, 'input_size'): # If parameter has `input_size` attribute, use that. self.input_size = param.input_size elif string_size > 4000: # Mark any string param greater than 4000 characters as a CLOB. self.input_size = Database.CLOB elif isinstance(param, datetime.datetime): self.input_size = Database.TIMESTAMP else: self.input_size = None class VariableWrapper: """ An adapter class for cursor variables that prevents the wrapped object from being converted into a string when used to instantiate an OracleParam. This can be used generally for any other object that should be passed into Cursor.execute as-is. """ def __init__(self, var): self.var = var def bind_parameter(self, cursor): return self.var def __getattr__(self, key): return getattr(self.var, key) def __setattr__(self, key, value): if key == 'var': self.__dict__[key] = value else: setattr(self.var, key, value) class FormatStylePlaceholderCursor: """ Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". 
We also do automatic conversion between Unicode on the Python side and UTF-8 -- for talking to Oracle -- in here. """ charset = 'utf-8' def __init__(self, connection): self.cursor = connection.cursor() self.cursor.outputtypehandler = self._output_type_handler # The default for cx_Oracle < 5.3 is 50. self.cursor.arraysize = 100 @staticmethod def _output_number_converter(value): return decimal.Decimal(value) if '.' in value else int(value) @staticmethod def _get_decimal_converter(precision, scale): if scale == 0: return int context = decimal.Context(prec=precision) quantize_value = decimal.Decimal(1).scaleb(-scale) return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context) @staticmethod def _output_type_handler(cursor, name, defaultType, length, precision, scale): """ Called for each db column fetched from cursors. Return numbers as the appropriate Python type. """ if defaultType == Database.NUMBER: if scale == -127: if precision == 0: # NUMBER column: decimal-precision floating point. # This will normally be an integer from a sequence, # but it could be a decimal value. outconverter = FormatStylePlaceholderCursor._output_number_converter else: # FLOAT column: binary-precision floating point. # This comes from FloatField columns. outconverter = float elif precision > 0: # NUMBER(p,s) column: decimal-precision fixed point. # This comes from IntegerField and DecimalField columns. outconverter = FormatStylePlaceholderCursor._get_decimal_converter(precision, scale) else: # No type information. This normally comes from a # mathematical expression in the SELECT list. Guess int # or Decimal based on whether it has a decimal point. outconverter = FormatStylePlaceholderCursor._output_number_converter return cursor.var( Database.STRING, size=255, arraysize=cursor.arraysize, outconverter=outconverter, ) def _format_params(self, params): try: return {k: OracleParam(v, self, True) for k, v in params.items()} except AttributeError: return tuple(OracleParam(p, self, True) for p in params) def _guess_input_sizes(self, params_list): # Try dict handling; if that fails, treat as sequence if hasattr(params_list[0], 'keys'): sizes = {} for params in params_list: for k, value in params.items(): if value.input_size: sizes[k] = value.input_size self.setinputsizes(**sizes) else: # It's not a list of dicts; it's a list of sequences sizes = [None] * len(params_list[0]) for params in params_list: for i, value in enumerate(params): if value.input_size: sizes[i] = value.input_size self.setinputsizes(*sizes) def _param_generator(self, params): # Try dict handling; if that fails, treat as sequence if hasattr(params, 'items'): return {k: v.force_bytes for k, v in params.items()} else: return [p.force_bytes for p in params] def _fix_for_params(self, query, params, unify_by_values=False): # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # it does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. if query.endswith(';') or query.endswith('/'): query = query[:-1] if params is None: params = [] elif hasattr(params, 'keys'): # Handle params as dict args = {k: ":%s" % k for k in params} query = query % args elif unify_by_values and params: # Handle params as a dict with unified query parameters by their # values. It can be used only in single query execute() because # executemany() shares the formatted query with each of the params # list. e.g. 
for input params = [0.75, 2, 0.75, 'sth', 0.75] # params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'} # args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0'] # params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'} params_dict = {param: ':arg%d' % i for i, param in enumerate(set(params))} args = [params_dict[param] for param in params] params = {value: key for key, value in params_dict.items()} query = query % tuple(args) else: # Handle params as sequence args = [(':arg%d' % i) for i in range(len(params))] query = query % tuple(args) return query, self._format_params(params) def execute(self, query, params=None): query, params = self._fix_for_params(query, params, unify_by_values=True) self._guess_input_sizes([params]) return self.cursor.execute(query, self._param_generator(params)) def executemany(self, query, params=None): if not params: # No params given, nothing to do return None # uniform treatment for sequences and iterables params_iter = iter(params) query, firstparams = self._fix_for_params(query, next(params_iter)) # we build a list of formatted params; as we're going to traverse it # more than once, we can't make it lazy by using a generator formatted = [firstparams] + [self._format_params(p) for p in params_iter] self._guess_input_sizes(formatted) return self.cursor.executemany(query, [self._param_generator(p) for p in formatted]) def fetchmany(self, size=None): if size is None: size = self.arraysize return tuple(self.cursor.fetchmany(size)) def fetchall(self): return tuple(self.cursor.fetchall()) def close(self): try: self.cursor.close() except Database.InterfaceError: # already closed pass def var(self, *args): return VariableWrapper(self.cursor.var(*args)) def arrayvar(self, *args): return VariableWrapper(self.cursor.arrayvar(*args)) def __getattr__(self, attr): return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor)
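# Illustrative sketch (not Django code): the core of the "%s" -> ":argN"
# rewriting that FormatStylePlaceholderCursor._fix_for_params() performs above
# for sequence params, reduced to a standalone function. Django's real method
# additionally strips a trailing ';' or '/' and can unify repeated values;
# none of that is reproduced here.

def rewrite_placeholders(query, params):
    # Build one Oracle-style named placeholder per positional parameter and
    # interpolate them into the format-style query.
    args = tuple(':arg%d' % i for i in range(len(params)))
    return query % args

if __name__ == '__main__':
    sql = rewrite_placeholders('SELECT * FROM t WHERE a = %s AND b = %s', [1, 'x'])
    assert sql == 'SELECT * FROM t WHERE a = :arg0 AND b = :arg1'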
6fb67721d11b80b5018a90d4de23d0fb96dd5ba84cc47e6c5580f52f1bed0451
import datetime import re import uuid from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.utils import strip_quotes, truncate_name from django.db.utils import DatabaseError from django.utils import timezone from django.utils.encoding import force_bytes from .base import Database from .utils import BulkInsertMapper, InsertIdVar, Oracle_datetime class DatabaseOperations(BaseDatabaseOperations): # Oracle uses NUMBER(11) and NUMBER(19) for integer fields. integer_field_ranges = { 'SmallIntegerField': (-99999999999, 99999999999), 'IntegerField': (-99999999999, 99999999999), 'BigIntegerField': (-9999999999999999999, 9999999999999999999), 'PositiveSmallIntegerField': (0, 99999999999), 'PositiveIntegerField': (0, 99999999999), } set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'} # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. _sequence_reset_sql = """ DECLARE table_value integer; seq_value integer; seq_name user_tab_identity_cols.sequence_name%%TYPE; BEGIN BEGIN SELECT sequence_name INTO seq_name FROM user_tab_identity_cols WHERE table_name = '%(table_name)s' AND column_name = '%(column_name)s'; EXCEPTION WHEN NO_DATA_FOUND THEN seq_name := '%(no_autofield_sequence_name)s'; END; SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences WHERE sequence_name = seq_name; WHILE table_value > seq_value LOOP EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL' INTO seq_value; END LOOP; END; /""" # Oracle doesn't support string without precision; use the max string size. cast_char_field_without_max_length = 'NVARCHAR2(2000)' cast_data_types = { 'TextField': cast_char_field_without_max_length, } def cache_key_culling_sql(self): return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY' def date_extract_sql(self, lookup_type, field_name): if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. return "TO_CHAR(%s, 'D')" % field_name elif lookup_type == 'week': # IW = ISO week number return "TO_CHAR(%s, 'IW')" % field_name elif lookup_type == 'quarter': return "TO_CHAR(%s, 'Q')" % field_name else: # https://docs.oracle.com/database/121/SQLRF/functions067.htm#SQLRF00639 return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): # https://docs.oracle.com/database/121/SQLRF/functions271.htm#SQLRF52058 if lookup_type in ('year', 'month'): return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': return "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': return "TRUNC(%s, 'IW')" % field_name else: return "TRUNC(%s)" % field_name # Oracle crashes with "ORA-03113: end-of-file on communication channel" # if the time zone name is passed in parameter. Use interpolation instead. # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ # This regexp matches all time zone names from the zoneinfo database. _tzname_re = re.compile(r'^[\w/:+-]+$') def _convert_field_to_tz(self, field_name, tzname): if not settings.USE_TZ: return field_name if not self._tzname_re.match(tzname): raise ValueError("Invalid time zone name: %s" % tzname) # Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE # and cast it back to TIMESTAMP to strip the TIME ZONE details. 
return "CAST((FROM_TZ(%s, '0:00') AT TIME ZONE '%s') AS TIMESTAMP)" % (field_name, tzname) def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return 'TRUNC(%s)' % field_name def datetime_cast_time_sql(self, field_name, tzname): # Since `TimeField` values are stored as TIMESTAMP where only the date # part is ignored, convert the field to the specified timezone. return self._convert_field_to_tz(field_name, tzname) def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://docs.oracle.com/database/121/SQLRF/functions271.htm#SQLRF52058 if lookup_type in ('year', 'month'): sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': sql = "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': sql = "TRUNC(%s, 'IW')" % field_name elif lookup_type == 'day': sql = "TRUNC(%s)" % field_name elif lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name else: sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def time_trunc_sql(self, lookup_type, field_name): # The implementation is similar to `datetime_trunc_sql` as both # `DateTimeField` and `TimeField` are stored as TIMESTAMP where # the date part of the later is ignored. if lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name elif lookup_type == 'second': sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'TextField': converters.append(self.convert_textfield_value) elif internal_type == 'BinaryField': converters.append(self.convert_binaryfield_value) elif internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) # Oracle stores empty strings as null. If the field accepts the empty # string, undo this to adhere to the Django convention of using # the empty string instead of null. if expression.field.empty_strings_allowed: converters.append( self.convert_empty_bytes if internal_type == 'BinaryField' else self.convert_empty_string ) return converters def convert_textfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = value.read() return value def convert_binaryfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = force_bytes(value.read()) return value def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value # cx_Oracle always returns datetime.datetime objects for # DATE and TIMESTAMP columns, but Django wants to see a # python datetime.date, .time, or .datetime. 
    def convert_datetimefield_value(self, value, expression, connection):
        if value is not None:
            value = timezone.make_aware(value, self.connection.timezone)
        return value

    def convert_datefield_value(self, value, expression, connection):
        if isinstance(value, Database.Timestamp):
            value = value.date()
        return value

    def convert_timefield_value(self, value, expression, connection):
        if isinstance(value, Database.Timestamp):
            value = value.time()
        return value

    def convert_uuidfield_value(self, value, expression, connection):
        if value is not None:
            value = uuid.UUID(value)
        return value

    @staticmethod
    def convert_empty_string(value, expression, connection):
        return '' if value is None else value

    @staticmethod
    def convert_empty_bytes(value, expression, connection):
        return b'' if value is None else value

    def deferrable_sql(self):
        return " DEFERRABLE INITIALLY DEFERRED"

    def fetch_returned_insert_id(self, cursor):
        try:
            return int(cursor._insert_id_var.getvalue())
        except (IndexError, TypeError):
            # cx_Oracle < 6.3 returns None, >= 6.3 raises IndexError.
            raise DatabaseError(
                'The database did not return a new row id. Probably "ORA-1403: '
                'no data found" was raised internally but was hidden by the '
                'Oracle OCI library (see https://code.djangoproject.com/ticket/28859).'
            )

    def field_cast_sql(self, db_type, internal_type):
        if db_type and db_type.endswith('LOB'):
            return "DBMS_LOB.SUBSTR(%s)"
        else:
            return "%s"

    def no_limit_value(self):
        return None

    def limit_offset_sql(self, low_mark, high_mark):
        fetch, offset = self._get_limit_offset_params(low_mark, high_mark)
        return '%s%s' % (
            (' OFFSET %d ROWS' % offset) if offset else '',
            (' FETCH FIRST %d ROWS ONLY' % fetch) if fetch else '',
        )

    def last_executed_query(self, cursor, sql, params):
        # https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement
        # The DB API definition does not define this attribute.
        statement = cursor.statement
        # Unlike Psycopg's `query` and MySQLdb's `_last_executed`, cx_Oracle's
        # `statement` doesn't contain the query parameters. refs #20010.
        return super().last_executed_query(cursor, statement, params)

    def last_insert_id(self, cursor, table_name, pk_name):
        sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
        cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
        return cursor.fetchone()[0]

    def lookup_cast(self, lookup_type, internal_type=None):
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return "UPPER(%s)"
        return "%s"

    def max_in_list_size(self):
        return 1000

    def max_name_length(self):
        return 30

    def pk_default_value(self):
        return "NULL"

    def prep_for_iexact_query(self, x):
        return x

    def process_clob(self, value):
        if value is None:
            return ''
        return value.read()

    def quote_name(self, name):
        # SQL92 requires delimited (quoted) names to be case-sensitive. When
        # not quoted, Oracle has case-insensitive behavior for identifiers, but
        # always defaults to uppercase.
        # We simplify things by making Oracle identifiers always uppercase.
        if not name.startswith('"') and not name.endswith('"'):
            name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
        # Oracle puts the query text into a (query % args) construct, so % signs
        # in names need to be escaped. The '%%' will be collapsed back to '%' at
        # that stage so we aren't really making the name longer here.
name = name.replace('%', '%%') return name.upper() def random_function_sql(self): return "DBMS_RANDOM.RANDOM" def regex_lookup(self, lookup_type): if lookup_type == 'regex': match_option = "'c'" else: match_option = "'i'" return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option def return_insert_id(self): return "RETURNING %s INTO %%s", (InsertIdVar(),) def savepoint_create_sql(self, sid): return "SAVEPOINT " + self.quote_name(sid) def savepoint_rollback_sql(self, sid): return "ROLLBACK TO SAVEPOINT " + self.quote_name(sid) def _foreign_key_constraints(self, table_name, recursive=False): with self.connection.cursor() as cursor: if recursive: cursor.execute(""" SELECT user_tables.table_name, rcons.constraint_name FROM user_tables JOIN user_constraints cons ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U')) LEFT JOIN user_constraints rcons ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R') START WITH user_tables.table_name = UPPER(%s) CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name GROUP BY user_tables.table_name, rcons.constraint_name HAVING user_tables.table_name != UPPER(%s) ORDER BY MAX(level) DESC """, (table_name, table_name)) else: cursor.execute(""" SELECT cons.table_name, cons.constraint_name FROM user_constraints cons WHERE cons.constraint_type = 'R' AND cons.table_name = UPPER(%s) """, (table_name,)) return cursor.fetchall() def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables: truncated_tables = {table.upper() for table in tables} constraints = set() # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE # foreign keys which Django doesn't define. Emulate the # PostgreSQL behavior which truncates all dependent tables by # manually retrieving all foreign key constraints and resolving # dependencies. for table in tables: for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade): if allow_cascade: truncated_tables.add(foreign_table) constraints.add((foreign_table, constraint)) sql = [ "%s %s %s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('DISABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), style.SQL_KEYWORD('KEEP'), style.SQL_KEYWORD('INDEX'), ) for table, constraint in constraints ] + [ "%s %s %s;" % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), ) for table in truncated_tables ] + [ "%s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('ENABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), ) for table, constraint in constraints ] # Since we've just deleted all the rows, running our sequence # ALTER code will reset the sequence to 0. 
sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): sql = [] for sequence_info in sequences: no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table']) table = self.quote_name(sequence_info['table']) column = self.quote_name(sequence_info['column'] or 'id') query = self._sequence_reset_sql % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), } sql.append(query) return sql def sequence_reset_sql(self, style, model_list): from django.db import models output = [] query = self._sequence_reset_sql for model in model_list: for f in model._meta.local_fields: if isinstance(f, models.AutoField): no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table) table = self.quote_name(model._meta.db_table) column = self.quote_name(f.column) output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), }) # Only one AutoField is allowed per model, so don't # continue to loop break for f in model._meta.many_to_many: if not f.remote_field.through: no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table()) table = self.quote_name(f.m2m_db_table()) column = self.quote_name('id') output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': 'ID', }) return output def start_transaction_sql(self): return '' def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def adapt_datefield_value(self, value): """ Transform a date value to an object compatible with what is expected by the backend driver for date columns. The default implementation transforms the date to text, but that is not necessary for Oracle. """ return value def adapt_datetimefield_value(self, value): """ Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime is timezone aware. """ if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # cx_Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.") return Oracle_datetime.from_datetime(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. 
if hasattr(value, 'resolve_expression'): return value if isinstance(value, str): return datetime.datetime.strptime(value, '%H:%M:%S') # Oracle doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("Oracle backend does not support timezone-aware times.") return Oracle_datetime(1900, 1, 1, value.hour, value.minute, value.second, value.microsecond) def combine_expression(self, connector, sub_expressions): lhs, rhs = sub_expressions if connector == '%%': return 'MOD(%s)' % ','.join(sub_expressions) elif connector == '&': return 'BITAND(%s)' % ','.join(sub_expressions) elif connector == '|': return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs} elif connector == '<<': return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '>>': return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '^': return 'POWER(%s)' % ','.join(sub_expressions) return super().combine_expression(connector, sub_expressions) def _get_no_autofield_sequence_name(self, table): """ Manually created sequence name to keep backward compatibility for AutoFields that aren't Oracle identity columns. """ name_length = self.max_name_length() - 3 return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper() def _get_sequence_name(self, cursor, table, pk_name): cursor.execute(""" SELECT sequence_name FROM user_tab_identity_cols WHERE table_name = UPPER(%s) AND column_name = UPPER(%s)""", [table, pk_name]) row = cursor.fetchone() return self._get_no_autofield_sequence_name(table) if row is None else row[0] def bulk_insert_sql(self, fields, placeholder_rows): query = [] for row in placeholder_rows: select = [] for i, placeholder in enumerate(row): # A model without any fields has fields=[None]. if fields[i]: internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type() placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder # Add columns aliases to the first select to avoid "ORA-00918: # column ambiguously defined" when two or more columns in the # first select have the same value. if not query: placeholder = '%s col_%s' % (placeholder, i) select.append(placeholder) query.append('SELECT %s FROM DUAL' % ', '.join(select)) # Bulk insert to tables with Oracle identity columns causes Oracle to # add sequence.nextval to it. Sequence.nextval cannot be used with the # UNION operator. To prevent incorrect SQL, move UNION to a subquery. return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query) def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return "NUMTODSINTERVAL(%s - %s, 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params return super().subtract_temporals(internal_type, lhs, rhs) def bulk_batch_size(self, fields, objs): """Oracle restricts the number of parameters in a query.""" if fields: return self.connection.features.max_query_params // len(fields) return len(objs)
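# Illustrative sketch (not Django code): plain-Python checks of the arithmetic
# identities behind combine_expression() above. Oracle lacks native |, << and
# >> operators, so the backend rewrites them with BITAND, POWER and FLOOR; for
# non-negative integers the rewrites below compute the same results as the
# native operators.

def emulated_or(lhs, rhs):
    # BITAND(-lhs-1, rhs) + lhs: in two's complement, -lhs-1 == ~lhs, so the
    # BITAND keeps exactly the bits of rhs that lhs lacks; adding lhs (which
    # shares no bits with that result) yields lhs | rhs without carries.
    return ((-lhs - 1) & rhs) + lhs

def emulated_shifts(lhs, rhs):
    # lhs << rhs and lhs >> rhs as multiplication / floor-division by POWER(2, rhs).
    return lhs * 2 ** rhs, lhs // 2 ** rhs

if __name__ == '__main__':
    for a in range(64):
        for b in range(16):
            assert emulated_or(a, b) == a | b
            assert emulated_shifts(a, b % 8) == (a << (b % 8), a >> (b % 8))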
6c2f2a5f7032bdeafe79d7fe5bc6002dc26e083b70ed47f12cba9a6354e6e046
import copy import datetime import re from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.utils import DatabaseError class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s" sql_alter_column_type = "MODIFY %(column)s %(type)s" sql_alter_column_null = "MODIFY %(column)s NULL" sql_alter_column_not_null = "MODIFY %(column)s NOT NULL" sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s" sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS" def quote_value(self, value): if isinstance(value, (datetime.date, datetime.time, datetime.datetime)): return "'%s'" % value elif isinstance(value, str): return "'%s'" % value.replace("\'", "\'\'") elif isinstance(value, (bytes, bytearray, memoryview)): return "'%s'" % value.hex() elif isinstance(value, bool): return "1" if value else "0" else: return str(value) def remove_field(self, model, field): # If the column is an identity column, drop the identity before # removing the field. if self._is_identity_column(model._meta.db_table, field.column): self._drop_identity(model._meta.db_table, field.column) super().remove_field(model, field) def delete_model(self, model): # Run superclass action super().delete_model(model) # Clean up manually created sequence. self.execute(""" DECLARE i INTEGER; BEGIN SELECT COUNT(1) INTO i FROM USER_SEQUENCES WHERE SEQUENCE_NAME = '%(sq_name)s'; IF i = 1 THEN EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"'; END IF; END; /""" % {'sq_name': self.connection.ops._get_no_autofield_sequence_name(model._meta.db_table)}) def alter_field(self, model, old_field, new_field, strict=False): try: super().alter_field(model, old_field, new_field, strict) except DatabaseError as e: description = str(e) # If we're changing type to an unsupported type we need a # SQLite-ish workaround if 'ORA-22858' in description or 'ORA-22859' in description: self._alter_field_type_workaround(model, old_field, new_field) # If an identity column is changing to a non-numeric type, drop the # identity first. elif 'ORA-30675' in description: self._drop_identity(model._meta.db_table, old_field.column) self.alter_field(model, old_field, new_field, strict) # If a primary key column is changing to an identity column, drop # the primary key first. elif 'ORA-30673' in description and old_field.primary_key: self._delete_primary_key(model, strict=True) self._alter_field_type_workaround(model, old_field, new_field) else: raise def _alter_field_type_workaround(self, model, old_field, new_field): """ Oracle refuses to change from some type to other type. What we need to do instead is: - Add a nullable version of the desired field with a temporary name. If the new column is an auto field, then the temporary column can't be nullable. - Update the table to transfer values from old to new - Drop old column - Rename the new column and possibly drop the nullable property """ # Make a new field that's like the new one but with a temporary # column name. 
new_temp_field = copy.deepcopy(new_field) new_temp_field.null = (new_field.get_internal_type() not in ('AutoField', 'BigAutoField')) new_temp_field.column = self._generate_temp_name(new_field.column) # Add it self.add_field(model, new_temp_field) # Explicit data type conversion # https://docs.oracle.com/database/121/SQLRF/sql_elements002.htm#SQLRF51054 new_value = self.quote_name(old_field.column) old_type = old_field.db_type(self.connection) if re.match('^N?CLOB', old_type): new_value = "TO_CHAR(%s)" % new_value old_type = 'VARCHAR2' if re.match('^N?VARCHAR2', old_type): new_internal_type = new_field.get_internal_type() if new_internal_type == 'DateField': new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value elif new_internal_type == 'DateTimeField': new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value elif new_internal_type == 'TimeField': # TimeField are stored as TIMESTAMP with a 1900-01-01 date part. new_value = "TO_TIMESTAMP(CONCAT('1900-01-01 ', %s), 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value # Transfer values across self.execute("UPDATE %s set %s=%s" % ( self.quote_name(model._meta.db_table), self.quote_name(new_temp_field.column), new_value, )) # Drop the old field self.remove_field(model, old_field) # Rename and possibly make the new field NOT NULL super().alter_field(model, new_temp_field, new_field) def normalize_name(self, name): """ Get the properly shortened and uppercased identifier as returned by quote_name() but without the quotes. """ nn = self.quote_name(name) if nn[0] == '"' and nn[-1] == '"': nn = nn[1:-1] return nn def _generate_temp_name(self, for_name): """Generate temporary names for workarounds that need temp columns.""" suffix = hex(hash(for_name)).upper()[1:] return self.normalize_name(for_name + "_" + suffix) def prepare_default(self, value): return self.quote_value(value) def _field_should_be_indexed(self, model, field): create_index = super()._field_should_be_indexed(model, field) db_type = field.db_type(self.connection) if db_type is not None and db_type.lower() in self.connection._limited_data_types: return False return create_index def _unique_should_be_added(self, old_field, new_field): return ( super()._unique_should_be_added(old_field, new_field) and not self._field_became_primary_key(old_field, new_field) ) def _is_identity_column(self, table_name, column_name): with self.connection.cursor() as cursor: cursor.execute(""" SELECT CASE WHEN identity_column = 'YES' THEN 1 ELSE 0 END FROM user_tab_cols WHERE table_name = %s AND column_name = %s """, [self.normalize_name(table_name), self.normalize_name(column_name)]) row = cursor.fetchone() return row[0] if row else False def _drop_identity(self, table_name, column_name): self.execute('ALTER TABLE %(table)s MODIFY %(column)s DROP IDENTITY' % { 'table': self.quote_name(table_name), 'column': self.quote_name(column_name), })
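# Illustrative sketch (not Django code): DatabaseSchemaEditor.quote_value()
# above, restated as a standalone function so its per-type output can be
# exercised without a connection. The schema editor defines prepare_default(),
# which the base class only calls on backends that need literal rather than
# parameterized defaults, hence the need to render Python values as SQL
# literals in the first place.

import datetime

def quote_value_sketch(value):
    if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
        return "'%s'" % value
    elif isinstance(value, str):
        return "'%s'" % value.replace("'", "''")  # double any embedded quotes
    elif isinstance(value, (bytes, bytearray, memoryview)):
        return "'%s'" % bytes(value).hex()
    elif isinstance(value, bool):
        return '1' if value else '0'
    else:
        return str(value)

if __name__ == '__main__':
    assert quote_value_sketch("O'Reilly") == "'O''Reilly'"
    assert quote_value_sketch(b'\x00\xff') == "'00ff'"
    assert quote_value_sketch(True) == '1'
    assert quote_value_sketch(datetime.date(2018, 1, 1)) == "'2018-01-01'"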
79fa006151fbeb1fc030caaad61f0935a0a9abf52d964b72b98b972d97f37b81
import subprocess from django.db.backends.base.client import BaseDatabaseClient class DatabaseClient(BaseDatabaseClient): executable_name = 'sqlplus' def runshell(self): conn_string = self.connection._connect_string() args = [self.executable_name, "-L", conn_string] subprocess.check_call(args)
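# Illustrative sketch (not Django code): the argv that runshell() above ends
# up handing to subprocess.check_call() for a hypothetical settings dict. The
# "-L" flag tells sqlplus to attempt the logon only once instead of
# reprompting, which suits a non-interactive wrapper.

if __name__ == '__main__':
    settings_dict = {'USER': 'scott', 'PASSWORD': 'tiger', 'NAME': 'xe'}  # hypothetical
    conn_string = '%s/"%s"@%s' % (
        settings_dict['USER'], settings_dict['PASSWORD'], settings_dict['NAME'],
    )
    print(['sqlplus', '-L', conn_string])  # ['sqlplus', '-L', 'scott/"tiger"@xe']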
3c730ef5c0291b512164f0f813455de9dccf984fc3ce8b9ff462166c5d57ee47
from django.db.models import DecimalField, DurationField, Func class IntervalToSeconds(Func): function = '' template = """ EXTRACT(day from %(expressions)s) * 86400 + EXTRACT(hour from %(expressions)s) * 3600 + EXTRACT(minute from %(expressions)s) * 60 + EXTRACT(second from %(expressions)s) """ def __init__(self, expression, *, output_field=None, **extra): super().__init__(expression, output_field=output_field or DecimalField(), **extra) class SecondsToInterval(Func): function = 'NUMTODSINTERVAL' template = "%(function)s(%(expressions)s, 'SECOND')" def __init__(self, expression, *, output_field=None, **extra): super().__init__(expression, output_field=output_field or DurationField(), **extra)
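# Illustrative sketch (not Django code): the day/hour/minute/second arithmetic
# in IntervalToSeconds' SQL template above, replayed in Python against a
# timedelta to show that it matches timedelta.total_seconds(). On the Oracle
# side the SECOND field carries the fractional part.

import datetime

def interval_to_seconds(days, hours, minutes, seconds):
    # Same expression as the SQL template; `seconds` may be fractional.
    return days * 86400 + hours * 3600 + minutes * 60 + seconds

if __name__ == '__main__':
    td = datetime.timedelta(days=2, hours=3, minutes=4, seconds=5, microseconds=250000)
    assert interval_to_seconds(2, 3, 4, 5.25) == td.total_seconds()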
9984defba47efb1a188b3949776e8efb91ff942b5f1646786955da2e5b2a4b86
import datetime from .base import Database class InsertIdVar: """ A late-binding cursor variable that can be passed to Cursor.execute as a parameter, in order to receive the id of the row created by an insert statement. """ def bind_parameter(self, cursor): param = cursor.cursor.var(Database.NUMBER) cursor._insert_id_var = param return param class Oracle_datetime(datetime.datetime): """ A datetime object, with an additional class attribute to tell cx_Oracle to save the microseconds too. """ input_size = Database.TIMESTAMP @classmethod def from_datetime(cls, dt): return Oracle_datetime( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, ) class BulkInsertMapper: BLOB = 'TO_BLOB(%s)' DATE = 'TO_DATE(%s)' INTERVAL = 'CAST(%s as INTERVAL DAY(9) TO SECOND(6))' NUMBER = 'TO_NUMBER(%s)' TIMESTAMP = 'TO_TIMESTAMP(%s)' types = { 'BigIntegerField': NUMBER, 'BinaryField': BLOB, 'BooleanField': NUMBER, 'DateField': DATE, 'DateTimeField': TIMESTAMP, 'DecimalField': NUMBER, 'DurationField': INTERVAL, 'FloatField': NUMBER, 'IntegerField': NUMBER, 'NullBooleanField': NUMBER, 'PositiveIntegerField': NUMBER, 'PositiveSmallIntegerField': NUMBER, 'SmallIntegerField': NUMBER, 'TimeField': TIMESTAMP, }
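# Illustrative sketch (not Django code): why Oracle_datetime above carries an
# `input_size` class attribute. OracleParam (in base.py above) copies
# `input_size` from the value into the sizes handed to setinputsizes(); per
# the docstring, a plain datetime would otherwise be bound without its
# microseconds. The sentinel below stands in for cx_Oracle's TIMESTAMP so the
# sketch runs without the driver installed.

import datetime

TIMESTAMP = object()  # stand-in for cx_Oracle.TIMESTAMP

class TimestampDatetime(datetime.datetime):
    input_size = TIMESTAMP

if __name__ == '__main__':
    dt = datetime.datetime(2018, 5, 1, 12, 30, 0, 123456)
    wrapped = TimestampDatetime(
        dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond,
    )
    assert wrapped.microsecond == 123456 and wrapped.input_size is TIMESTAMP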
2cb68f9e1bf52598515ab76a54f28653142ba745dc53de16eb312ee5834719fd
import sys from django.conf import settings from django.db.backends.base.creation import BaseDatabaseCreation from django.db.utils import DatabaseError from django.utils.crypto import get_random_string from django.utils.functional import cached_property TEST_DATABASE_PREFIX = 'test_' class DatabaseCreation(BaseDatabaseCreation): @cached_property def _maindb_connection(self): """ This is analogous to other backends' `_nodb_connection` property, which allows access to an "administrative" connection which can be used to manage the test databases. For Oracle, the only connection that can be used for that purpose is the main (non-test) connection. """ settings_dict = settings.DATABASES[self.connection.alias] user = settings_dict.get('SAVED_USER') or settings_dict['USER'] password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD'] settings_dict = {**settings_dict, 'USER': user, 'PASSWORD': password} DatabaseWrapper = type(self.connection) return DatabaseWrapper(settings_dict, alias=self.connection.alias) def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False): parameters = self._get_test_db_params() with self._maindb_connection.cursor() as cursor: if self._test_database_create(): try: self._execute_test_db_creation(cursor, parameters, verbosity, keepdb) except Exception as e: if 'ORA-01543' not in str(e): # All errors except "tablespace already exists" cancel tests sys.stderr.write("Got an error creating the test database: %s\n" % e) sys.exit(2) if not autoclobber: confirm = input( "It appears the test database, %s, already exists. " "Type 'yes' to delete it, or 'no' to cancel: " % parameters['user']) if autoclobber or confirm == 'yes': if verbosity >= 1: print("Destroying old test database for alias '%s'..." % self.connection.alias) try: self._execute_test_db_destruction(cursor, parameters, verbosity) except DatabaseError as e: if 'ORA-29857' in str(e): self._handle_objects_preventing_db_destruction(cursor, parameters, verbosity, autoclobber) else: # Ran into a database error that isn't about leftover objects in the tablespace sys.stderr.write("Got an error destroying the old test database: %s\n" % e) sys.exit(2) except Exception as e: sys.stderr.write("Got an error destroying the old test database: %s\n" % e) sys.exit(2) try: self._execute_test_db_creation(cursor, parameters, verbosity, keepdb) except Exception as e: sys.stderr.write("Got an error recreating the test database: %s\n" % e) sys.exit(2) else: print("Tests cancelled.") sys.exit(1) if self._test_user_create(): if verbosity >= 1: print("Creating test user...") try: self._create_test_user(cursor, parameters, verbosity, keepdb) except Exception as e: if 'ORA-01920' not in str(e): # All errors except "user already exists" cancel tests sys.stderr.write("Got an error creating the test user: %s\n" % e) sys.exit(2) if not autoclobber: confirm = input( "It appears the test user, %s, already exists. 
Type " "'yes' to delete it, or 'no' to cancel: " % parameters['user']) if autoclobber or confirm == 'yes': try: if verbosity >= 1: print("Destroying old test user...") self._destroy_test_user(cursor, parameters, verbosity) if verbosity >= 1: print("Creating test user...") self._create_test_user(cursor, parameters, verbosity, keepdb) except Exception as e: sys.stderr.write("Got an error recreating the test user: %s\n" % e) sys.exit(2) else: print("Tests cancelled.") sys.exit(1) self._maindb_connection.close() # done with main user -- test user and tablespaces created self._switch_to_test_user(parameters) return self.connection.settings_dict['NAME'] def _switch_to_test_user(self, parameters): """ Switch to the user that's used for creating the test database. Oracle doesn't have the concept of separate databases under the same user, so a separate user is used; see _create_test_db(). The main user is also needed for cleanup when testing is completed, so save its credentials in the SAVED_USER/SAVED_PASSWORD key in the settings dict. """ real_settings = settings.DATABASES[self.connection.alias] real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \ self.connection.settings_dict['USER'] real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \ self.connection.settings_dict['PASSWORD'] real_test_settings = real_settings['TEST'] test_settings = self.connection.settings_dict['TEST'] real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \ self.connection.settings_dict['USER'] = parameters['user'] real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password'] def set_as_test_mirror(self, primary_settings_dict): """ Set this database up to be used in testing as a mirror of a primary database whose settings are given. """ self.connection.settings_dict['USER'] = primary_settings_dict['USER'] self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD'] def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber): # There are objects in the test tablespace which prevent dropping it # The easy fix is to drop the test user -- but are we allowed to do so? print("There are objects in the old test database which prevent its destruction.") print("If they belong to the test user, deleting the user will allow the test " "database to be recreated.") print("Otherwise, you will need to find and remove each of these objects, " "or use a different tablespace.\n") if self._test_user_create(): if not autoclobber: confirm = input("Type 'yes' to delete user %s: " % parameters['user']) if autoclobber or confirm == 'yes': try: if verbosity >= 1: print("Destroying old test user...") self._destroy_test_user(cursor, parameters, verbosity) except Exception as e: sys.stderr.write("Got an error destroying the test user: %s\n" % e) sys.exit(2) try: if verbosity >= 1: print("Destroying old test database for alias '%s'..." 
% self.connection.alias) self._execute_test_db_destruction(cursor, parameters, verbosity) except Exception as e: sys.stderr.write("Got an error destroying the test database: %s\n" % e) sys.exit(2) else: print("Tests cancelled -- test database cannot be recreated.") sys.exit(1) else: print("Django is configured to use pre-existing test user '%s'," " and will not attempt to delete it.\n" % parameters['user']) print("Tests cancelled -- test database cannot be recreated.") sys.exit(1) def _destroy_test_db(self, test_database_name, verbosity=1): """ Destroy a test database, prompting the user for confirmation if the database already exists. Return the name of the test database created. """ self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER'] self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] self.connection.close() parameters = self._get_test_db_params() with self._maindb_connection.cursor() as cursor: if self._test_user_create(): if verbosity >= 1: print('Destroying test user...') self._destroy_test_user(cursor, parameters, verbosity) if self._test_database_create(): if verbosity >= 1: print('Destroying test database tables...') self._execute_test_db_destruction(cursor, parameters, verbosity) self._maindb_connection.close() def _execute_test_db_creation(self, cursor, parameters, verbosity, keepdb=False): if verbosity >= 2: print("_create_test_db(): dbname = %s" % parameters['user']) statements = [ """CREATE TABLESPACE %(tblspace)s DATAFILE '%(datafile)s' SIZE %(size)s REUSE AUTOEXTEND ON NEXT %(extsize)s MAXSIZE %(maxsize)s """, """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s TEMPFILE '%(datafile_tmp)s' SIZE %(size_tmp)s REUSE AUTOEXTEND ON NEXT %(extsize_tmp)s MAXSIZE %(maxsize_tmp)s """, ] # Ignore "tablespace already exists" error when keepdb is on. acceptable_ora_err = 'ORA-01543' if keepdb else None self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err) def _create_test_user(self, cursor, parameters, verbosity, keepdb=False): if verbosity >= 2: print("_create_test_user(): username = %s" % parameters['user']) statements = [ """CREATE USER %(user)s IDENTIFIED BY "%(password)s" DEFAULT TABLESPACE %(tblspace)s TEMPORARY TABLESPACE %(tblspace_temp)s QUOTA UNLIMITED ON %(tblspace)s """, """GRANT CREATE SESSION, CREATE TABLE, CREATE SEQUENCE, CREATE PROCEDURE, CREATE TRIGGER TO %(user)s""", ] # Ignore "user already exists" error when keepdb is on acceptable_ora_err = 'ORA-01920' if keepdb else None success = self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err) # If the password was randomly generated, change the user accordingly. if not success and self._test_settings_get('PASSWORD') is None: set_password = 'ALTER USER %(user)s IDENTIFIED BY "%(password)s"' self._execute_statements(cursor, [set_password], parameters, verbosity) # Most test-suites can be run without the create-view privilege. But some need it. extra = "GRANT CREATE VIEW TO %(user)s" success = self._execute_allow_fail_statements(cursor, [extra], parameters, verbosity, 'ORA-01031') if not success and verbosity >= 2: print("Failed to grant CREATE VIEW permission to test user. 
This may be ok.") def _execute_test_db_destruction(self, cursor, parameters, verbosity): if verbosity >= 2: print("_execute_test_db_destruction(): dbname=%s" % parameters['user']) statements = [ 'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS', 'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS', ] self._execute_statements(cursor, statements, parameters, verbosity) def _destroy_test_user(self, cursor, parameters, verbosity): if verbosity >= 2: print("_destroy_test_user(): user=%s" % parameters['user']) print("Be patient. This can take some time...") statements = [ 'DROP USER %(user)s CASCADE', ] self._execute_statements(cursor, statements, parameters, verbosity) def _execute_statements(self, cursor, statements, parameters, verbosity, allow_quiet_fail=False): for template in statements: stmt = template % parameters if verbosity >= 2: print(stmt) try: cursor.execute(stmt) except Exception as err: if (not allow_quiet_fail) or verbosity >= 2: sys.stderr.write("Failed (%s)\n" % (err)) raise def _execute_allow_fail_statements(self, cursor, statements, parameters, verbosity, acceptable_ora_err): """ Execute statements which are allowed to fail silently if the Oracle error code given by `acceptable_ora_err` is raised. Return True if the statements execute without an exception, or False otherwise. """ try: # Statement can fail when acceptable_ora_err is not None allow_quiet_fail = acceptable_ora_err is not None and len(acceptable_ora_err) > 0 self._execute_statements(cursor, statements, parameters, verbosity, allow_quiet_fail=allow_quiet_fail) return True except DatabaseError as err: description = str(err) if acceptable_ora_err is None or acceptable_ora_err not in description: raise return False def _get_test_db_params(self): return { 'dbname': self._test_database_name(), 'user': self._test_database_user(), 'password': self._test_database_passwd(), 'tblspace': self._test_database_tblspace(), 'tblspace_temp': self._test_database_tblspace_tmp(), 'datafile': self._test_database_tblspace_datafile(), 'datafile_tmp': self._test_database_tblspace_tmp_datafile(), 'maxsize': self._test_database_tblspace_maxsize(), 'maxsize_tmp': self._test_database_tblspace_tmp_maxsize(), 'size': self._test_database_tblspace_size(), 'size_tmp': self._test_database_tblspace_tmp_size(), 'extsize': self._test_database_tblspace_extsize(), 'extsize_tmp': self._test_database_tblspace_tmp_extsize(), } def _test_settings_get(self, key, default=None, prefixed=None): """ Return a value from the test settings dict, or a given default, or a prefixed entry from the main settings dict. """ settings_dict = self.connection.settings_dict val = settings_dict['TEST'].get(key, default) if val is None and prefixed: val = TEST_DATABASE_PREFIX + settings_dict[prefixed] return val def _test_database_name(self): return self._test_settings_get('NAME', prefixed='NAME') def _test_database_create(self): return self._test_settings_get('CREATE_DB', default=True) def _test_user_create(self): return self._test_settings_get('CREATE_USER', default=True) def _test_database_user(self): return self._test_settings_get('USER', prefixed='USER') def _test_database_passwd(self): password = self._test_settings_get('PASSWORD') if password is None and self._test_user_create(): # Oracle passwords are limited to 30 chars and can't contain symbols. 
password = get_random_string(length=30) return password def _test_database_tblspace(self): return self._test_settings_get('TBLSPACE', prefixed='USER') def _test_database_tblspace_tmp(self): settings_dict = self.connection.settings_dict return settings_dict['TEST'].get('TBLSPACE_TMP', TEST_DATABASE_PREFIX + settings_dict['USER'] + '_temp') def _test_database_tblspace_datafile(self): tblspace = '%s.dbf' % self._test_database_tblspace() return self._test_settings_get('DATAFILE', default=tblspace) def _test_database_tblspace_tmp_datafile(self): tblspace = '%s.dbf' % self._test_database_tblspace_tmp() return self._test_settings_get('DATAFILE_TMP', default=tblspace) def _test_database_tblspace_maxsize(self): return self._test_settings_get('DATAFILE_MAXSIZE', default='500M') def _test_database_tblspace_tmp_maxsize(self): return self._test_settings_get('DATAFILE_TMP_MAXSIZE', default='500M') def _test_database_tblspace_size(self): return self._test_settings_get('DATAFILE_SIZE', default='50M') def _test_database_tblspace_tmp_size(self): return self._test_settings_get('DATAFILE_TMP_SIZE', default='50M') def _test_database_tblspace_extsize(self): return self._test_settings_get('DATAFILE_EXTSIZE', default='25M') def _test_database_tblspace_tmp_extsize(self): return self._test_settings_get('DATAFILE_TMP_EXTSIZE', default='25M') def _get_test_db_name(self): """ Return the 'production' DB name to get the test DB creation machinery to work. This isn't a great deal in this case because DB names as handled by Django don't have real counterparts in Oracle. """ return self.connection.settings_dict['NAME'] def test_db_signature(self): settings_dict = self.connection.settings_dict return ( settings_dict['HOST'], settings_dict['PORT'], settings_dict['ENGINE'], settings_dict['NAME'], self._test_database_user(), )
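# Illustrative sketch (not Django code): the three-step fallback implemented
# by _test_settings_get() above -- an explicit entry in the TEST sub-dict wins,
# then the caller's default, then the matching main setting with the 'test_'
# prefix. The settings dict here is hypothetical.

TEST_DATABASE_PREFIX = 'test_'

def test_settings_get(settings_dict, key, default=None, prefixed=None):
    val = settings_dict['TEST'].get(key, default)
    if val is None and prefixed:
        val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
    return val

if __name__ == '__main__':
    settings_dict = {'USER': 'app', 'TEST': {'CREATE_DB': False}}
    assert test_settings_get(settings_dict, 'USER', prefixed='USER') == 'test_app'
    assert test_settings_get(settings_dict, 'CREATE_DB', default=True) is False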
e3320056c78fc844e246d2bb080c3bf32e198823c84acd0fbf5ee6156cb64ebe
class BaseDatabaseValidation: """Encapsulate backend-specific validation.""" def __init__(self, connection): self.connection = connection def check(self, **kwargs): return [] def check_field(self, field, **kwargs): errors = [] # Backends may implement a check_field_type() method. if (hasattr(self, 'check_field_type') and # Ignore any related fields. not getattr(field, 'remote_field', None)): # Ignore fields with unsupported features. db_supports_all_required_features = all( getattr(self.connection.features, feature, False) for feature in field.model._meta.required_db_features ) if db_supports_all_required_features: field_type = field.db_type(self.connection) # Ignore non-concrete fields. if field_type is not None: errors.extend(self.check_field_type(field, field_type)) return errors
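# Illustrative sketch (not Django's shipped code): one possible shape for the
# optional check_field_type() hook that check_field() above probes for with
# hasattr(). Plain strings stand in for django.core.checks message objects,
# and the NCLOB rule is only an example; a real backend would return
# checks.Error or checks.Warning instances.

class ExampleValidation(BaseDatabaseValidation):
    def check_field_type(self, field, field_type):
        errors = []
        if field_type.lower() == 'nclob' and getattr(field, 'db_index', False):
            errors.append('%s: this backend cannot index NCLOB columns.' % field.name)
        return errors

if __name__ == '__main__':
    class FakeField:
        name = 'body'
        db_index = True
    print(ExampleValidation(connection=None).check_field_type(FakeField(), 'NCLOB'))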
f7c7c3a97763d5a93f7a2cc0657f2883fe0e319192edfb7ec11e85f527aeff1f
from django.db.models.aggregates import StdDev from django.db.utils import NotSupportedError, ProgrammingError from django.utils.functional import cached_property class BaseDatabaseFeatures: gis_enabled = False allows_group_by_pk = False allows_group_by_selected_pks = False empty_fetchmany_value = [] update_can_self_select = True # Does the backend distinguish between '' and None? interprets_empty_strings_as_nulls = False # Does the backend allow inserting duplicate NULL rows in a nullable # unique field? All core backends implement this correctly, but other # databases such as SQL Server do not. supports_nullable_unique_constraints = True # Does the backend allow inserting duplicate rows when a unique_together # constraint exists and some fields are nullable but not all of them? supports_partially_nullable_unique_constraints = True can_use_chunked_reads = True can_return_id_from_insert = False can_return_ids_from_bulk_insert = False has_bulk_insert = True uses_savepoints = False can_release_savepoints = False # If True, don't use integer foreign keys referring to, e.g., positive # integer primary keys. related_fields_match_type = False allow_sliced_subqueries_with_in = True has_select_for_update = False has_select_for_update_nowait = False has_select_for_update_skip_locked = False has_select_for_update_of = False # Does the database's SELECT FOR UPDATE OF syntax require a column rather # than a table? select_for_update_of_column = False # Does the default test database allow multiple connections? # Usually an indication that the test database is in-memory test_db_allows_multiple_connections = True # Can an object be saved without an explicit primary key? supports_unspecified_pk = False # Can a fixture contain forward references? i.e., are # FK constraints checked at the end of transaction, or # at the end of each save operation? supports_forward_references = True # Does the backend truncate names properly when they are too long? truncates_names = False # Is there a REAL datatype in addition to floats/doubles? has_real_datatype = False supports_subqueries_in_group_by = True # Is there a true datatype for uuid? has_native_uuid_field = False # Is there a true datatype for timedeltas? has_native_duration_field = False # Does the database driver supports same type temporal data subtraction # by returning the type used to store duration field? supports_temporal_subtraction = False # Does the __regex lookup support backreferencing and grouping? supports_regex_backreferencing = True # Can date/datetime lookups be performed using a string? supports_date_lookup_using_string = True # Can datetimes with timezones be used? supports_timezones = True # Does the database have a copy of the zoneinfo database? has_zoneinfo_database = True # When performing a GROUP BY, is an ORDER BY NULL required # to remove any ordering? requires_explicit_null_ordering_when_grouping = False # Does the backend order NULL values as largest or smallest? nulls_order_largest = False # The database's limit on the number of query parameters. max_query_params = None # Can an object have an autoincrement primary key of 0? MySQL says No. allows_auto_pk_0 = True # Do we need to NULL a ForeignKey out, or can the constraint check be # deferred can_defer_constraint_checks = False # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas supports_mixed_date_datetime_comparisons = True # Does the backend support tablespaces? Default to False because it isn't # in the SQL standard. 
    supports_tablespaces = False

    # Does the backend reset sequences between tests?
    supports_sequence_reset = True

    # Can the backend introspect the default value of a column?
    can_introspect_default = True

    # Confirm support for introspected foreign keys.
    # Every database can do this reliably, except MySQL,
    # which can't do it for MyISAM tables.
    can_introspect_foreign_keys = True

    # Can the backend introspect an AutoField, instead of an IntegerField?
    can_introspect_autofield = False

    # Can the backend introspect a BigIntegerField, instead of an IntegerField?
    can_introspect_big_integer_field = True

    # Can the backend introspect a BinaryField, instead of a TextField?
    can_introspect_binary_field = True

    # Can the backend introspect a DecimalField, instead of a FloatField?
    can_introspect_decimal_field = True

    # Can the backend introspect an IPAddressField, instead of a CharField?
    can_introspect_ip_address_field = False

    # Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
    can_introspect_positive_integer_field = False

    # Can the backend introspect a SmallIntegerField, instead of an IntegerField?
    can_introspect_small_integer_field = False

    # Can the backend introspect a TimeField, instead of a DateTimeField?
    can_introspect_time_field = True

    # Some backends may not be able to differentiate BooleanField from other
    # fields such as IntegerField.
    introspected_boolean_field_type = 'BooleanField'

    # Can the backend introspect the column order (ASC/DESC) for indexes?
    supports_index_column_ordering = True

    # Support for the DISTINCT ON clause.
    can_distinct_on_fields = False

    # Does the backend decide to commit before SAVEPOINT statements
    # when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
    autocommits_when_autocommit_is_off = False

    # Does the backend prevent running SQL queries in broken transactions?
    atomic_transactions = True

    # Can we roll back DDL in a transaction?
    can_rollback_ddl = False

    # Does it support operations that require renaming references in a transaction?
    supports_atomic_references_rename = True

    # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
    supports_combined_alters = False

    # Does it support foreign keys?
    supports_foreign_keys = True

    # Does it support CHECK constraints?
    supports_column_check_constraints = True

    # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
    # parameter passing? Note this can be provided by the backend even if not
    # supported by the Python driver.
    supports_paramstyle_pyformat = True

    # Does the backend require literal defaults, rather than parameterized ones?
    requires_literal_defaults = False

    # Does the backend require a connection reset after each material schema change?
    connection_persists_old_columns = False

    # What kind of error does the backend throw when accessing a closed cursor?
    closed_cursor_error_class = ProgrammingError

    # Does 'a' LIKE 'A' match?
    has_case_insensitive_like = True

    # Does the backend require the sqlparse library for splitting multi-line
    # statements before executing them?
    requires_sqlparse_for_splitting = True

    # Suffix for backends that don't support "SELECT xxx;" queries.
    bare_select_suffix = ''

    # Is NULL implied on columns without it needing to be explicitly specified?
    implied_column_null = False

    uppercases_column_names = False

    # Does the backend support "select for update" queries with limit (and offset)?
    supports_select_for_update_with_limit = True

    # Does the backend ignore null expressions in GREATEST and LEAST queries unless
    # every expression is null?
    greatest_least_ignores_nulls = False

    # Can the backend clone databases for parallel test execution?
    # Defaults to False to allow third-party backends to opt in.
    can_clone_databases = False

    # Does the backend consider table names with different casing to
    # be equal?
    ignores_table_name_case = False

    # Place FOR UPDATE right after the FROM clause. Used on MSSQL.
    for_update_after_from = False

    # Combinatorial flags
    supports_select_union = True
    supports_select_intersection = True
    supports_select_difference = True
    supports_slicing_ordering_in_compound = False

    # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate
    # expressions?
    supports_aggregate_filter_clause = False

    # Does the backend support indexing a TextField?
    supports_index_on_text_field = True

    # Does the backend support window expressions (expression OVER (...))?
    supports_over_clause = False

    # Does the backend support CAST with precision?
    supports_cast_with_precision = True

    # SQL to create a procedure for use by the Django test suite. The
    # functionality of the procedure isn't important.
    create_test_procedure_without_params_sql = None
    create_test_procedure_with_int_param_sql = None

    # Does the backend support keyword parameters for cursor.callproc()?
    supports_callproc_kwargs = False

    # Convert CharField results from bytes to str in database functions.
    db_functions_convert_bytes_to_str = False

    # What formats does the backend EXPLAIN syntax support?
    supported_explain_formats = set()

    # Does DatabaseOperations.explain_query_prefix() raise ValueError if
    # unknown kwargs are passed to QuerySet.explain()?
    validates_explain_options = True

    def __init__(self, connection):
        self.connection = connection

    @cached_property
    def supports_explaining_query_execution(self):
        """Does this backend support explaining query execution?"""
        return self.connection.ops.explain_prefix is not None

    @cached_property
    def supports_transactions(self):
        """Confirm support for transactions."""
        with self.connection.cursor() as cursor:
            cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
            self.connection.set_autocommit(False)
            cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
            self.connection.rollback()
            self.connection.set_autocommit(True)
            cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
            count, = cursor.fetchone()
            cursor.execute('DROP TABLE ROLLBACK_TEST')
        return count == 0

    @cached_property
    def supports_stddev(self):
        """Confirm support for STDDEV and related stats functions."""
        try:
            self.connection.ops.check_expression_support(StdDev(1))
        except NotSupportedError:
            return False
        return True
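
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of Django: one plausible way a third-party
# backend might declare its capabilities by subclassing the class above.
# `HypotheticalFeatures`, every flag value chosen here, and the probe query
# are assumptions for demonstration only; a real backend sets whatever flags
# match its engine. Connection-dependent capabilities are expressed as
# cached properties, mirroring supports_transactions() above.
class HypotheticalFeatures(BaseDatabaseFeatures):
    # Static capabilities of the imaginary engine.
    supports_over_clause = True
    supported_explain_formats = {'JSON', 'TEXT'}
    can_rollback_ddl = False

    @cached_property
    def has_example_extension(self):
        # Probe the live connection once; the result is cached per instance.
        # 'SELECT 1' is a placeholder, not a real capability query.
        with self.connection.cursor() as cursor:
            cursor.execute('SELECT 1')
            return cursor.fetchone() is not None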
50b5e24c03ebd759d38f40eb7ae4b5f90f143b56fc28fe6233b20b6ae0b68eef
from collections import namedtuple # Structure returned by DatabaseIntrospection.get_table_list() TableInfo = namedtuple('TableInfo', ['name', 'type']) # Structure returned by the DB-API cursor.description interface (PEP 249) FieldInfo = namedtuple('FieldInfo', 'name type_code display_size internal_size precision scale null_ok default') class BaseDatabaseIntrospection: """Encapsulate backend-specific introspection utilities.""" data_types_reverse = {} def __init__(self, connection): self.connection = connection def get_field_type(self, data_type, description): """ Hook for a database backend to use the cursor description to match a Django field type to a database column. For Oracle, the column data_type on its own is insufficient to distinguish between a FloatField and IntegerField, for example. """ return self.data_types_reverse[data_type] def table_name_converter(self, name): """ Apply a conversion to the name for the purposes of comparison. The default table name converter is for case sensitive comparison. """ return name def column_name_converter(self, name): """ Apply a conversion to the column name for the purposes of comparison. Use table_name_converter() by default. """ return self.table_name_converter(name) def table_names(self, cursor=None, include_views=False): """ Return a list of names of all tables that exist in the database. Sort the returned table list by Python's default sorting. Do NOT use the database's ORDER BY here to avoid subtle differences in sorting order between databases. """ def get_names(cursor): return sorted(ti.name for ti in self.get_table_list(cursor) if include_views or ti.type == 't') if cursor is None: with self.connection.cursor() as cursor: return get_names(cursor) return get_names(cursor) def get_table_list(self, cursor): """ Return an unsorted list of TableInfo named tuples of all tables and views that exist in the database. """ raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_list() method') def django_table_names(self, only_existing=False, include_views=True): """ Return a list of all table names that have associated Django models and are in INSTALLED_APPS. If only_existing is True, include only the tables in the database. """ from django.apps import apps from django.db import router tables = set() for app_config in apps.get_app_configs(): for model in router.get_migratable_models(app_config, self.connection.alias): if not model._meta.managed: continue tables.add(model._meta.db_table) tables.update( f.m2m_db_table() for f in model._meta.local_many_to_many if f.remote_field.through._meta.managed ) tables = list(tables) if only_existing: existing_tables = self.table_names(include_views=include_views) tables = [ t for t in tables if self.table_name_converter(t) in existing_tables ] return tables def installed_models(self, tables): """ Return a set of all models represented by the provided list of table names. """ from django.apps import apps from django.db import router all_models = [] for app_config in apps.get_app_configs(): all_models.extend(router.get_migratable_models(app_config, self.connection.alias)) tables = list(map(self.table_name_converter, tables)) return { m for m in all_models if self.table_name_converter(m._meta.db_table) in tables } def sequence_list(self): """ Return a list of information about all DB sequences for all models in all apps. 
""" from django.apps import apps from django.db import router sequence_list = [] with self.connection.cursor() as cursor: for app_config in apps.get_app_configs(): for model in router.get_migratable_models(app_config, self.connection.alias): if not model._meta.managed: continue if model._meta.swapped: continue sequence_list.extend(self.get_sequences(cursor, model._meta.db_table, model._meta.local_fields)) for f in model._meta.local_many_to_many: # If this is an m2m using an intermediate table, # we don't need to reset the sequence. if f.remote_field.through is None: sequence = self.get_sequences(cursor, f.m2m_db_table()) sequence_list.extend(sequence or [{'table': f.m2m_db_table(), 'column': None}]) return sequence_list def get_sequences(self, cursor, table_name, table_fields=()): """ Return a list of introspected sequences for table_name. Each sequence is a dict: {'table': <table_name>, 'column': <column_name>}. An optional 'name' key can be added if the backend supports named sequences. """ raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_sequences() method') def get_key_columns(self, cursor, table_name): """ Backends can override this to return a list of: (column_name, referenced_table_name, referenced_column_name) for all key columns in given table. """ raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_key_columns() method') def get_primary_key_column(self, cursor, table_name): """ Return the name of the primary key column for the given table. """ for constraint in self.get_constraints(cursor, table_name).values(): if constraint['primary_key']: return constraint['columns'][0] return None def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. Return a dict mapping constraint names to their attributes, where attributes is a dict with keys: * columns: List of columns this covers * primary_key: True if primary key, False otherwise * unique: True if this is a unique constraint, False otherwise * foreign_key: (table, column) of target, or None * check: True if check constraint, False otherwise * index: True if index, False otherwise. * orders: The order (ASC/DESC) defined for the columns of indexes * type: The type of the index (btree, hash, etc.) Some backends may return special constraint names that don't exist if they don't name constraints of a certain type (e.g. SQLite) """ raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_constraints() method')
efc0a6f224306d31189e945c3d7804b6db0a83be7462d32b33318426b9771220
import copy
import time
import warnings
from collections import deque
from contextlib import contextmanager

import _thread
import pytz

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import utils
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, DatabaseErrorWrapper
from django.utils import timezone
from django.utils.functional import cached_property

NO_DB_ALIAS = '__no_db__'


class BaseDatabaseWrapper:
    """Represent a database connection."""
    # Mapping of Field objects to their column types.
    data_types = {}
    # Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
    data_types_suffix = {}
    # Mapping of Field objects to their SQL for CHECK constraints.
    data_type_check_constraints = {}
    ops = None
    vendor = 'unknown'
    display_name = 'unknown'
    SchemaEditorClass = None
    # Classes instantiated in __init__().
    client_class = None
    creation_class = None
    features_class = None
    introspection_class = None
    ops_class = None
    validation_class = BaseDatabaseValidation

    queries_limit = 9000

    def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS, allow_thread_sharing=False):
        # Connection related attributes.
        # The underlying database connection.
        self.connection = None
        # `settings_dict` should be a dictionary containing keys such as
        # NAME, USER, etc. It's called `settings_dict` instead of `settings`
        # to disambiguate it from Django settings modules.
        self.settings_dict = settings_dict
        self.alias = alias
        # Query logging in debug mode or when explicitly enabled.
        self.queries_log = deque(maxlen=self.queries_limit)
        self.force_debug_cursor = False

        # Transaction related attributes.
        # Tracks if the connection is in autocommit mode. Per PEP 249, by
        # default, it isn't.
        self.autocommit = False
        # Tracks if the connection is in a transaction managed by 'atomic'.
        self.in_atomic_block = False
        # Increment to generate unique savepoint ids.
        self.savepoint_state = 0
        # List of savepoints created by 'atomic'.
        self.savepoint_ids = []
        # Tracks if the outermost 'atomic' block should commit on exit,
        # i.e. if autocommit was active on entry.
        self.commit_on_exit = True
        # Tracks if the transaction should be rolled back to the next
        # available savepoint because of an exception in an inner block.
        self.needs_rollback = False

        # Connection termination related attributes.
        self.close_at = None
        self.closed_in_transaction = False
        self.errors_occurred = False

        # Thread-safety related attributes.
        self.allow_thread_sharing = allow_thread_sharing
        self._thread_ident = _thread.get_ident()

        # A list of no-argument functions to run when the transaction commits.
        # Each entry is an (sids, func) tuple, where sids is a set of the
        # active savepoint IDs when this function was registered.
        self.run_on_commit = []

        # Should we run the on-commit hooks the next time set_autocommit(True)
        # is called?
        self.run_commit_hooks_on_set_autocommit_on = False

        # A stack of wrappers to be invoked around execute()/executemany()
        # calls. Each entry is a function taking five arguments: execute, sql,
        # params, many, and context. It's the function's responsibility to
        # call execute(sql, params, many, context).
self.execute_wrappers = [] self.client = self.client_class(self) self.creation = self.creation_class(self) self.features = self.features_class(self) self.introspection = self.introspection_class(self) self.ops = self.ops_class(self) self.validation = self.validation_class(self) def ensure_timezone(self): """ Ensure the connection's timezone is set to `self.timezone_name` and return whether it changed or not. """ return False @cached_property def timezone(self): """ Time zone for datetimes stored as naive values in the database. Return a tzinfo object or None. This is only needed when time zone support is enabled and the database doesn't support time zones. (When the database supports time zones, the adapter handles aware datetimes so Django doesn't need to.) """ if not settings.USE_TZ: return None elif self.features.supports_timezones: return None elif self.settings_dict['TIME_ZONE'] is None: return timezone.utc else: return pytz.timezone(self.settings_dict['TIME_ZONE']) @cached_property def timezone_name(self): """ Name of the time zone of the database connection. """ if not settings.USE_TZ: return settings.TIME_ZONE elif self.settings_dict['TIME_ZONE'] is None: return 'UTC' else: return self.settings_dict['TIME_ZONE'] @property def queries_logged(self): return self.force_debug_cursor or settings.DEBUG @property def queries(self): if len(self.queries_log) == self.queries_log.maxlen: warnings.warn( "Limit for query logging exceeded, only the last {} queries " "will be returned.".format(self.queries_log.maxlen)) return list(self.queries_log) # ##### Backend-specific methods for creating connections and cursors ##### def get_connection_params(self): """Return a dict of parameters suitable for get_new_connection.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method') def get_new_connection(self, conn_params): """Open a connection to the database.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method') def init_connection_state(self): """Initialize the database connection settings.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method') def create_cursor(self, name=None): """Create a cursor. Assume that a connection is established.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method') # ##### Backend-specific methods for creating connections ##### def connect(self): """Connect to the database. Assume that the connection is closed.""" # Check for invalid configurations. self.check_settings() # In case the previous connection was closed while in an atomic block self.in_atomic_block = False self.savepoint_ids = [] self.needs_rollback = False # Reset parameters defining when to close the connection max_age = self.settings_dict['CONN_MAX_AGE'] self.close_at = None if max_age is None else time.time() + max_age self.closed_in_transaction = False self.errors_occurred = False # Establish the connection conn_params = self.get_connection_params() self.connection = self.get_new_connection(conn_params) self.set_autocommit(self.settings_dict['AUTOCOMMIT']) self.init_connection_state() connection_created.send(sender=self.__class__, connection=self) self.run_on_commit = [] def check_settings(self): if self.settings_dict['TIME_ZONE'] is not None: if not settings.USE_TZ: raise ImproperlyConfigured( "Connection '%s' cannot set TIME_ZONE because USE_TZ is " "False." 
                    % self.alias)
            elif self.features.supports_timezones:
                raise ImproperlyConfigured(
                    "Connection '%s' cannot set TIME_ZONE because its engine "
                    "handles time zone conversions natively." % self.alias)

    def ensure_connection(self):
        """Guarantee that a connection to the database is established."""
        if self.connection is None:
            with self.wrap_database_errors:
                self.connect()

    # ##### Backend-specific wrappers for PEP-249 connection methods #####

    def _prepare_cursor(self, cursor):
        """
        Validate the connection is usable and perform database cursor wrapping.
        """
        self.validate_thread_sharing()
        if self.queries_logged:
            wrapped_cursor = self.make_debug_cursor(cursor)
        else:
            wrapped_cursor = self.make_cursor(cursor)
        return wrapped_cursor

    def _cursor(self, name=None):
        self.ensure_connection()
        with self.wrap_database_errors:
            return self._prepare_cursor(self.create_cursor(name))

    def _commit(self):
        if self.connection is not None:
            with self.wrap_database_errors:
                return self.connection.commit()

    def _rollback(self):
        if self.connection is not None:
            with self.wrap_database_errors:
                return self.connection.rollback()

    def _close(self):
        if self.connection is not None:
            with self.wrap_database_errors:
                return self.connection.close()

    # ##### Generic wrappers for PEP-249 connection methods #####

    def cursor(self):
        """Create a cursor, opening a connection if necessary."""
        return self._cursor()

    def commit(self):
        """Commit a transaction and reset the dirty flag."""
        self.validate_thread_sharing()
        self.validate_no_atomic_block()
        self._commit()
        # A successful commit means that the database connection works.
        self.errors_occurred = False
        self.run_commit_hooks_on_set_autocommit_on = True

    def rollback(self):
        """Roll back a transaction and reset the dirty flag."""
        self.validate_thread_sharing()
        self.validate_no_atomic_block()
        self._rollback()
        # A successful rollback means that the database connection works.
        self.errors_occurred = False
        self.needs_rollback = False
        self.run_on_commit = []

    def close(self):
        """Close the connection to the database."""
        self.validate_thread_sharing()
        self.run_on_commit = []

        # Don't call validate_no_atomic_block() to avoid making it difficult
        # to get rid of a connection in an invalid state. The next connect()
        # will reset the transaction state anyway.
        if self.closed_in_transaction or self.connection is None:
            return
        try:
            self._close()
        finally:
            if self.in_atomic_block:
                self.closed_in_transaction = True
                self.needs_rollback = True
            else:
                self.connection = None

    # ##### Backend-specific savepoint management methods #####

    def _savepoint(self, sid):
        with self.cursor() as cursor:
            cursor.execute(self.ops.savepoint_create_sql(sid))

    def _savepoint_rollback(self, sid):
        with self.cursor() as cursor:
            cursor.execute(self.ops.savepoint_rollback_sql(sid))

    def _savepoint_commit(self, sid):
        with self.cursor() as cursor:
            cursor.execute(self.ops.savepoint_commit_sql(sid))

    def _savepoint_allowed(self):
        # Savepoints cannot be created outside a transaction.
        return self.features.uses_savepoints and not self.get_autocommit()

    # ##### Generic savepoint management methods #####

    def savepoint(self):
        """
        Create a savepoint inside the current transaction. Return an
        identifier for the savepoint that will be used for the subsequent
        rollback or commit. Do nothing if savepoints are not supported.
""" if not self._savepoint_allowed(): return thread_ident = _thread.get_ident() tid = str(thread_ident).replace('-', '') self.savepoint_state += 1 sid = "s%s_x%d" % (tid, self.savepoint_state) self.validate_thread_sharing() self._savepoint(sid) return sid def savepoint_rollback(self, sid): """ Roll back to a savepoint. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return self.validate_thread_sharing() self._savepoint_rollback(sid) # Remove any callbacks registered while this savepoint was active. self.run_on_commit = [ (sids, func) for (sids, func) in self.run_on_commit if sid not in sids ] def savepoint_commit(self, sid): """ Release a savepoint. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return self.validate_thread_sharing() self._savepoint_commit(sid) def clean_savepoints(self): """ Reset the counter used to generate unique savepoint ids in this thread. """ self.savepoint_state = 0 # ##### Backend-specific transaction management methods ##### def _set_autocommit(self, autocommit): """ Backend-specific implementation to enable or disable autocommit. """ raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method') # ##### Generic transaction management methods ##### def get_autocommit(self): """Get the autocommit state.""" self.ensure_connection() return self.autocommit def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False): """ Enable or disable autocommit. The usual way to start a transaction is to turn autocommit off. SQLite does not properly start a transaction when disabling autocommit. To avoid this buggy behavior and to actually enter a new transaction, an explcit BEGIN is required. Using force_begin_transaction_with_broken_autocommit=True will issue an explicit BEGIN with SQLite. This option will be ignored for other backends. """ self.validate_no_atomic_block() self.ensure_connection() start_transaction_under_autocommit = ( force_begin_transaction_with_broken_autocommit and not autocommit and self.features.autocommits_when_autocommit_is_off ) if start_transaction_under_autocommit: self._start_transaction_under_autocommit() else: self._set_autocommit(autocommit) self.autocommit = autocommit if autocommit and self.run_commit_hooks_on_set_autocommit_on: self.run_and_clear_commit_hooks() self.run_commit_hooks_on_set_autocommit_on = False def get_rollback(self): """Get the "needs rollback" flag -- for *advanced use* only.""" if not self.in_atomic_block: raise TransactionManagementError( "The rollback flag doesn't work outside of an 'atomic' block.") return self.needs_rollback def set_rollback(self, rollback): """ Set or unset the "needs rollback" flag -- for *advanced use* only. """ if not self.in_atomic_block: raise TransactionManagementError( "The rollback flag doesn't work outside of an 'atomic' block.") self.needs_rollback = rollback def validate_no_atomic_block(self): """Raise an error if an atomic block is active.""" if self.in_atomic_block: raise TransactionManagementError( "This is forbidden when an 'atomic' block is active.") def validate_no_broken_transaction(self): if self.needs_rollback: raise TransactionManagementError( "An error occurred in the current transaction. You can't " "execute queries until the end of the 'atomic' block.") # ##### Foreign key constraints checks handling ##### @contextmanager def constraint_checks_disabled(self): """ Disable foreign key constraint checking. 
""" disabled = self.disable_constraint_checking() try: yield finally: if disabled: self.enable_constraint_checking() def disable_constraint_checking(self): """ Backends can implement as needed to temporarily disable foreign key constraint checking. Should return True if the constraints were disabled and will need to be reenabled. """ return False def enable_constraint_checking(self): """ Backends can implement as needed to re-enable foreign key constraint checking. """ pass def check_constraints(self, table_names=None): """ Backends can override this method if they can apply constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an IntegrityError if any invalid foreign key references are encountered. """ pass # ##### Connection termination handling ##### def is_usable(self): """ Test if the database connection is usable. This method may assume that self.connection is not None. Actual implementations should take care not to raise exceptions as that may prevent Django from recycling unusable connections. """ raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require an is_usable() method") def close_if_unusable_or_obsolete(self): """ Close the current connection if unrecoverable errors have occurred or if it outlived its maximum age. """ if self.connection is not None: # If the application didn't restore the original autocommit setting, # don't take chances, drop the connection. if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']: self.close() return # If an exception other than DataError or IntegrityError occurred # since the last commit / rollback, check if the connection works. if self.errors_occurred: if self.is_usable(): self.errors_occurred = False else: self.close() return if self.close_at is not None and time.time() >= self.close_at: self.close() return # ##### Thread safety handling ##### def validate_thread_sharing(self): """ Validate that the connection isn't accessed by another thread than the one which originally created it, unless the connection was explicitly authorized to be shared between threads (via the `allow_thread_sharing` property). Raise an exception if the validation fails. """ if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()): raise DatabaseError( "DatabaseWrapper objects created in a " "thread can only be used in that same thread. The object " "with alias '%s' was created in thread id %s and this is " "thread id %s." % (self.alias, self._thread_ident, _thread.get_ident()) ) # ##### Miscellaneous ##### def prepare_database(self): """ Hook to do any database check or preparation, generally called before migrating a project or an app. """ pass @cached_property def wrap_database_errors(self): """ Context manager and decorator that re-throws backend-specific database exceptions using Django's common wrappers. """ return DatabaseErrorWrapper(self) def chunked_cursor(self): """ Return a cursor that tries to avoid caching in the database (if supported by the database), otherwise return a regular cursor. """ return self.cursor() def make_debug_cursor(self, cursor): """Create a cursor that logs all queries in self.queries_log.""" return utils.CursorDebugWrapper(cursor, self) def make_cursor(self, cursor): """Create a cursor without debug logging.""" return utils.CursorWrapper(cursor, self) @contextmanager def temporary_connection(self): """ Context manager that ensures that a connection is established, and if it opened one, closes it to avoid leaving a dangling connection. 
This is useful for operations outside of the request-response cycle. Provide a cursor: with self.temporary_connection() as cursor: ... """ must_close = self.connection is None try: with self.cursor() as cursor: yield cursor finally: if must_close: self.close() @property def _nodb_connection(self): """ Return an alternative connection to be used when there is no need to access the main database, specifically for test db creation/deletion. This also prevents the production database from being exposed to potential child threads while (or after) the test database is destroyed. Refs #10868, #17786, #16969. """ return self.__class__( {**self.settings_dict, 'NAME': None}, alias=NO_DB_ALIAS, allow_thread_sharing=False, ) def _start_transaction_under_autocommit(self): """ Only required when autocommits_when_autocommit_is_off = True. """ raise NotImplementedError( 'subclasses of BaseDatabaseWrapper may require a ' '_start_transaction_under_autocommit() method' ) def schema_editor(self, *args, **kwargs): """ Return a new instance of this backend's SchemaEditor. """ if self.SchemaEditorClass is None: raise NotImplementedError( 'The SchemaEditorClass attribute of this database wrapper is still None') return self.SchemaEditorClass(self, *args, **kwargs) def on_commit(self, func): if self.in_atomic_block: # Transaction in progress; save for execution on commit. self.run_on_commit.append((set(self.savepoint_ids), func)) elif not self.get_autocommit(): raise TransactionManagementError('on_commit() cannot be used in manual transaction management') else: # No transaction in progress and in autocommit mode; execute # immediately. func() def run_and_clear_commit_hooks(self): self.validate_no_atomic_block() current_run_on_commit = self.run_on_commit self.run_on_commit = [] while current_run_on_commit: sids, func = current_run_on_commit.pop(0) func() @contextmanager def execute_wrapper(self, wrapper): """ Return a context manager under which the wrapper is applied to suitable database query executions. """ self.execute_wrappers.append(wrapper) try: yield finally: self.execute_wrappers.pop() def copy(self, alias=None, allow_thread_sharing=None): """ Return a copy of this connection. For tests that require two connections to the same database. """ settings_dict = copy.deepcopy(self.settings_dict) if alias is None: alias = self.alias if allow_thread_sharing is None: allow_thread_sharing = self.allow_thread_sharing return type(self)(settings_dict, alias, allow_thread_sharing)
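
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of Django: using the execute_wrapper() hook
# above to time every query on a connection. Per the comment in __init__(),
# a wrapper takes (execute, sql, params, many, context) and must call
# execute() itself. `timing_wrapper` is a hypothetical name, and the print()
# call is a stand-in for real instrumentation.
def timing_wrapper(execute, sql, params, many, context):
    start = time.monotonic()
    try:
        return execute(sql, params, many, context)
    finally:
        print('%.6fs %s' % (time.monotonic() - start, sql))


# Assumed usage, with `connection` being any BaseDatabaseWrapper instance
# (e.g. django.db.connection in a configured project):
#
#     with connection.execute_wrapper(timing_wrapper):
#         with connection.cursor() as cursor:
#             cursor.execute('SELECT 1')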
ceb28ed05c2a4cffed886d839f2f944cc0971be0cd1817324d713db63dfe5913
import datetime
import decimal
from importlib import import_module

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import NotSupportedError, transaction
from django.db.backends import utils
from django.utils import timezone
from django.utils.encoding import force_text


class BaseDatabaseOperations:
    """
    Encapsulate backend-specific differences, such as the way a backend
    performs ordering or calculates the ID of a recently-inserted row.
    """
    compiler_module = "django.db.models.sql.compiler"

    # Integer field safe ranges by `internal_type` as documented
    # in docs/ref/models/fields.txt.
    integer_field_ranges = {
        'SmallIntegerField': (-32768, 32767),
        'IntegerField': (-2147483648, 2147483647),
        'BigIntegerField': (-9223372036854775808, 9223372036854775807),
        'PositiveSmallIntegerField': (0, 32767),
        'PositiveIntegerField': (0, 2147483647),
    }
    set_operators = {
        'union': 'UNION',
        'intersection': 'INTERSECT',
        'difference': 'EXCEPT',
    }
    # Mapping of Field.get_internal_type() (typically the model field's class
    # name) to the data type to use for the Cast() function, if different from
    # DatabaseWrapper.data_types.
    cast_data_types = {}
    # CharField data type if the max_length argument isn't provided.
    cast_char_field_without_max_length = None

    # Start and end points for window expressions.
    PRECEDING = 'PRECEDING'
    FOLLOWING = 'FOLLOWING'
    UNBOUNDED_PRECEDING = 'UNBOUNDED ' + PRECEDING
    UNBOUNDED_FOLLOWING = 'UNBOUNDED ' + FOLLOWING
    CURRENT_ROW = 'CURRENT ROW'

    # Prefix for EXPLAIN queries, or None if EXPLAIN isn't supported.
    explain_prefix = None

    def __init__(self, connection):
        self.connection = connection
        self._cache = None

    def autoinc_sql(self, table, column):
        """
        Return any SQL needed to support auto-incrementing primary keys, or
        None if no SQL is necessary.

        This SQL is executed when a table is created.
        """
        return None

    def bulk_batch_size(self, fields, objs):
        """
        Return the maximum allowed batch size for the backend. The fields
        are the fields going to be inserted in the batch; objs contains all
        the objects to be inserted.
        """
        return len(objs)

    def cache_key_culling_sql(self):
        """
        Return an SQL query that retrieves the first cache key greater than
        the n smallest.

        This is used by the 'db' cache backend to determine where to start
        culling.
        """
        return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"

    def unification_cast_sql(self, output_field):
        """
        Given a field instance, return the SQL that casts the result of a
        union to that type. The resulting string should contain a '%s'
        placeholder for the expression being cast.
        """
        return '%s'

    def date_extract_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month', or 'day', return the SQL that
        extracts a value from the given date field field_name.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')

    def date_interval_sql(self, timedelta):
        """
        Implement the date interval functionality for expressions.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')

    def date_trunc_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month', or 'day', return the SQL that
        truncates the given date field field_name to a date object with only
        the given specificity.
""" raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method.') def datetime_cast_date_sql(self, field_name, tzname): """ Return the SQL to cast a datetime value to date value. """ raise NotImplementedError( 'subclasses of BaseDatabaseOperations may require a ' 'datetime_cast_date_sql() method.' ) def datetime_cast_time_sql(self, field_name, tzname): """ Return the SQL to cast a datetime value to time value. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method') def datetime_extract_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that extracts a value from the given datetime field field_name. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method') def datetime_trunc_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that truncates the given datetime field field_name to a datetime object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method') def time_trunc_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute' or 'second', return the SQL that truncates the given time field field_name to a time object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method') def time_extract_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute', or 'second', return the SQL that extracts a value from the given time field field_name. """ return self.date_extract_sql(lookup_type, field_name) def deferrable_sql(self): """ Return the SQL to make a constraint "initially deferred" during a CREATE TABLE statement. """ return '' def distinct_sql(self, fields, params): """ Return an SQL DISTINCT clause which removes duplicate rows from the result set. If any fields are given, only check the given fields for duplicates. """ if fields: raise NotSupportedError('DISTINCT ON fields is not supported by this database backend') else: return ['DISTINCT'], [] def fetch_returned_insert_id(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table that has an auto-incrementing ID, return the newly created ID. """ return cursor.fetchone()[0] def field_cast_sql(self, db_type, internal_type): """ Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type (e.g. 'GenericIPAddressField'), return the SQL to cast it before using it in a WHERE statement. The resulting string should contain a '%s' placeholder for the column being searched against. """ return '%s' def force_no_ordering(self): """ Return a list used in the "ORDER BY" clause to force no ordering at all. Return an empty list to include nothing in the ordering. """ return [] def for_update_sql(self, nowait=False, skip_locked=False, of=()): """ Return the FOR UPDATE SQL clause to lock rows for an update operation. 
""" return 'FOR UPDATE%s%s%s' % ( ' OF %s' % ', '.join(of) if of else '', ' NOWAIT' if nowait else '', ' SKIP LOCKED' if skip_locked else '', ) def _get_limit_offset_params(self, low_mark, high_mark): offset = low_mark or 0 if high_mark is not None: return (high_mark - offset), offset elif offset: return self.connection.ops.no_limit_value(), offset return None, offset def limit_offset_sql(self, low_mark, high_mark): """Return LIMIT/OFFSET SQL clause.""" limit, offset = self._get_limit_offset_params(low_mark, high_mark) return '%s%s' % ( (' LIMIT %d' % limit) if limit else '', (' OFFSET %d' % offset) if offset else '', ) def last_executed_query(self, cursor, sql, params): """ Return a string of the query last executed by the given cursor, with placeholders replaced with actual values. `sql` is the raw query containing placeholders and `params` is the sequence of parameters. These are used by default, but this method exists for database backends to provide a better implementation according to their own quoting schemes. """ # Convert params to contain string values. def to_string(s): return force_text(s, strings_only=True, errors='replace') if isinstance(params, (list, tuple)): u_params = tuple(to_string(val) for val in params) elif params is None: u_params = () else: u_params = {to_string(k): to_string(v) for k, v in params.items()} return "QUERY = %r - PARAMS = %r" % (sql, u_params) def last_insert_id(self, cursor, table_name, pk_name): """ Given a cursor object that has just performed an INSERT statement into a table that has an auto-incrementing ID, return the newly created ID. `pk_name` is the name of the primary-key column. """ return cursor.lastrowid def lookup_cast(self, lookup_type, internal_type=None): """ Return the string to use in a query when performing lookups ("contains", "like", etc.). It should contain a '%s' placeholder for the column being searched against. """ return "%s" def max_in_list_size(self): """ Return the maximum number of items that can be passed in a single 'IN' list condition, or None if the backend does not impose a limit. """ return None def max_name_length(self): """ Return the maximum length of table and column names, or None if there is no limit. """ return None def no_limit_value(self): """ Return the value to use for the LIMIT when we are wanting "LIMIT infinity". Return None if the limit clause can be omitted in this case. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method') def pk_default_value(self): """ Return the value to use during an INSERT statement to specify that the field should use its default value. """ return 'DEFAULT' def prepare_sql_script(self, sql): """ Take an SQL script that may contain multiple lines and return a list of statements to feed to successive cursor.execute() calls. Since few databases are able to process raw SQL scripts in a single cursor.execute() call and PEP 249 doesn't talk about this use case, the default implementation is conservative. """ try: import sqlparse except ImportError: raise ImproperlyConfigured( "The sqlparse package is required if you don't split your SQL " "statements manually." ) else: return [sqlparse.format(statement, strip_comments=True) for statement in sqlparse.split(sql) if statement] def process_clob(self, value): """ Return the value of a CLOB column, for backends that return a locator object that requires additional processing. 
""" return value def return_insert_id(self): """ For backends that support returning the last insert ID as part of an insert query, return the SQL and params to append to the INSERT query. The returned fragment should contain a format string to hold the appropriate column. """ pass def compiler(self, compiler_name): """ Return the SQLCompiler class corresponding to the given name, in the namespace corresponding to the `compiler_module` attribute on this backend. """ if self._cache is None: self._cache = import_module(self.compiler_module) return getattr(self._cache, compiler_name) def quote_name(self, name): """ Return a quoted version of the given table, index, or column name. Do not quote the given name if it's already been quoted. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method') def random_function_sql(self): """Return an SQL expression that returns a random value.""" return 'RANDOM()' def regex_lookup(self, lookup_type): """ Return the string to use in a query when performing regular expression lookups (using "regex" or "iregex"). It should contain a '%s' placeholder for the column being searched against. If the feature is not supported (or part of it is not supported), raise NotImplementedError. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method') def savepoint_create_sql(self, sid): """ Return the SQL for starting a new savepoint. Only required if the "uses_savepoints" feature is True. The "sid" parameter is a string for the savepoint id. """ return "SAVEPOINT %s" % self.quote_name(sid) def savepoint_commit_sql(self, sid): """ Return the SQL for committing the given savepoint. """ return "RELEASE SAVEPOINT %s" % self.quote_name(sid) def savepoint_rollback_sql(self, sid): """ Return the SQL for rolling back the given savepoint. """ return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid) def set_time_zone_sql(self): """ Return the SQL that will set the connection's time zone. Return '' if the backend doesn't support time zones. """ return '' def sql_flush(self, style, tables, sequences, allow_cascade=False): """ Return a list of SQL statements required to remove all data from the given database tables (without actually removing the tables themselves) and the SQL statements required to reset the sequences passed in `sequences`. The `style` argument is a Style object as returned by either color_style() or no_style() in django.core.management.color. The `allow_cascade` argument determines whether truncation may cascade to tables with foreign keys pointing the tables being truncated. PostgreSQL requires a cascade even if these tables are empty. """ raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method') def execute_sql_flush(self, using, sql_list): """Execute a list of SQL statements to flush the database.""" with transaction.atomic(using=using, savepoint=self.connection.features.can_rollback_ddl): with self.connection.cursor() as cursor: for sql in sql_list: cursor.execute(sql) def sequence_reset_by_name_sql(self, style, sequences): """ Return a list of the SQL statements required to reset sequences passed in `sequences`. The `style` argument is a Style object as returned by either color_style() or no_style() in django.core.management.color. """ return [] def sequence_reset_sql(self, style, model_list): """ Return a list of the SQL statements required to reset sequences for the given models. 
        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        return []  # No sequence reset required by default.

    def start_transaction_sql(self):
        """Return the SQL statement required to start a transaction."""
        return "BEGIN;"

    def end_transaction_sql(self, success=True):
        """Return the SQL statement required to end a transaction."""
        if not success:
            return "ROLLBACK;"
        return "COMMIT;"

    def tablespace_sql(self, tablespace, inline=False):
        """
        Return the SQL that will be used in a query to define the tablespace.

        Return '' if the backend doesn't support tablespaces.

        If `inline` is True, append the SQL to a row; otherwise append it to
        the entire CREATE TABLE or CREATE INDEX statement.
        """
        return ''

    def prep_for_like_query(self, x):
        """Prepare a value for use in a LIKE query."""
        return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")

    # Same as prep_for_like_query(), but called for "iexact" matches, which
    # need not necessarily be implemented using "LIKE" in the backend.
    prep_for_iexact_query = prep_for_like_query

    def validate_autopk_value(self, value):
        """
        Certain backends do not accept some values for "serial" fields
        (for example zero in MySQL). Raise a ValueError if the value is
        invalid, otherwise return the validated value.
        """
        return value

    def adapt_unknown_value(self, value):
        """
        Transform a value to something compatible with the backend driver.

        This method only depends on the type of the value. It's designed for
        cases where the target type isn't known, such as .raw() SQL queries.
        As a consequence it may not work perfectly in all circumstances.
        """
        if isinstance(value, datetime.datetime):  # must be before date
            return self.adapt_datetimefield_value(value)
        elif isinstance(value, datetime.date):
            return self.adapt_datefield_value(value)
        elif isinstance(value, datetime.time):
            return self.adapt_timefield_value(value)
        elif isinstance(value, decimal.Decimal):
            return self.adapt_decimalfield_value(value)
        else:
            return value

    def adapt_datefield_value(self, value):
        """
        Transform a date value to an object compatible with what is expected
        by the backend driver for date columns.
        """
        if value is None:
            return None
        return str(value)

    def adapt_datetimefield_value(self, value):
        """
        Transform a datetime value to an object compatible with what is expected
        by the backend driver for datetime columns.
        """
        if value is None:
            return None
        return str(value)

    def adapt_timefield_value(self, value):
        """
        Transform a time value to an object compatible with what is expected
        by the backend driver for time columns.
        """
        if value is None:
            return None
        if timezone.is_aware(value):
            raise ValueError("Django does not support timezone-aware times.")
        return str(value)

    def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
        """
        Transform a decimal.Decimal value to an object compatible with what is
        expected by the backend driver for decimal (numeric) columns.
        """
        return utils.format_number(value, max_digits, decimal_places)

    def adapt_ipaddressfield_value(self, value):
        """
        Transform a string representation of an IP address into the expected
        type for the backend driver.
        """
        return value or None

    def year_lookup_bounds_for_date_field(self, value):
        """
        Return a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateField value using a year
        lookup.

        `value` is an int, containing the looked-up year.
""" first = datetime.date(value, 1, 1) second = datetime.date(value, 12, 31) first = self.adapt_datefield_value(first) second = self.adapt_datefield_value(second) return [first, second] def year_lookup_bounds_for_datetime_field(self, value): """ Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. `value` is an int, containing the looked-up year. """ first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second] def get_db_converters(self, expression): """ Return a list of functions needed to convert field data. Some field types on some backends do not provide data in the correct format, this is the hook for converter functions. """ return [] def convert_durationfield_value(self, value, expression, connection): if value is not None: return datetime.timedelta(0, 0, value) def check_expression_support(self, expression): """ Check that the backend supports the provided expression. This is used on specific backends to rule out known expressions that have problematic or nonexistent implementations. If the expression has a known problem, the backend should raise NotSupportedError. """ pass def combine_expression(self, connector, sub_expressions): """ Combine a list of subexpressions into a single expression, using the provided connecting operator. This is required because operators can vary between backends (e.g., Oracle with %% and &) and between subexpression types (e.g., date expressions). """ conn = ' %s ' % connector return conn.join(sub_expressions) def combine_duration_expression(self, connector, sub_expressions): return self.combine_expression(connector, sub_expressions) def binary_placeholder_sql(self, value): """ Some backends require special syntax to insert binary content (MySQL for example uses '_binary %s'). """ return '%s' def modify_insert_params(self, placeholder, params): """ Allow modification of insert parameters. Needed for Oracle Spatial backend due to #10888. """ return params def integer_field_range(self, internal_type): """ Given an integer field internal type (e.g. 'PositiveIntegerField'), return a tuple of the (min_value, max_value) form representing the range of the column type bound to the field. """ return self.integer_field_ranges[internal_type] def subtract_temporals(self, internal_type, lhs, rhs): if self.connection.features.supports_temporal_subtraction: lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params raise NotSupportedError("This backend does not support %s subtraction." % internal_type) def window_frame_start(self, start): if isinstance(start, int): if start < 0: return '%d %s' % (abs(start), self.PRECEDING) elif start == 0: return self.CURRENT_ROW elif start is None: return self.UNBOUNDED_PRECEDING raise ValueError("start argument must be a negative integer, zero, or None, but got '%s'." % start) def window_frame_end(self, end): if isinstance(end, int): if end == 0: return self.CURRENT_ROW elif end > 0: return '%d %s' % (end, self.FOLLOWING) elif end is None: return self.UNBOUNDED_FOLLOWING raise ValueError("end argument must be a positive integer, zero, or None, but got '%s'." 
% end) def window_frame_rows_start_end(self, start=None, end=None): """ Return SQL for start and end points in an OVER clause window frame. """ if not self.connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') return self.window_frame_start(start), self.window_frame_end(end) def window_frame_range_start_end(self, start=None, end=None): return self.window_frame_rows_start_end(start, end) def explain_query_prefix(self, format=None, **options): if not self.connection.features.supports_explaining_query_execution: raise NotSupportedError('This backend does not support explaining query execution.') if format: supported_formats = self.connection.features.supported_explain_formats normalized_format = format.upper() if normalized_format not in supported_formats: msg = '%s is not a recognized format.' % normalized_format if supported_formats: msg += ' Allowed formats: %s' % ', '.join(sorted(supported_formats)) raise ValueError(msg) if options: raise ValueError('Unknown options: %s' % ', '.join(sorted(options.keys()))) return self.explain_prefix
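
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of Django: what the pure-string helpers
# above produce. Instantiating BaseDatabaseOperations with connection=None
# is an assumption made only so methods that never touch the connection can
# be exercised directly; `_demo_operations` is a hypothetical name.
def _demo_operations():
    ops = BaseDatabaseOperations(connection=None)
    # Window frame bounds, as combined by window_frame_rows_start_end().
    assert ops.window_frame_start(None) == 'UNBOUNDED PRECEDING'
    assert ops.window_frame_start(-3) == '3 PRECEDING'
    assert ops.window_frame_end(0) == 'CURRENT ROW'
    assert ops.window_frame_end(2) == '2 FOLLOWING'
    # Slicing translation: rows [5:15] become LIMIT 10 OFFSET 5.
    assert ops._get_limit_offset_params(5, 15) == (10, 5)
    assert ops.limit_offset_sql(5, 15) == ' LIMIT 10 OFFSET 5'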
754c619c37c2485e2c42bde4e4a94e2c77c6183d20fd6629c883c7c748ee317b
import hashlib import logging from datetime import datetime from django.db.backends.ddl_references import ( Columns, ForeignKeyName, IndexName, Statement, Table, ) from django.db.backends.utils import split_identifier from django.db.models import Index from django.db.transaction import TransactionManagementError, atomic from django.utils import timezone from django.utils.encoding import force_bytes logger = logging.getLogger('django.db.backends.schema') def _is_relevant_relation(relation, altered_field): """ When altering the given field, must constraints on its model from the given relation be temporarily dropped? """ field = relation.field if field.many_to_many: # M2M reverse field return False if altered_field.primary_key and field.to_fields == [None]: # Foreign key constraint on the primary key, which is being altered. return True # Is the constraint targeting the field being altered? return altered_field.name in field.to_fields def _related_non_m2m_objects(old_field, new_field): # Filter out m2m objects from reverse relations. # Return (old_relation, new_relation) tuples. return zip( (obj for obj in old_field.model._meta.related_objects if _is_relevant_relation(obj, old_field)), (obj for obj in new_field.model._meta.related_objects if _is_relevant_relation(obj, new_field)) ) class BaseDatabaseSchemaEditor: """ This class and its subclasses are responsible for emitting schema-changing statements to the databases - model creation/removal/alteration, field renaming, index fiddling, and so on. """ # Overrideable SQL templates sql_create_table = "CREATE TABLE %(table)s (%(definition)s)" sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s" sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s" sql_delete_table = "DROP TABLE %(table)s CASCADE" sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s" sql_alter_column = "ALTER TABLE %(table)s %(changes)s" sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s" sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL" sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL" sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s" sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE" sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s" sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL" sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)" sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)" sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_create_fk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) " "REFERENCES %(to_table)s (%(to_column)s)%(deferrable)s" ) sql_create_inline_fk = None sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s" sql_delete_index = "DROP INDEX %(name)s" sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_delete_procedure = 'DROP PROCEDURE %(procedure)s' def __init__(self, connection, collect_sql=False, atomic=True): self.connection = connection self.collect_sql = 
collect_sql if self.collect_sql: self.collected_sql = [] self.atomic_migration = self.connection.features.can_rollback_ddl and atomic # State-managing methods def __enter__(self): self.deferred_sql = [] if self.atomic_migration: self.atomic = atomic(self.connection.alias) self.atomic.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: for sql in self.deferred_sql: self.execute(sql) if self.atomic_migration: self.atomic.__exit__(exc_type, exc_value, traceback) # Core utility functions def execute(self, sql, params=()): """Execute the given SQL statement, with optional parameters.""" # Don't perform the transactional DDL check if SQL is being collected # as it's not going to be executed anyway. if not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl: raise TransactionManagementError( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) # Account for non-string statement objects. sql = str(sql) # Log the command we're running, then run it logger.debug("%s; (params %r)", sql, params, extra={'params': params, 'sql': sql}) if self.collect_sql: ending = "" if sql.endswith(";") else ";" if params is not None: self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending) else: self.collected_sql.append(sql + ending) else: with self.connection.cursor() as cursor: cursor.execute(sql, params) def quote_name(self, name): return self.connection.ops.quote_name(name) @classmethod def _digest(cls, *args): """ Generate a 32-bit digest of a set of arguments that can be used to shorten identifying names. """ h = hashlib.md5() for arg in args: h.update(force_bytes(arg)) return h.hexdigest()[:8] # Field <-> database mapping functions def column_sql(self, model, field, include_default=False): """ Take a field and return its column definition. The field must already have had set_attributes_from_name() called. """ # Get the column's type and use that as the basis of the SQL db_params = field.db_parameters(connection=self.connection) sql = db_params['type'] params = [] # Check for fields that aren't actually columns (e.g. M2M) if sql is None: return None, None # Work out nullability null = field.null # If we were told to include a default value, do so include_default = include_default and not self.skip_default(field) if include_default: default_value = self.effective_default(field) if default_value is not None: if self.connection.features.requires_literal_defaults: # Some databases can't take defaults as a parameter (oracle) # If this is the case, the individual schema backend should # implement prepare_default sql += " DEFAULT %s" % self.prepare_default(default_value) else: sql += " DEFAULT %s" params += [default_value] # Oracle treats the empty string ('') as null, so coerce the null # option whenever '' is a possible value. 
if (field.empty_strings_allowed and not field.primary_key and self.connection.features.interprets_empty_strings_as_nulls): null = True if null and not self.connection.features.implied_column_null: sql += " NULL" elif not null: sql += " NOT NULL" # Primary key/unique outputs if field.primary_key: sql += " PRIMARY KEY" elif field.unique: sql += " UNIQUE" # Optionally add the tablespace if it's an implicitly indexed column tablespace = field.db_tablespace or model._meta.db_tablespace if tablespace and self.connection.features.supports_tablespaces and field.unique: sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True) # Return the sql return sql, params def skip_default(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob). """ return False def prepare_default(self, value): """ Only used for backends which have requires_literal_defaults feature """ raise NotImplementedError( 'subclasses of BaseDatabaseSchemaEditor for backends which have ' 'requires_literal_defaults must provide a prepare_default() method' ) def effective_default(self, field): """Return a field's effective database default value.""" if field.has_default(): default = field.get_default() elif not field.null and field.blank and field.empty_strings_allowed: if field.get_internal_type() == "BinaryField": default = bytes() else: default = str() elif getattr(field, 'auto_now', False) or getattr(field, 'auto_now_add', False): default = datetime.now() internal_type = field.get_internal_type() if internal_type == 'DateField': default = default.date elif internal_type == 'TimeField': default = default.time elif internal_type == 'DateTimeField': default = timezone.now else: default = None # If it's a callable, call it if callable(default): default = default() # Convert the value so it can be sent to the database. return field.get_db_prep_save(default, self.connection) def quote_value(self, value): """ Return a quoted version of the value so it's safe to use in an SQL string. This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values for particularly tricky backends (defaults are not user-defined, though, so this is safe). """ raise NotImplementedError() # Actions def create_model(self, model): """ Create a table and any accompanying indexes or unique constraints for the given `model`. 
""" # Create column SQL, add FK deferreds if needed column_sqls = [] params = [] for field in model._meta.local_fields: # SQL definition, extra_params = self.column_sql(model, field) if definition is None: continue # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params['check']: definition += " CHECK (%s)" % db_params['check'] # Autoincrement SQL (for backends with inline variant) col_type_suffix = field.db_type_suffix(connection=self.connection) if col_type_suffix: definition += " %s" % col_type_suffix params.extend(extra_params) # FK if field.remote_field and field.db_constraint: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column if self.sql_create_inline_fk: definition += " " + self.sql_create_inline_fk % { "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), } elif self.connection.features.supports_foreign_keys: self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s")) # Add the SQL to our big list column_sqls.append("%s %s" % ( self.quote_name(field.column), definition, )) # Autoincrement SQL (for backends with post table definition variant) if field.get_internal_type() in ("AutoField", "BigAutoField"): autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column) if autoinc_sql: self.deferred_sql.extend(autoinc_sql) # Add any unique_togethers (always deferred, as some fields might be # created afterwards, like geometry fields with some backends) for fields in model._meta.unique_together: columns = [model._meta.get_field(field).column for field in fields] self.deferred_sql.append(self._create_unique_sql(model, columns)) # Make the table sql = self.sql_create_table % { "table": self.quote_name(model._meta.db_table), "definition": ", ".join(column_sqls) } if model._meta.db_tablespace: tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace) if tablespace_sql: sql += ' ' + tablespace_sql # Prevent using [] as params, in the case a literal '%' is used in the definition self.execute(sql, params or None) # Add any field index and index_together's (deferred as SQLite3 _remake_table needs it) self.deferred_sql.extend(self._model_indexes_sql(model)) # Make M2M tables for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.create_model(field.remote_field.through) def delete_model(self, model): """Delete a model from the database.""" # Handle auto-created intermediary models for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # Delete the table self.execute(self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), }) # Remove all deferred statements referencing the deleted table. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_table(model._meta.db_table): self.deferred_sql.remove(sql) def add_index(self, model, index): """Add an index on a model.""" self.execute(index.create_sql(model, self)) def remove_index(self, model, index): """Remove an index from a model.""" self.execute(index.remove_sql(model, self)) def alter_unique_together(self, model, old_unique_together, new_unique_together): """ Deal with a model changing its unique_together. The input unique_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. 
""" olds = {tuple(fields) for fields in old_unique_together} news = {tuple(fields) for fields in new_unique_together} # Deleted uniques for fields in olds.difference(news): self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique) # Created uniques for fields in news.difference(olds): columns = [model._meta.get_field(field).column for field in fields] self.execute(self._create_unique_sql(model, columns)) def alter_index_together(self, model, old_index_together, new_index_together): """ Deal with a model changing its index_together. The input index_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. """ olds = {tuple(fields) for fields in old_index_together} news = {tuple(fields) for fields in new_index_together} # Deleted indexes for fields in olds.difference(news): self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index) # Created indexes for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_index_sql(model, fields, suffix="_idx")) def _delete_composed_index(self, model, fields, constraint_kwargs, sql): columns = [model._meta.get_field(field).column for field in fields] constraint_names = self._constraint_names(model, columns, **constraint_kwargs) if len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % ( len(constraint_names), model._meta.db_table, ", ".join(columns), )) self.execute(self._delete_constraint_sql(sql, model, constraint_names[0])) def alter_db_table(self, model, old_db_table, new_db_table): """Rename the table a model points to.""" if (old_db_table == new_db_table or (self.connection.features.ignores_table_name_case and old_db_table.lower() == new_db_table.lower())): return self.execute(self.sql_rename_table % { "old_table": self.quote_name(old_db_table), "new_table": self.quote_name(new_db_table), }) # Rename all references to the old table name. for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_table_references(old_db_table, new_db_table) def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace): """Move a model's table between tablespaces.""" self.execute(self.sql_retablespace_table % { "table": self.quote_name(model._meta.db_table), "old_tablespace": self.quote_name(old_db_tablespace), "new_tablespace": self.quote_name(new_db_tablespace), }) def add_field(self, model, field): """ Create a field on a model. Usually involves adding a column, but may involve adding a table instead (for M2M fields). 
""" # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.create_model(field.remote_field.through) # Get the column's definition definition, params = self.column_sql(model, field, include_default=True) # It might not actually have a column behind it if definition is None: return # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params['check']: definition += " CHECK (%s)" % db_params['check'] # Build the SQL and run it sql = self.sql_create_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), "definition": definition, } self.execute(sql, params) # Drop the default if we need to # (Django usually does not use in-database defaults) if not self.skip_default(field) and self.effective_default(field) is not None: changes_sql, params = self._alter_column_default_sql(model, None, field, drop=True) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Add an index, if required self.deferred_sql.extend(self._field_indexes_sql(model, field)) # Add any FK constraints later if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint: self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s")) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def remove_field(self, model, field): """ Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.delete_model(field.remote_field.through) # It might not actually have a column behind it if field.db_parameters(connection=self.connection)['type'] is None: return # Drop any FK constraints, MySQL requires explicit deletion if field.remote_field: fk_names = self._constraint_names(model, [field.column], foreign_key=True) for fk_name in fk_names: self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name)) # Delete the column sql = self.sql_delete_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() # Remove all deferred statements referencing the deleted column. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_column(model._meta.db_table, field.column): self.deferred_sql.remove(sql) def alter_field(self, model, old_field, new_field, strict=False): """ Allow a field's type, uniqueness, nullability, default, column, constraints, etc. to be modified. `old_field` is required to compute the necessary changes. If `strict` is True, raise errors if the old column does not match `old_field` precisely. 
""" # Ensure this field is even column-based old_db_params = old_field.db_parameters(connection=self.connection) old_type = old_db_params['type'] new_db_params = new_field.db_parameters(connection=self.connection) new_type = new_db_params['type'] if ((old_type is None and old_field.remote_field is None) or (new_type is None and new_field.remote_field is None)): raise ValueError( "Cannot alter field %s into %s - they do not properly define " "db_type (are you using a badly-written custom field?)" % (old_field, new_field), ) elif old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and old_field.remote_field.through._meta.auto_created and new_field.remote_field.through._meta.auto_created): return self._alter_many_to_many(model, old_field, new_field, strict) elif old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and not old_field.remote_field.through._meta.auto_created and not new_field.remote_field.through._meta.auto_created): # Both sides have through models; this is a no-op. return elif old_type is None or new_type is None: raise ValueError( "Cannot alter field %s into %s - they are not compatible types " "(you cannot alter to or from M2M fields, or add or remove " "through= on M2M fields)" % (old_field, new_field) ) self._alter_field(model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): """Perform a "physical" (non-ManyToMany) field update.""" # Drop any FK constraints, we'll remake them later fks_dropped = set() if old_field.remote_field and old_field.db_constraint: fk_names = self._constraint_names(model, [old_field.column], foreign_key=True) if strict and len(fk_names) != 1: raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % ( len(fk_names), model._meta.db_table, old_field.column, )) for fk_name in fk_names: fks_dropped.add((old_field.column,)) self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name)) # Has unique been removed? if old_field.unique and (not new_field.unique or self._field_became_primary_key(old_field, new_field)): # Find the unique constraint for this field constraint_names = self._constraint_names(model, [old_field.column], unique=True, primary_key=False) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, )) for constraint_name in constraint_names: self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name)) # Drop incoming FK constraints if the field is a primary key or unique, # which might be a to_field target, and things are going to change. drop_foreign_keys = ( ( (old_field.primary_key and new_field.primary_key) or (old_field.unique and new_field.unique) ) and old_type != new_type ) if drop_foreign_keys: # '_meta.related_field' also contains M2M reverse fields, these # will be filtered out for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field): rel_fk_names = self._constraint_names( new_rel.related_model, [new_rel.field.column], foreign_key=True ) for fk_name in rel_fk_names: self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name)) # Removed an index? 
(no strict check, as multiple indexes are possible) # Remove indexes if db_index switched to False or a unique constraint # will now be used in lieu of an index. The following lines from the # truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # True | False | False | False # True | False | False | True # True | False | True | True if old_field.db_index and not old_field.unique and (not new_field.db_index or new_field.unique): # Find the index for this field meta_index_names = {index.name for index in model._meta.indexes} # Retrieve only BTREE indexes since this is what's created with # db_index=True. index_names = self._constraint_names(model, [old_field.column], index=True, type_=Index.suffix) for index_name in index_names: if index_name not in meta_index_names: # The only way to check if an index was created with # db_index=True or with Index(['field'], name='foo') # is to look at its name (refs #28053). self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name)) # Change check constraints? if old_db_params['check'] != new_db_params['check'] and old_db_params['check']: constraint_names = self._constraint_names(model, [old_field.column], check=True) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, )) for constraint_name in constraint_names: self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name)) # Have they renamed the column? if old_field.column != new_field.column: self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type)) # Rename all references to the renamed column. for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_column_references(model._meta.db_table, old_field.column, new_field.column) # Next, start accumulating actions to do actions = [] null_actions = [] post_actions = [] # Type change? if old_type != new_type: fragment, other_actions = self._alter_column_type_sql(model, old_field, new_field, new_type) actions.append(fragment) post_actions.extend(other_actions) # When changing a column NULL constraint to NOT NULL with a given # default value, we need to perform 4 steps: # 1. Add a default for new incoming writes # 2. Update existing NULL rows with new default # 3. Replace NULL constraint with NOT NULL # 4. Drop the default again. # Default change? old_default = self.effective_default(old_field) new_default = self.effective_default(new_field) needs_database_default = ( old_field.null and not new_field.null and old_default != new_default and new_default is not None and not self.skip_default(new_field) ) if needs_database_default: actions.append(self._alter_column_default_sql(model, old_field, new_field)) # Nullability change? if old_field.null != new_field.null: fragment = self._alter_column_null_sql(model, old_field, new_field) if fragment: null_actions.append(fragment) # Only if we have a default and there is a change from NULL to NOT NULL four_way_default_alteration = ( new_field.has_default() and (old_field.null and not new_field.null) ) if actions or null_actions: if not four_way_default_alteration: # If we don't have to do a 4-way default alteration we can # directly run a (NOT) NULL alteration actions = actions + null_actions # Combine actions together if we can (e.g. 
postgres) if self.connection.features.supports_combined_alters and actions: sql, params = tuple(zip(*actions)) actions = [(", ".join(sql), sum(params, []))] # Apply those actions for sql, params in actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if four_way_default_alteration: # Update existing rows with default value self.execute( self.sql_update_with_default % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(new_field.column), "default": "%s", }, [new_default], ) # Since we didn't run a NOT NULL change before we need to do it # now for sql, params in null_actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if post_actions: for sql, params in post_actions: self.execute(sql, params) # If primary_key changed to False, delete the primary key constraint. if old_field.primary_key and not new_field.primary_key: self._delete_primary_key(model, strict) # Added a unique? if self._unique_should_be_added(old_field, new_field): self.execute(self._create_unique_sql(model, [new_field.column])) # Added an index? Add an index if db_index switched to True or a unique # constraint will no longer be used in lieu of an index. The following # lines from the truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # False | False | True | False # False | True | True | False # True | True | True | False if (not old_field.db_index or old_field.unique) and new_field.db_index and not new_field.unique: self.execute(self._create_index_sql(model, [new_field])) # Type alteration on primary key? Then we need to alter the column # referring to us. rels_to_update = [] if old_field.primary_key and new_field.primary_key and old_type != new_type: rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Changed to become primary key? if self._field_became_primary_key(old_field, new_field): # Make the new one self.execute( self.sql_create_pk % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name( self._create_index_name(model._meta.db_table, [new_field.column], suffix="_pk") ), "columns": self.quote_name(new_field.column), } ) # Update all referencing columns rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Handle our type alters on the other end of rels from the PK stuff above for old_rel, new_rel in rels_to_update: rel_db_params = new_rel.field.db_parameters(connection=self.connection) rel_type = rel_db_params['type'] fragment, other_actions = self._alter_column_type_sql( new_rel.related_model, old_rel.field, new_rel.field, rel_type ) self.execute( self.sql_alter_column % { "table": self.quote_name(new_rel.related_model._meta.db_table), "changes": fragment[0], }, fragment[1], ) for sql, params in other_actions: self.execute(sql, params) # Does it have a foreign key? 
if (new_field.remote_field and (fks_dropped or not old_field.remote_field or not old_field.db_constraint) and new_field.db_constraint): self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s")) # Rebuild FKs that pointed to us if we previously had to drop them if drop_foreign_keys: for rel in new_field.model._meta.related_objects: if _is_relevant_relation(rel, new_field) and rel.field.db_constraint: self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk")) # Does it have check constraints we need to add? if old_db_params['check'] != new_db_params['check'] and new_db_params['check']: self.execute( self.sql_create_check % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name( self._create_index_name(model._meta.db_table, [new_field.column], suffix="_check") ), "column": self.quote_name(new_field.column), "check": new_db_params['check'], } ) # Drop the default if we need to # (Django usually does not use in-database defaults) if needs_database_default: changes_sql, params = self._alter_column_default_sql(model, old_field, new_field, drop=True) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def _alter_column_null_sql(self, model, old_field, new_field): """ Hook to specialize column null alteration. Return a (sql, params) fragment to set a column to null or non-null as required by new_field, or None if no changes are required. """ if (self.connection.features.interprets_empty_strings_as_nulls and new_field.get_internal_type() in ("CharField", "TextField")): # The field is nullable in the database anyway, leave it alone. return else: new_db_params = new_field.db_parameters(connection=self.connection) sql = self.sql_alter_column_null if new_field.null else self.sql_alter_column_not_null return ( sql % { 'column': self.quote_name(new_field.column), 'type': new_db_params['type'], }, [], ) def _alter_column_default_sql(self, model, old_field, new_field, drop=False): """ Hook to specialize column default alteration. Return a (sql, params) fragment to add or drop (depending on the drop argument) a default to new_field's column. """ new_default = self.effective_default(new_field) default = '%s' params = [new_default] if drop: params = [] elif self.connection.features.requires_literal_defaults: # Some databases (Oracle) can't take defaults as a parameter # If this is the case, the SchemaEditor for that database should # implement prepare_default(). default = self.prepare_default(new_default) params = [] new_db_params = new_field.db_parameters(connection=self.connection) sql = self.sql_alter_column_no_default if drop else self.sql_alter_column_default return ( sql % { 'column': self.quote_name(new_field.column), 'type': new_db_params['type'], 'default': default, }, params, ) def _alter_column_type_sql(self, model, old_field, new_field, new_type): """ Hook to specialize column type alteration for different backends, for cases when a creation type is different to an alteration type (e.g. SERIAL in PostgreSQL, PostGIS fields). Return a two-tuple of: an SQL fragment of (sql, params) to insert into an ALTER TABLE statement and a list of extra (sql, params) tuples to run once the field is altered. 
""" return ( ( self.sql_alter_column_type % { "column": self.quote_name(new_field.column), "type": new_type, }, [], ), [], ) def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" # Rename the through table if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table: self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table) # Repoint the FK to the other side self.alter_field( new_field.remote_field.through, # We need the field that points to the target model, so we can tell alter_field to change it - # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()), ) self.alter_field( new_field.remote_field.through, # for self-referential models we need to alter field from the other end too old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()), ) def _create_index_name(self, table_name, column_names, suffix=""): """ Generate a unique name for an index/unique constraint. The name is divided into 3 parts: the table name, the column names, and a unique digest and suffix. """ _, table_name = split_identifier(table_name) hash_suffix_part = '%s%s' % (self._digest(table_name, *column_names), suffix) max_length = self.connection.ops.max_name_length() or 200 # If everything fits into max_length, use that name. index_name = '%s_%s_%s' % (table_name, '_'.join(column_names), hash_suffix_part) if len(index_name) <= max_length: return index_name # Shorten a long suffix. if len(hash_suffix_part) > max_length / 3: hash_suffix_part = hash_suffix_part[:max_length // 3] other_length = (max_length - len(hash_suffix_part)) // 2 - 1 index_name = '%s_%s_%s' % ( table_name[:other_length], '_'.join(column_names)[:other_length], hash_suffix_part, ) # Prepend D if needed to prevent the name from starting with an # underscore or a number (not permitted on Oracle). if index_name[0] == "_" or index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name def _get_index_tablespace_sql(self, model, fields, db_tablespace=None): if db_tablespace is None: if len(fields) == 1 and fields[0].db_tablespace: db_tablespace = fields[0].db_tablespace elif model._meta.db_tablespace: db_tablespace = model._meta.db_tablespace if db_tablespace is not None: return ' ' + self.connection.ops.tablespace_sql(db_tablespace) return '' def _create_index_sql(self, model, fields, *, name=None, suffix='', using='', db_tablespace=None, col_suffixes=(), sql=None): """ Return the SQL statement to create the index for one or several fields. `sql` can be specified if the syntax differs from the standard (GIS indexes, ...). 
""" tablespace_sql = self._get_index_tablespace_sql(model, fields, db_tablespace=db_tablespace) columns = [field.column for field in fields] sql_create_index = sql or self.sql_create_index table = model._meta.db_table def create_index_name(*args, **kwargs): nonlocal name if name is None: name = self._create_index_name(*args, **kwargs) return self.quote_name(name) return Statement( sql_create_index, table=Table(table, self.quote_name), name=IndexName(table, columns, suffix, create_index_name), using=using, columns=Columns(table, columns, self.quote_name, col_suffixes=col_suffixes), extra=tablespace_sql, ) def _model_indexes_sql(self, model): """ Return a list of all index SQL statements (field indexes, index_together, Meta.indexes) for the specified model. """ if not model._meta.managed or model._meta.proxy or model._meta.swapped: return [] output = [] for field in model._meta.local_fields: output.extend(self._field_indexes_sql(model, field)) for field_names in model._meta.index_together: fields = [model._meta.get_field(field) for field in field_names] output.append(self._create_index_sql(model, fields, suffix="_idx")) for index in model._meta.indexes: output.append(index.create_sql(model, self)) return output def _field_indexes_sql(self, model, field): """ Return a list of all index SQL statements for the specified field. """ output = [] if self._field_should_be_indexed(model, field): output.append(self._create_index_sql(model, [field])) return output def _field_should_be_indexed(self, model, field): return field.db_index and not field.unique def _field_became_primary_key(self, old_field, new_field): return not old_field.primary_key and new_field.primary_key def _unique_should_be_added(self, old_field, new_field): return (not old_field.unique and new_field.unique) or ( old_field.primary_key and not new_field.primary_key and new_field.unique ) def _rename_field_sql(self, table, old_field, new_field, new_type): return self.sql_rename_column % { "table": self.quote_name(table), "old_column": self.quote_name(old_field.column), "new_column": self.quote_name(new_field.column), "type": new_type, } def _create_fk_sql(self, model, field, suffix): from_table = model._meta.db_table from_column = field.column _, to_table = split_identifier(field.target_field.model._meta.db_table) to_column = field.target_field.column def create_fk_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) return Statement( self.sql_create_fk, table=Table(from_table, self.quote_name), name=ForeignKeyName(from_table, [from_column], to_table, [to_column], suffix, create_fk_name), column=Columns(from_table, [from_column], self.quote_name), to_table=Table(field.target_field.model._meta.db_table, self.quote_name), to_column=Columns(field.target_field.model._meta.db_table, [to_column], self.quote_name), deferrable=self.connection.ops.deferrable_sql(), ) def _create_unique_sql(self, model, columns): table = model._meta.db_table return Statement( self.sql_create_unique, table=Table(table, self.quote_name), name=IndexName(table, columns, '_uniq', self._create_index_name), columns=Columns(table, columns, self.quote_name), ) def _delete_constraint_sql(self, template, model, name): return template % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(name), } def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None, type_=None): """Return all constraint names matching the columns and conditions.""" if 
column_names is not None: column_names = [ self.connection.introspection.column_name_converter(name) for name in column_names ] with self.connection.cursor() as cursor: constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table) result = [] for name, infodict in constraints.items(): if column_names is None or column_names == infodict['columns']: if unique is not None and infodict['unique'] != unique: continue if primary_key is not None and infodict['primary_key'] != primary_key: continue if index is not None and infodict['index'] != index: continue if check is not None and infodict['check'] != check: continue if foreign_key is not None and not infodict['foreign_key']: continue if type_ is not None and infodict['type'] != type_: continue result.append(name) return result def _delete_primary_key(self, model, strict=False): constraint_names = self._constraint_names(model, primary_key=True) if strict and len(constraint_names) != 1: raise ValueError('Found wrong number (%s) of PK constraints for %s' % ( len(constraint_names), model._meta.db_table, )) for constraint_name in constraint_names: self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name)) def remove_procedure(self, procedure_name, param_types=()): sql = self.sql_delete_procedure % { 'procedure': self.quote_name(procedure_name), 'param_types': ','.join(param_types), } self.execute(sql)
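# --- Editorial usage sketch (not part of the original module). ---
# A schema editor is normally obtained from a connection; entering the
# context manager starts an atomic block (where the backend supports
# transactional DDL) and exiting it flushes self.deferred_sql. ``Author``
# and the ``example`` app label are hypothetical and assume a configured
# Django project.
from django.db import connection, models


class Author(models.Model):
    name = models.CharField(max_length=100)

    class Meta:
        app_label = 'example'  # assumed app label, for illustration only


with connection.schema_editor() as editor:
    editor.create_model(Author)  # CREATE TABLE plus deferred indexes/FKs

# collect_sql=True records statements in editor.collected_sql instead of
# executing them, which is how the sqlmigrate command renders migrations.
with connection.schema_editor(collect_sql=True) as editor:
    editor.delete_model(Author)
print(editor.collected_sql)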
afa75c461639b4fc7d9730d985b7030b5cfd2c3757c517bfbed9206104a6b842
class BaseDatabaseClient: """Encapsulate backend-specific methods for opening a client shell.""" # This should be a string representing the name of the executable # (e.g., "psql"). Subclasses must override this. executable_name = None def __init__(self, connection): # connection is an instance of BaseDatabaseWrapper. self.connection = connection def runshell(self): raise NotImplementedError('subclasses of BaseDatabaseClient must provide a runshell() method')
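# --- Editorial sketch: a minimal concrete client (assumed names). ---
# Real backends ship their own subclasses; this hypothetical one shows the
# contract: set executable_name and implement runshell() against
# connection.settings_dict.
import subprocess


class SQLiteLikeClient(BaseDatabaseClient):
    executable_name = 'sqlite3'  # assumed CLI binary

    def runshell(self):
        # Open the command-line shell against the configured database name.
        args = [self.executable_name, self.connection.settings_dict['NAME']]
        subprocess.run(args, check=True)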
381e0c72e049da6a2f4caee5982b36ae7e9451f887b5427ed43bcef49dda31d4
import sys from io import StringIO from django.apps import apps from django.conf import settings from django.core import serializers from django.db import router # The prefix to put on the default database name when creating # the test database. TEST_DATABASE_PREFIX = 'test_' class BaseDatabaseCreation: """ Encapsulate backend-specific differences pertaining to creation and destruction of the test database. """ def __init__(self, connection): self.connection = connection @property def _nodb_connection(self): """ Used to be defined here, now moved to DatabaseWrapper. """ return self.connection._nodb_connection def create_test_db(self, verbosity=1, autoclobber=False, serialize=True, keepdb=False): """ Create a test database, prompting the user for confirmation if the database already exists. Return the name of the test database created. """ # Don't import django.core.management if it isn't needed. from django.core.management import call_command test_database_name = self._get_test_db_name() if verbosity >= 1: action = 'Creating' if keepdb: action = "Using existing" print("%s test database for alias %s..." % ( action, self._get_database_display_str(verbosity, test_database_name), )) # We could skip this call if keepdb is True, but we instead # give it the keepdb param. This is to handle the case # where the test DB doesn't exist, in which case we need to # create it, then just not destroy it. If we instead skip # this, we will get an exception. self._create_test_db(verbosity, autoclobber, keepdb) self.connection.close() settings.DATABASES[self.connection.alias]["NAME"] = test_database_name self.connection.settings_dict["NAME"] = test_database_name # We report migrate messages at one level lower than that requested. # This ensures we don't get flooded with messages during testing # (unless you really ask to be flooded). call_command( 'migrate', verbosity=max(verbosity - 1, 0), interactive=False, database=self.connection.alias, run_syncdb=True, ) # We then serialize the current state of the database into a string # and store it on the connection. This slightly horrific process is so people # who are testing on databases without transactions or who are using # a TransactionTestCase still get a clean database on every test run. if serialize: self.connection._test_serialized_contents = self.serialize_db_to_string() call_command('createcachetable', database=self.connection.alias) # Ensure a connection for the side effect of initializing the test database. self.connection.ensure_connection() return test_database_name def set_as_test_mirror(self, primary_settings_dict): """ Set this database up to be used in testing as a mirror of a primary database whose settings are given. """ self.connection.settings_dict['NAME'] = primary_settings_dict['NAME'] def serialize_db_to_string(self): """ Serialize all data in the database into a JSON string. Designed only for test runner usage; will not handle large amounts of data. 
""" # Build list of all apps to serialize from django.db.migrations.loader import MigrationLoader loader = MigrationLoader(self.connection) app_list = [] for app_config in apps.get_app_configs(): if ( app_config.models_module is not None and app_config.label in loader.migrated_apps and app_config.name not in settings.TEST_NON_SERIALIZED_APPS ): app_list.append((app_config, None)) # Make a function to iteratively return every object def get_objects(): for model in serializers.sort_dependencies(app_list): if (model._meta.can_migrate(self.connection) and router.allow_migrate_model(self.connection.alias, model)): queryset = model._default_manager.using(self.connection.alias).order_by(model._meta.pk.name) yield from queryset.iterator() # Serialize to a string out = StringIO() serializers.serialize("json", get_objects(), indent=None, stream=out) return out.getvalue() def deserialize_db_from_string(self, data): """ Reload the database with data from a string generated by the serialize_db_to_string() method. """ data = StringIO(data) for obj in serializers.deserialize("json", data, using=self.connection.alias): obj.save() def _get_database_display_str(self, verbosity, database_name): """ Return display string for a database for use in various actions. """ return "'%s'%s" % ( self.connection.alias, (" ('%s')" % database_name) if verbosity >= 2 else '', ) def _get_test_db_name(self): """ Internal implementation - return the name of the test DB that will be created. Only useful when called from create_test_db() and _create_test_db() and when no external munging is done with the 'NAME' settings. """ if self.connection.settings_dict['TEST']['NAME']: return self.connection.settings_dict['TEST']['NAME'] return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] def _execute_create_test_db(self, cursor, parameters, keepdb=False): cursor.execute('CREATE DATABASE %(dbname)s %(suffix)s' % parameters) def _create_test_db(self, verbosity, autoclobber, keepdb=False): """ Internal implementation - create the test db tables. """ test_database_name = self._get_test_db_name() test_db_params = { 'dbname': self.connection.ops.quote_name(test_database_name), 'suffix': self.sql_table_creation_suffix(), } # Create the test database and connect to it. with self._nodb_connection.cursor() as cursor: try: self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception as e: # if we want to keep the db, then no need to do any of the below, # just return and skip it all. if keepdb: return test_database_name sys.stderr.write( "Got an error creating the test database: %s\n" % e) if not autoclobber: confirm = input( "Type 'yes' if you would like to try deleting the test " "database '%s', or 'no' to cancel: " % test_database_name) if autoclobber or confirm == 'yes': try: if verbosity >= 1: print("Destroying old test database for alias %s..." % ( self._get_database_display_str(verbosity, test_database_name), )) cursor.execute('DROP DATABASE %(dbname)s' % test_db_params) self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception as e: sys.stderr.write( "Got an error recreating the test database: %s\n" % e) sys.exit(2) else: print("Tests cancelled.") sys.exit(1) return test_database_name def clone_test_db(self, suffix, verbosity=1, autoclobber=False, keepdb=False): """ Clone a test database. """ source_database_name = self.connection.settings_dict['NAME'] if verbosity >= 1: action = 'Cloning test database' if keepdb: action = 'Using existing clone' print("%s for alias %s..." 
% ( action, self._get_database_display_str(verbosity, source_database_name), )) # We could skip this call if keepdb is True, but we instead # give it the keepdb param. See create_test_db for details. self._clone_test_db(suffix, verbosity, keepdb) def get_test_db_clone_settings(self, suffix): """ Return a modified connection settings dict for the n-th clone of a DB. """ # When this function is called, the test database has been created # already and its name has been copied to settings_dict['NAME'] so # we don't need to call _get_test_db_name. orig_settings_dict = self.connection.settings_dict return {**orig_settings_dict, 'NAME': '{}_{}'.format(orig_settings_dict['NAME'], suffix)} def _clone_test_db(self, suffix, verbosity, keepdb=False): """ Internal implementation - duplicate the test db tables. """ raise NotImplementedError( "The database backend doesn't support cloning databases. " "Disable the option to run tests in parallel processes.") def destroy_test_db(self, old_database_name=None, verbosity=1, keepdb=False, suffix=None): """ Destroy a test database, prompting the user for confirmation if the database already exists. """ self.connection.close() if suffix is None: test_database_name = self.connection.settings_dict['NAME'] else: test_database_name = self.get_test_db_clone_settings(suffix)['NAME'] if verbosity >= 1: action = 'Destroying' if keepdb: action = 'Preserving' print("%s test database for alias %s..." % ( action, self._get_database_display_str(verbosity, test_database_name), )) # if we want to preserve the database # skip the actual destroying piece. if not keepdb: self._destroy_test_db(test_database_name, verbosity) # Restore the original database name if old_database_name is not None: settings.DATABASES[self.connection.alias]["NAME"] = old_database_name self.connection.settings_dict["NAME"] = old_database_name def _destroy_test_db(self, test_database_name, verbosity): """ Internal implementation - remove the test db tables. """ # Remove the test database to clean up after # ourselves. Connect to the previous database (not the test database) # to do so, because it's not allowed to delete a database while being # connected to it. with self.connection._nodb_connection.cursor() as cursor: cursor.execute("DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name)) def sql_table_creation_suffix(self): """ SQL to append to the end of the test table creation statements. """ return '' def test_db_signature(self): """ Return a tuple with elements of self.connection.settings_dict (a DATABASES setting value) that uniquely identify a database accordingly to the RDBMS particularities. """ settings_dict = self.connection.settings_dict return ( settings_dict['HOST'], settings_dict['PORT'], settings_dict['ENGINE'], self._get_test_db_name(), )
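# --- Editorial usage sketch (assumes a configured project and database). ---
# The test runner owns this lifecycle; driving it by hand looks like this.
from django.db import connection

old_name = connection.settings_dict['NAME']
# keepdb=True reuses an existing test database instead of recreating it,
# trading isolation for startup speed.
connection.creation.create_test_db(verbosity=1, keepdb=True)
# ... run code against the freshly created (or reused) test database ...
connection.creation.destroy_test_db(old_name, verbosity=1, keepdb=True)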
0b430bb6d7f4286114f62d0dd63ad7c29ad1812574b93eea2f812ef72965a514
from django.core import checks from django.db.backends.base.validation import BaseDatabaseValidation from django.utils.version import get_docs_version class DatabaseValidation(BaseDatabaseValidation): def check(self, **kwargs): issues = super().check(**kwargs) issues.extend(self._check_sql_mode(**kwargs)) return issues def _check_sql_mode(self, **kwargs): with self.connection.cursor() as cursor: cursor.execute("SELECT @@sql_mode") sql_mode = cursor.fetchone() modes = set(sql_mode[0].split(',') if sql_mode else ()) if not (modes & {'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES'}): return [checks.Warning( "MySQL Strict Mode is not set for database connection '%s'" % self.connection.alias, hint="MySQL's Strict Mode fixes many data integrity problems in MySQL, " "such as data truncation upon insertion, by escalating warnings into " "errors. It is strongly recommended you activate it. See: " "https://docs.djangoproject.com/en/%s/ref/databases/#mysql-sql-mode" % (get_docs_version(),), id='mysql.W002', )] return [] def check_field_type(self, field, field_type): """ MySQL has the following field length restriction: No character (varchar) fields can have a length exceeding 255 characters if they have a unique index on them. MySQL doesn't support a database index on some data types. """ errors = [] if (field_type.startswith('varchar') and field.unique and (field.max_length is None or int(field.max_length) > 255)): errors.append( checks.Error( 'MySQL does not allow unique CharFields to have a max_length > 255.', obj=field, id='mysql.E001', ) ) if field.db_index and field_type.lower() in self.connection._limited_data_types: errors.append( checks.Warning( 'MySQL does not support a database index on %s columns.' % field_type, hint=( "An index won't be created. Silence this warning if " "you don't care about it." ), obj=field, id='fields.W162', ) ) return errors
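# --- Editorial sketch: running these checks by hand (assumes MySQL). ---
# Django invokes DatabaseValidation through the system-check framework;
# calling it directly is convenient when experimenting.
from django.db import connection

for issue in connection.validation.check():
    # mysql.W002 is emitted when neither STRICT_TRANS_TABLES nor
    # STRICT_ALL_TABLES is present in @@sql_mode.
    print(issue.id, issue.msg)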
62b674356aa9607aea8cbdfbf5b339a16accd4d51ed1962742666311bd393685
from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () update_can_self_select = False allows_group_by_pk = True related_fields_match_type = True # MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME. allow_sliced_subqueries_with_in = False has_select_for_update = True supports_forward_references = False supports_regex_backreferencing = False supports_date_lookup_using_string = False can_introspect_autofield = True can_introspect_binary_field = False can_introspect_small_integer_field = True can_introspect_positive_integer_field = True introspected_boolean_field_type = 'IntegerField' supports_index_column_ordering = False supports_timezones = False requires_explicit_null_ordering_when_grouping = True allows_auto_pk_0 = False uses_savepoints = True can_release_savepoints = True atomic_transactions = False supports_column_check_constraints = False can_clone_databases = True supports_temporal_subtraction = True supports_select_intersection = False supports_select_difference = False supports_slicing_ordering_in_compound = True supports_index_on_text_field = False has_case_insensitive_like = False create_test_procedure_without_params_sql = """ CREATE PROCEDURE test_procedure () BEGIN DECLARE V_I INTEGER; SET V_I = 1; END; """ create_test_procedure_with_int_param_sql = """ CREATE PROCEDURE test_procedure (P_I INTEGER) BEGIN DECLARE V_I INTEGER; SET V_I = P_I; END; """ db_functions_convert_bytes_to_str = True # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends. supported_explain_formats = {'JSON', 'TEXT', 'TRADITIONAL'} @cached_property def _mysql_storage_engine(self): "Internal method used in Django tests. Don't rely on this from your code" with self.connection.cursor() as cursor: cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'") result = cursor.fetchone() return result[0] @cached_property def can_introspect_foreign_keys(self): "Confirm support for introspected foreign keys" return self._mysql_storage_engine != 'MyISAM' @cached_property def has_zoneinfo_database(self): # Test if the time zone definitions are installed. with self.connection.cursor() as cursor: cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1") return cursor.fetchone() is not None @cached_property def is_sql_auto_is_null_enabled(self): with self.connection.cursor() as cursor: cursor.execute('SELECT @@SQL_AUTO_IS_NULL') result = cursor.fetchone() return result and result[0] == 1 @cached_property def supports_over_clause(self): return self.connection.mysql_version >= (8, 0, 2) @cached_property def has_select_for_update_skip_locked(self): return self.connection.mysql_version >= (8, 0, 1) has_select_for_update_nowait = has_select_for_update_skip_locked @cached_property def needs_explain_extended(self): # EXTENDED is deprecated (and not required) in 5.7 and removed in 8.0. return self.connection.mysql_version < (5, 7) @cached_property def supports_transactions(self): """ All storage engines except MyISAM support transactions. """ return self._mysql_storage_engine != 'MyISAM' @cached_property def ignores_table_name_case(self): with self.connection.cursor() as cursor: cursor.execute('SELECT @@LOWER_CASE_TABLE_NAMES') result = cursor.fetchone() return result and result[0] != 0
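# --- Editorial sketch: feature flags gate behavior at runtime. ---
# Callers branch on these properties instead of comparing server versions
# themselves; the cached_property values above hit the database at most once.
from django.db import connection

if connection.features.supports_over_clause:  # MySQL >= 8.0.2, per above
    print('Window functions (OVER clause) are available.')
if not connection.features.supports_transactions:  # e.g. MyISAM storage
    print('Rollback-based test isolation is unavailable.')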
995bd1e4981b7822b2881056de3acb1339f1d4f553285bb4feaeccdeac7fa3ed
from collections import namedtuple from MySQLdb.constants import FIELD_TYPE from django.db.backends.base.introspection import ( BaseDatabaseIntrospection, FieldInfo, TableInfo, ) from django.db.models.indexes import Index from django.utils.datastructures import OrderedSet FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('extra', 'is_unsigned')) InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default is_unsigned') class DatabaseIntrospection(BaseDatabaseIntrospection): data_types_reverse = { FIELD_TYPE.BLOB: 'TextField', FIELD_TYPE.CHAR: 'CharField', FIELD_TYPE.DECIMAL: 'DecimalField', FIELD_TYPE.NEWDECIMAL: 'DecimalField', FIELD_TYPE.DATE: 'DateField', FIELD_TYPE.DATETIME: 'DateTimeField', FIELD_TYPE.DOUBLE: 'FloatField', FIELD_TYPE.FLOAT: 'FloatField', FIELD_TYPE.INT24: 'IntegerField', FIELD_TYPE.LONG: 'IntegerField', FIELD_TYPE.LONGLONG: 'BigIntegerField', FIELD_TYPE.SHORT: 'SmallIntegerField', FIELD_TYPE.STRING: 'CharField', FIELD_TYPE.TIME: 'TimeField', FIELD_TYPE.TIMESTAMP: 'DateTimeField', FIELD_TYPE.TINY: 'IntegerField', FIELD_TYPE.TINY_BLOB: 'TextField', FIELD_TYPE.MEDIUM_BLOB: 'TextField', FIELD_TYPE.LONG_BLOB: 'TextField', FIELD_TYPE.VAR_STRING: 'CharField', } def get_field_type(self, data_type, description): field_type = super().get_field_type(data_type, description) if 'auto_increment' in description.extra: if field_type == 'IntegerField': return 'AutoField' elif field_type == 'BigIntegerField': return 'BigAutoField' if description.is_unsigned: if field_type == 'IntegerField': return 'PositiveIntegerField' elif field_type == 'SmallIntegerField': return 'PositiveSmallIntegerField' return field_type def get_table_list(self, cursor): """Return a list of table and view names in the current database.""" cursor.execute("SHOW FULL TABLES") return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1])) for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): """ Return a description of the table with the DB-API cursor.description interface." """ # information_schema database gives more accurate results for some figures: # - varchar length returned by cursor.description is an internal length, # not visible length (#5725) # - precision and scale (for decimal fields) (#5014) # - auto_increment is not available in cursor.description cursor.execute(""" SELECT column_name, data_type, character_maximum_length, numeric_precision, numeric_scale, extra, column_default, CASE WHEN column_type LIKE '%% unsigned' THEN 1 ELSE 0 END AS is_unsigned FROM information_schema.columns WHERE table_name = %s AND table_schema = DATABASE()""", [table_name]) field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()} cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) def to_int(i): return int(i) if i is not None else i fields = [] for line in cursor.description: info = field_info[line[0]] fields.append(FieldInfo( *line[:3], to_int(info.max_len) or line[3], to_int(info.num_prec) or line[4], to_int(info.num_scale) or line[5], line[6], info.column_default, info.extra, info.is_unsigned, )) return fields def get_sequences(self, cursor, table_name, table_fields=()): for field_info in self.get_table_description(cursor, table_name): if 'auto_increment' in field_info.extra: # MySQL allows only one auto-increment column per table. 
return [{'table': table_name, 'column': field_info.name}] return [] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. """ constraints = self.get_key_columns(cursor, table_name) relations = {} for my_fieldname, other_table, other_field in constraints: relations[my_fieldname] = (other_field, other_table) return relations def get_key_columns(self, cursor, table_name): """ Return a list of (column_name, referenced_table_name, referenced_column_name) for all key columns in the given table. """ key_columns = [] cursor.execute(""" SELECT column_name, referenced_table_name, referenced_column_name FROM information_schema.key_column_usage WHERE table_name = %s AND table_schema = DATABASE() AND referenced_table_name IS NOT NULL AND referenced_column_name IS NOT NULL""", [table_name]) key_columns.extend(cursor.fetchall()) return key_columns def get_storage_engine(self, cursor, table_name): """ Retrieve the storage engine for a given table. Return the default storage engine if the table doesn't exist. """ cursor.execute( "SELECT engine " "FROM information_schema.tables " "WHERE table_name = %s", [table_name]) result = cursor.fetchone() if not result: return self.connection.features._mysql_storage_engine return result[0] def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. """ constraints = {} # Get the actual constraint names and columns name_query = """ SELECT kc.`constraint_name`, kc.`column_name`, kc.`referenced_table_name`, kc.`referenced_column_name` FROM information_schema.key_column_usage AS kc WHERE kc.table_schema = DATABASE() AND kc.table_name = %s """ cursor.execute(name_query, [table_name]) for constraint, column, ref_table, ref_column in cursor.fetchall(): if constraint not in constraints: constraints[constraint] = { 'columns': OrderedSet(), 'primary_key': False, 'unique': False, 'index': False, 'check': False, 'foreign_key': (ref_table, ref_column) if ref_column else None, } constraints[constraint]['columns'].add(column) # Now get the constraint types type_query = """ SELECT c.constraint_name, c.constraint_type FROM information_schema.table_constraints AS c WHERE c.table_schema = DATABASE() AND c.table_name = %s """ cursor.execute(type_query, [table_name]) for constraint, kind in cursor.fetchall(): if kind.lower() == "primary key": constraints[constraint]['primary_key'] = True constraints[constraint]['unique'] = True elif kind.lower() == "unique": constraints[constraint]['unique'] = True # Now add in the indexes cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name)) for table, non_unique, index, colseq, column, type_ in [x[:5] + (x[10],) for x in cursor.fetchall()]: if index not in constraints: constraints[index] = { 'columns': OrderedSet(), 'primary_key': False, 'unique': False, 'check': False, 'foreign_key': None, } constraints[index]['index'] = True constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower() constraints[index]['columns'].add(column) # Convert the sorted sets to lists for constraint in constraints.values(): constraint['columns'] = list(constraint['columns']) return constraints
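# --- Editorial sketch: the introspection API as inspectdb uses it. ---
# Assumes a live MySQL connection; prints each base table and its columns.
from django.db import connection

with connection.cursor() as cursor:
    for table in connection.introspection.get_table_list(cursor):
        if table.type == 't':  # 't' = base table, 'v' = view (see above)
            description = connection.introspection.get_table_description(cursor, table.name)
            print(table.name, [col.name for col in description])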
277d1b5a02ef89a59f524b5e80ded89d79a9e8268467abbc55eba23780d49381
from django.db.models.sql import compiler class SQLCompiler(compiler.SQLCompiler): def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name sql, params = self.as_sql() return '(%s) IN (%s)' % (', '.join('%s.%s' % (qn(alias), qn2(column)) for column in columns), sql), params class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler): pass class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler): pass class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler): pass class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler): pass
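
# Hedged sketch of the condition shape produced by as_subquery_condition()
# above; the alias, column, and table names are hypothetical, and qn() stands
# in for the real quote_name helpers.
def _subquery_condition_shape_sketch():
    def qn(name):
        return '`%s`' % name
    alias, columns = 'T1', ['col_a', 'col_b']
    inner_sql = 'SELECT `col_a`, `col_b` FROM `other_table`'
    return '(%s) IN (%s)' % (
        ', '.join('%s.%s' % (qn(alias), qn(column)) for column in columns),
        inner_sql,
    )
# _subquery_condition_shape_sketch() ->
#     "(`T1`.`col_a`, `T1`.`col_b`) IN (SELECT `col_a`, `col_b` FROM `other_table`)"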
a399abdbfc8599c737f4932235f1df62eebe30ccf98677cd1316b70b947739a3
""" MySQL database backend for Django. Requires mysqlclient: https://pypi.org/project/mysqlclient/ """ import re from django.core.exceptions import ImproperlyConfigured from django.db import utils from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.functional import cached_property try: import MySQLdb as Database except ImportError as err: raise ImproperlyConfigured( 'Error loading MySQLdb module.\n' 'Did you install mysqlclient?' ) from err from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip from MySQLdb.converters import conversions # isort:skip # Some of these import MySQLdb, so import them after checking if it's installed. from .client import DatabaseClient # isort:skip from .creation import DatabaseCreation # isort:skip from .features import DatabaseFeatures # isort:skip from .introspection import DatabaseIntrospection # isort:skip from .operations import DatabaseOperations # isort:skip from .schema import DatabaseSchemaEditor # isort:skip from .validation import DatabaseValidation # isort:skip version = Database.version_info if version < (1, 3, 7): raise ImproperlyConfigured('mysqlclient 1.3.7 or newer is required; you have %s.' % Database.__version__) # MySQLdb returns TIME columns as timedelta -- they are more like timedelta in # terms of actual behavior as they are signed and include days -- and Django # expects time. django_conversions = { **conversions, **{FIELD_TYPE.TIME: backend_utils.typecast_time}, } # This should match the numerical portion of the version numbers (we can treat # versions like 5.0.24 and 5.0.24a as the same). server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})') class CursorWrapper: """ A thin wrapper around MySQLdb's normal cursor class that catches particular exception instances and reraises them with the correct types. Implemented as a wrapper, rather than a subclass, so that it isn't stuck to the particular underlying representation returned by Connection.cursor(). """ codes_for_integrityerror = ( 1048, # Column cannot be null 1690, # BIGINT UNSIGNED value is out of range ) def __init__(self, cursor): self.cursor = cursor def execute(self, query, args=None): try: # args is None means no string interpolation return self.cursor.execute(query, args) except Database.OperationalError as e: # Map some error codes to IntegrityError, since they seem to be # misclassified and Django would prefer the more logical place. if e.args[0] in self.codes_for_integrityerror: raise utils.IntegrityError(*tuple(e.args)) raise def executemany(self, query, args): try: return self.cursor.executemany(query, args) except Database.OperationalError as e: # Map some error codes to IntegrityError, since they seem to be # misclassified and Django would prefer the more logical place. if e.args[0] in self.codes_for_integrityerror: raise utils.IntegrityError(*tuple(e.args)) raise def __getattr__(self, attr): return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'mysql' display_name = 'MySQL' # This dictionary maps Field objects to their associated MySQL column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. 
data_types = { 'AutoField': 'integer AUTO_INCREMENT', 'BigAutoField': 'bigint AUTO_INCREMENT', 'BinaryField': 'longblob', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime(6)', 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'double precision', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveIntegerField': 'integer UNSIGNED', 'PositiveSmallIntegerField': 'smallint UNSIGNED', 'SlugField': 'varchar(%(max_length)s)', 'SmallIntegerField': 'smallint', 'TextField': 'longtext', 'TimeField': 'time(6)', 'UUIDField': 'char(32)', } # For these columns, MySQL doesn't: # - accept default values and implicitly treats these columns as nullable # - support a database index _limited_data_types = ( 'tinyblob', 'blob', 'mediumblob', 'longblob', 'tinytext', 'text', 'mediumtext', 'longtext', 'json', ) operators = { 'exact': '= %s', 'iexact': 'LIKE %s', 'contains': 'LIKE BINARY %s', 'icontains': 'LIKE %s', 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': 'LIKE BINARY %s', 'endswith': 'LIKE BINARY %s', 'istartswith': 'LIKE %s', 'iendswith': 'LIKE %s', } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': "LIKE BINARY CONCAT('%%', {}, '%%')", 'icontains': "LIKE CONCAT('%%', {}, '%%')", 'startswith': "LIKE BINARY CONCAT({}, '%%')", 'istartswith': "LIKE CONCAT({}, '%%')", 'endswith': "LIKE BINARY CONCAT('%%', {})", 'iendswith': "LIKE CONCAT('%%', {})", } isolation_levels = { 'read uncommitted', 'read committed', 'repeatable read', 'serializable', } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations validation_class = DatabaseValidation def get_connection_params(self): kwargs = { 'conv': django_conversions, 'charset': 'utf8', } settings_dict = self.settings_dict if settings_dict['USER']: kwargs['user'] = settings_dict['USER'] if settings_dict['NAME']: kwargs['db'] = settings_dict['NAME'] if settings_dict['PASSWORD']: kwargs['passwd'] = settings_dict['PASSWORD'] if settings_dict['HOST'].startswith('/'): kwargs['unix_socket'] = settings_dict['HOST'] elif settings_dict['HOST']: kwargs['host'] = settings_dict['HOST'] if settings_dict['PORT']: kwargs['port'] = int(settings_dict['PORT']) # We need the number of potentially affected rows after an # "UPDATE", not the number of changed rows. kwargs['client_flag'] = CLIENT.FOUND_ROWS # Validate the transaction isolation level, if specified. 
options = settings_dict['OPTIONS'].copy() isolation_level = options.pop('isolation_level', 'read committed') if isolation_level: isolation_level = isolation_level.lower() if isolation_level not in self.isolation_levels: raise ImproperlyConfigured( "Invalid transaction isolation level '%s' specified.\n" "Use one of %s, or None." % ( isolation_level, ', '.join("'%s'" % s for s in sorted(self.isolation_levels)) )) self.isolation_level = isolation_level kwargs.update(options) return kwargs def get_new_connection(self, conn_params): return Database.connect(**conn_params) def init_connection_state(self): assignments = [] if self.features.is_sql_auto_is_null_enabled: # SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on # a recently inserted row will return when the field is tested # for NULL. Disabling this brings this aspect of MySQL in line # with SQL standards. assignments.append('SET SQL_AUTO_IS_NULL = 0') if self.isolation_level: assignments.append('SET SESSION TRANSACTION ISOLATION LEVEL %s' % self.isolation_level.upper()) if assignments: with self.cursor() as cursor: cursor.execute('; '.join(assignments)) def create_cursor(self, name=None): cursor = self.connection.cursor() return CursorWrapper(cursor) def _rollback(self): try: BaseDatabaseWrapper._rollback(self) except Database.NotSupportedError: pass def _set_autocommit(self, autocommit): with self.wrap_database_errors: self.connection.autocommit(autocommit) def disable_constraint_checking(self): """ Disable foreign key checks, primarily for use in adding rows with forward references. Always return True to indicate constraint checks need to be re-enabled. """ self.cursor().execute('SET foreign_key_checks=0') return True def enable_constraint_checking(self): """ Re-enable foreign key checks after they have been disabled. """ # Override needs_rollback in case constraint_checks_disabled is # nested inside transaction.atomic. self.needs_rollback, needs_rollback = False, self.needs_rollback try: self.cursor().execute('SET foreign_key_checks=1') finally: self.needs_rollback = needs_rollback def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. """ with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise utils.IntegrityError( "The row in table '%s' with primary key '%s' has an invalid " "foreign key: %s.%s contains a value '%s' that does not " "have a corresponding value in %s.%s." 
% ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): try: self.connection.ping() except Database.Error: return False else: return True @cached_property def mysql_version(self): with self.temporary_connection() as cursor: cursor.execute('SELECT VERSION()') server_info = cursor.fetchone()[0] match = server_version_re.match(server_info) if not match: raise Exception('Unable to determine MySQL version from version string %r' % server_info) return tuple(int(x) for x in match.groups())
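
# Hedged configuration sketch showing how the options consumed by
# get_connection_params() above are usually supplied; every literal value
# (name, credentials, host, port) is a placeholder, not a recommendation.
EXAMPLE_MYSQL_DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'example_db',
        'USER': 'example_user',
        'PASSWORD': 'secret',
        'HOST': '127.0.0.1',  # a value starting with '/' is treated as a unix socket path
        'PORT': '3306',
        'OPTIONS': {
            # Validated against DatabaseWrapper.isolation_levels and applied in
            # init_connection_state() via SET SESSION TRANSACTION ISOLATION LEVEL.
            'isolation_level': 'repeatable read',
        },
    },
}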
7023440ee1f25eb15ba20c58e1f74a6101794ec80d85850f6200666887c6db36
import uuid from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.utils import timezone from django.utils.duration import duration_microseconds from django.utils.encoding import force_text class DatabaseOperations(BaseDatabaseOperations): compiler_module = "django.db.backends.mysql.compiler" # MySQL stores positive fields as UNSIGNED ints. integer_field_ranges = { **BaseDatabaseOperations.integer_field_ranges, 'PositiveSmallIntegerField': (0, 65535), 'PositiveIntegerField': (0, 4294967295), } cast_data_types = { 'CharField': 'char(%(max_length)s)', 'TextField': 'char', 'IntegerField': 'signed integer', 'BigIntegerField': 'signed integer', 'SmallIntegerField': 'signed integer', 'PositiveIntegerField': 'unsigned integer', 'PositiveSmallIntegerField': 'unsigned integer', } cast_char_field_without_max_length = 'char' explain_prefix = 'EXPLAIN' def date_extract_sql(self, lookup_type, field_name): # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html if lookup_type == 'week_day': # DAYOFWEEK() returns an integer, 1-7, Sunday=1. # Note: WEEKDAY() returns 0-6, Monday=0. return "DAYOFWEEK(%s)" % field_name elif lookup_type == 'week': # Override the value of default_week_format for consistency with # other database backends. # Mode 3: Monday, 1-53, with 4 or more days this year. return "WEEK(%s, 3)" % field_name else: # EXTRACT returns 1-53 based on ISO-8601 for the week number. return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): fields = { 'year': '%%Y-01-01', 'month': '%%Y-%%m-01', } # Use double percents to escape. if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str) elif lookup_type == 'quarter': return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % ( field_name, field_name ) elif lookup_type == 'week': return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % ( field_name, field_name ) else: return "DATE(%s)" % (field_name) def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ: field_name = "CONVERT_TZ(%s, 'UTC', '%s')" % (field_name, tzname) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "DATE(%s)" % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "TIME(%s)" % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) fields = ['year', 'month', 'day', 'hour', 'minute', 'second'] format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape. 
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00') if lookup_type == 'quarter': return ( "CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + " "INTERVAL QUARTER({field_name}) QUARTER - " + "INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)" ).format(field_name=field_name) if lookup_type == 'week': return ( "CAST(DATE_FORMAT(DATE_SUB({field_name}, " "INTERVAL WEEKDAY({field_name}) DAY), " "'%%Y-%%m-%%d 00:00:00') AS DATETIME)" ).format(field_name=field_name) try: i = fields.index(lookup_type) + 1 except ValueError: sql = field_name else: format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]]) sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str) return sql def time_trunc_sql(self, lookup_type, field_name): fields = { 'hour': '%%H:00:00', 'minute': '%%H:%%i:00', 'second': '%%H:%%i:%%s', } # Use double percents to escape. if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str) else: return "TIME(%s)" % (field_name) def date_interval_sql(self, timedelta): return 'INTERVAL %s MICROSECOND' % duration_microseconds(timedelta) def format_for_duration_arithmetic(self, sql): return 'INTERVAL %s MICROSECOND' % sql def force_no_ordering(self): """ "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped columns. If no ordering would otherwise be applied, we don't want any implicit sorting going on. """ return [(None, ("NULL", [], False))] def last_executed_query(self, cursor, sql, params): # With MySQLdb, cursor objects have an (undocumented) "_last_executed" # attribute where the exact query sent to the database is saved. # See MySQLdb/cursors.py in the source distribution. return force_text(getattr(cursor, '_last_executed', None), errors='replace') def no_limit_value(self): # 2**64 - 1, as recommended by the MySQL documentation return 18446744073709551615 def quote_name(self, name): if name.startswith("`") and name.endswith("`"): return name # Quoting once is enough. return "`%s`" % name def random_function_sql(self): return 'RAND()' def sql_flush(self, style, tables, sequences, allow_cascade=False): # NB: The generated SQL below is specific to MySQL # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements # to clear all tables of all data if tables: sql = ['SET FOREIGN_KEY_CHECKS = 0;'] for table in tables: sql.append('%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table)), )) sql.append('SET FOREIGN_KEY_CHECKS = 1;') sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def validate_autopk_value(self, value): # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653. if value == 0: raise ValueError('The database backend does not accept 0 as a ' 'value for AutoField.') return value def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. 
if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("MySQL backend does not support timezone-aware times.") return str(value) def max_name_length(self): return 64 def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def combine_expression(self, connector, sub_expressions): if connector == '^': return 'POW(%s)' % ','.join(sub_expressions) # Convert the result to a signed integer since MySQL's binary operators # return an unsigned integer. elif connector in ('&', '|', '<<'): return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions) elif connector == '>>': lhs, rhs = sub_expressions return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} return super().combine_expression(connector, sub_expressions) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'TextField': converters.append(self.convert_textfield_value) elif internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) return converters def convert_textfield_value(self, value, expression, connection): if value is not None: value = force_text(value) return value def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def binary_placeholder_sql(self, value): return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s' def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs if internal_type == 'TimeField': return ( "((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -" " (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))" ) % {'lhs': lhs_sql, 'rhs': rhs_sql}, lhs_params * 2 + rhs_params * 2 else: return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), rhs_params + lhs_params def explain_query_prefix(self, format=None, **options): # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends. if format and format.upper() == 'TEXT': format = 'TRADITIONAL' prefix = super().explain_query_prefix(format, **options) if format: prefix += ' FORMAT=%s' % format if self.connection.features.needs_explain_extended and format is None: # EXTENDED and FORMAT are mutually exclusive options. prefix += ' EXTENDED' return prefix def regex_lookup(self, lookup_type): # REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE # doesn't exist in MySQL 5.6. if self.connection.mysql_version < (8, 0, 0): if lookup_type == 'regex': return '%s REGEXP BINARY %s' return '%s REGEXP %s' match_option = 'c' if lookup_type == 'regex' else 'i' return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option
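
# Hedged sketch: a standalone re-derivation of the DATE_FORMAT string that
# datetime_trunc_sql() above assembles (without the SQL-escaping double
# percents). Illustrative only; the backend does not call this helper.
def _datetime_trunc_format_sketch(lookup_type):
    fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
    fmt = ('%Y-', '%m', '-%d', ' %H:', '%i', ':%s')
    fmt_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
    i = fields.index(lookup_type) + 1  # keep real values up to the lookup...
    return ''.join(fmt[:i] + fmt_def[i:])  # ...and pin everything finer to zero
# _datetime_trunc_format_sketch('minute') -> '%Y-%m-%d %H:%i:00'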
d3ece18d8adabd84c1c390b10d084ac8785b4c88c101889e7ce2900c0e6ea783
from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.models import NOT_PROVIDED class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s" sql_alter_column_null = "MODIFY %(column)s %(type)s NULL" sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL" sql_alter_column_type = "MODIFY %(column)s %(type)s" # No 'CASCADE' which works as a no-op in MySQL but is undocumented sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s" sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s" sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s" sql_delete_index = "DROP INDEX %(name)s ON %(table)s" sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY" def quote_value(self, value): self.connection.ensure_connection() quoted = self.connection.connection.escape(value, self.connection.connection.encoders) if isinstance(value, str): quoted = quoted.decode() return quoted def _is_limited_data_type(self, field): db_type = field.db_type(self.connection) return db_type is not None and db_type.lower() in self.connection._limited_data_types def skip_default(self, field): return self._is_limited_data_type(field) def add_field(self, model, field): super().add_field(model, field) # Simulate the effect of a one-off default. # field.default may be unhashable, so a set isn't used for "in" check. if self.skip_default(field) and field.default not in (None, NOT_PROVIDED): effective_default = self.effective_default(field) self.execute('UPDATE %(table)s SET %(column)s = %%s' % { 'table': self.quote_name(model._meta.db_table), 'column': self.quote_name(field.column), }, [effective_default]) def _field_should_be_indexed(self, model, field): create_index = super()._field_should_be_indexed(model, field) storage = self.connection.introspection.get_storage_engine( self.connection.cursor(), model._meta.db_table ) # No need to create an index for ForeignKey fields except if # db_constraint=False because the index from that constraint won't be # created. if (storage == "InnoDB" and create_index and field.get_internal_type() == 'ForeignKey' and field.db_constraint): return False return not self._is_limited_data_type(field) and create_index def _delete_composed_index(self, model, fields, *args): """ MySQL can remove an implicit FK index on a field when that field is covered by another index like a unique_together. "covered" here means that the more complex index starts like the simpler one. http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757 We check here before removing the [unique|index]_together if we have to recreate a FK index. """ first_field = model._meta.get_field(fields[0]) if first_field.get_internal_type() == 'ForeignKey': constraint_names = self._constraint_names(model, [first_field.column], index=True) if not constraint_names: self.execute(self._create_index_sql(model, [first_field], suffix="")) return super()._delete_composed_index(model, fields, *args) def _set_field_new_type_null_status(self, field, new_type): """ Keep the null property of the old field. If it has changed, it will be handled separately. 
""" if field.null: new_type += " NULL" else: new_type += " NOT NULL" return new_type def _alter_column_type_sql(self, model, old_field, new_field, new_type): new_type = self._set_field_new_type_null_status(old_field, new_type) return super()._alter_column_type_sql(model, old_field, new_field, new_type) def _rename_field_sql(self, table, old_field, new_field, new_type): new_type = self._set_field_new_type_null_status(old_field, new_type) return super()._rename_field_sql(table, old_field, new_field, new_type)
befb75fb07af61e411e099a496b999fe7ad4ad774c6805d0395ee3e365d9c783
import subprocess from django.db.backends.base.client import BaseDatabaseClient class DatabaseClient(BaseDatabaseClient): executable_name = 'mysql' @classmethod def settings_to_cmd_args(cls, settings_dict): args = [cls.executable_name] db = settings_dict['OPTIONS'].get('db', settings_dict['NAME']) user = settings_dict['OPTIONS'].get('user', settings_dict['USER']) passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD']) host = settings_dict['OPTIONS'].get('host', settings_dict['HOST']) port = settings_dict['OPTIONS'].get('port', settings_dict['PORT']) server_ca = settings_dict['OPTIONS'].get('ssl', {}).get('ca') client_cert = settings_dict['OPTIONS'].get('ssl', {}).get('cert') client_key = settings_dict['OPTIONS'].get('ssl', {}).get('key') defaults_file = settings_dict['OPTIONS'].get('read_default_file') # Seems to be no good way to set sql_mode with CLI. if defaults_file: args += ["--defaults-file=%s" % defaults_file] if user: args += ["--user=%s" % user] if passwd: args += ["--password=%s" % passwd] if host: if '/' in host: args += ["--socket=%s" % host] else: args += ["--host=%s" % host] if port: args += ["--port=%s" % port] if server_ca: args += ["--ssl-ca=%s" % server_ca] if client_cert: args += ["--ssl-cert=%s" % client_cert] if client_key: args += ["--ssl-key=%s" % client_key] if db: args += [db] return args def runshell(self): args = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict) subprocess.check_call(args)
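
# Hedged sketch of the argv that settings_to_cmd_args() above builds for a
# minimal settings dict; all values are placeholders.
def _client_args_sketch():
    settings_dict = {
        'NAME': 'example_db',
        'USER': 'example_user',
        'PASSWORD': 'secret',
        'HOST': 'db.example.com',
        'PORT': '3306',
        'OPTIONS': {},
    }
    return DatabaseClient.settings_to_cmd_args(settings_dict)
# _client_args_sketch() -> ['mysql', '--user=example_user', '--password=secret',
#                           '--host=db.example.com', '--port=3306', 'example_db']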
bc5850db361b1ab0bf4eb21cef005c6e1fe2edaa460feb74646d21def0ff2094
import subprocess import sys from django.db.backends.base.creation import BaseDatabaseCreation from .client import DatabaseClient class DatabaseCreation(BaseDatabaseCreation): def sql_table_creation_suffix(self): suffix = [] test_settings = self.connection.settings_dict['TEST'] if test_settings['CHARSET']: suffix.append('CHARACTER SET %s' % test_settings['CHARSET']) if test_settings['COLLATION']: suffix.append('COLLATE %s' % test_settings['COLLATION']) return ' '.join(suffix) def _execute_create_test_db(self, cursor, parameters, keepdb=False): try: if keepdb: # If the database should be kept, add "IF NOT EXISTS" to avoid # "database exists" error, also temporarily disable "database # exists" warning. cursor.execute(''' SET @_tmp_sql_notes := @@sql_notes, sql_notes = 0; CREATE DATABASE IF NOT EXISTS %(dbname)s %(suffix)s; SET sql_notes = @_tmp_sql_notes; ''' % parameters) else: super()._execute_create_test_db(cursor, parameters, keepdb) except Exception as e: if len(e.args) < 1 or e.args[0] != 1007: # All errors except "database exists" (1007) cancel tests. sys.stderr.write('Got an error creating the test database: %s\n' % e) sys.exit(2) else: raise e def _clone_test_db(self, suffix, verbosity, keepdb=False): source_database_name = self.connection.settings_dict['NAME'] target_database_name = self.get_test_db_clone_settings(suffix)['NAME'] test_db_params = { 'dbname': self.connection.ops.quote_name(target_database_name), 'suffix': self.sql_table_creation_suffix(), } with self._nodb_connection.cursor() as cursor: try: self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception: try: if verbosity >= 1: print("Destroying old test database for alias %s..." % ( self._get_database_display_str(verbosity, target_database_name), )) cursor.execute('DROP DATABASE %(dbname)s' % test_db_params) self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception as e: sys.stderr.write("Got an error recreating the test database: %s\n" % e) sys.exit(2) dump_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict) dump_cmd[0] = 'mysqldump' dump_cmd[-1] = source_database_name load_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict) load_cmd[-1] = target_database_name dump_proc = subprocess.Popen(dump_cmd, stdout=subprocess.PIPE) load_proc = subprocess.Popen(load_cmd, stdin=dump_proc.stdout, stdout=subprocess.PIPE) dump_proc.stdout.close() # allow dump_proc to receive a SIGPIPE if load_proc exits. load_proc.communicate()
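
# Hedged sketch: _clone_test_db() above is roughly equivalent to this shell
# pipeline, with both argv lists derived from settings_to_cmd_args() and only
# the executable and the trailing database name swapped. Names are placeholders.
EXAMPLE_CLONE_PIPELINE = (
    "mysqldump --user=example_user --password=secret test_example_db"
    " | mysql --user=example_user --password=secret test_example_db_1"
)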
9bf6474f2762a31df83d76468fd01b6e98d99e2f545b8e1e2afcff54d6cced48
from django.db.backends.base.features import BaseDatabaseFeatures class DummyDatabaseFeatures(BaseDatabaseFeatures): supports_transactions = False
66c07f84a396f6db9a35b66deb87c663ab64d3f16a32217bdaba7c4c4dcdac30
""" Dummy database backend for Django. Django uses this if the database ENGINE setting is empty (None or empty string). Each of these API functions, except connection.close(), raise ImproperlyConfigured. """ from django.core.exceptions import ImproperlyConfigured from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.base.client import BaseDatabaseClient from django.db.backends.base.creation import BaseDatabaseCreation from django.db.backends.base.introspection import BaseDatabaseIntrospection from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.dummy.features import DummyDatabaseFeatures def complain(*args, **kwargs): raise ImproperlyConfigured("settings.DATABASES is improperly configured. " "Please supply the ENGINE value. Check " "settings documentation for more details.") def ignore(*args, **kwargs): pass class DatabaseOperations(BaseDatabaseOperations): quote_name = complain class DatabaseClient(BaseDatabaseClient): runshell = complain class DatabaseCreation(BaseDatabaseCreation): create_test_db = ignore destroy_test_db = ignore class DatabaseIntrospection(BaseDatabaseIntrospection): get_table_list = complain get_table_description = complain get_relations = complain get_indexes = complain get_key_columns = complain class DatabaseWrapper(BaseDatabaseWrapper): operators = {} # Override the base class implementations with null # implementations. Anything that tries to actually # do something raises complain; anything that tries # to rollback or undo something raises ignore. _cursor = complain ensure_connection = complain _commit = complain _rollback = ignore _close = ignore _savepoint = ignore _savepoint_commit = complain _savepoint_rollback = ignore _set_autocommit = complain # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DummyDatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def is_usable(self): return True
e7010c7bf663f921b1439005d3a487ee084cb9a983ff2edd76f27d2c1a99c610
from ..postgresql.features import * # NOQA
ba18ca2418a08644f357425e15826c61e7678b8c3923f37d2e92300da3e63ac1
from ..postgresql.introspection import * # NOQA
e6fe22c24f5dfc434f40c7630df431bcf29394e5f17da5ac9da47eaa008f31c3
import warnings from django.utils.deprecation import RemovedInDjango30Warning warnings.warn( "The django.db.backends.postgresql_psycopg2 module is deprecated in " "favor of django.db.backends.postgresql.", RemovedInDjango30Warning, stacklevel=2 )
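
# Hedged migration sketch: silencing the warning above is a one-line ENGINE
# rename in settings; both values below are illustrative constants, not
# anything this module exports.
OLD_ENGINE = 'django.db.backends.postgresql_psycopg2'  # deprecated alias
NEW_ENGINE = 'django.db.backends.postgresql'  # replacement module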
836680ced7ccb402fddc5f2d1a63b55b66106e6afec39b26fce6e8a62e091d7e
from ..postgresql.base import * # NOQA
c6420e517fab2c8e988d32ad1076042bcf7085ed89d53f8528d551056d896e1e
from ..postgresql.operations import * # NOQA
47edd3d5e3aadbec83eba2f6fbe2029ed3c5d99b0b67e3a4e13578146c721b97
from ..postgresql.schema import * # NOQA
2110a61ad8414d184ed91546772bb158b268c007391f5ffed5acf0cebbc9f808
from ..postgresql.client import * # NOQA
7a778db396b3ebf434ef1d2af7dd46ed900162013e620320f7e5e4df92d3ecf3
from ..postgresql.utils import * # NOQA
2b6997dee2ab60a3eeeac9cf6ab58e52974f13b9e3325edde45ffdad64c4332e
from ..postgresql.creation import * # NOQA
4d282e8a084e050653323d0ff27ee6cc62f471926864d0b205afc43ce1d3ec5c
from django.db.backends.base.features import BaseDatabaseFeatures from django.db.utils import InterfaceError from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): allows_group_by_selected_pks = True can_return_id_from_insert = True can_return_ids_from_bulk_insert = True has_real_datatype = True has_native_uuid_field = True has_native_duration_field = True can_defer_constraint_checks = True has_select_for_update = True has_select_for_update_nowait = True has_select_for_update_of = True uses_savepoints = True can_release_savepoints = True supports_tablespaces = True supports_transactions = True can_introspect_autofield = True can_introspect_ip_address_field = True can_introspect_small_integer_field = True can_distinct_on_fields = True can_rollback_ddl = True supports_combined_alters = True nulls_order_largest = True closed_cursor_error_class = InterfaceError has_case_insensitive_like = False requires_sqlparse_for_splitting = False greatest_least_ignores_nulls = True can_clone_databases = True supports_temporal_subtraction = True supports_slicing_ordering_in_compound = True create_test_procedure_without_params_sql = """ CREATE FUNCTION test_procedure () RETURNS void AS $$ DECLARE V_I INTEGER; BEGIN V_I := 1; END; $$ LANGUAGE plpgsql;""" create_test_procedure_with_int_param_sql = """ CREATE FUNCTION test_procedure (P_I INTEGER) RETURNS void AS $$ DECLARE V_I INTEGER; BEGIN V_I := P_I; END; $$ LANGUAGE plpgsql;""" supports_over_clause = True supports_aggregate_filter_clause = True supported_explain_formats = {'JSON', 'TEXT', 'XML', 'YAML'} validates_explain_options = False # A query will error on invalid options. @cached_property def is_postgresql_9_5(self): return self.connection.pg_version >= 90500 has_select_for_update_skip_locked = is_postgresql_9_5 has_brin_index_support = is_postgresql_9_5 has_jsonb_agg = is_postgresql_9_5 has_gin_pending_list_limit = is_postgresql_9_5
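
# Hedged sketch of the version encoding that is_postgresql_9_5 above compares
# against: psycopg2 reports server_version as an integer of the form MMmmpp
# (e.g. 9.5.3 -> 90503), so the gate is a plain integer comparison. The
# decoder below is illustrative only.
def _pg_version_tuple_sketch(server_version):
    return (
        server_version // 10000,        # major
        (server_version // 100) % 100,  # minor
        server_version % 100,           # patch
    )
# _pg_version_tuple_sketch(90503) -> (9, 5, 3)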
54f4ddad97581100c1ca07ff07117907874a62cf49e4d6e5bc332b7158a42cc6
from django.db.backends.base.introspection import ( BaseDatabaseIntrospection, FieldInfo, TableInfo, ) from django.db.models.indexes import Index class DatabaseIntrospection(BaseDatabaseIntrospection): # Maps type codes to Django Field types. data_types_reverse = { 16: 'BooleanField', 17: 'BinaryField', 20: 'BigIntegerField', 21: 'SmallIntegerField', 23: 'IntegerField', 25: 'TextField', 700: 'FloatField', 701: 'FloatField', 869: 'GenericIPAddressField', 1042: 'CharField', # blank-padded 1043: 'CharField', 1082: 'DateField', 1083: 'TimeField', 1114: 'DateTimeField', 1184: 'DateTimeField', 1266: 'TimeField', 1700: 'DecimalField', 2950: 'UUIDField', } ignored_tables = [] def get_field_type(self, data_type, description): field_type = super().get_field_type(data_type, description) if description.default and 'nextval' in description.default: if field_type == 'IntegerField': return 'AutoField' elif field_type == 'BigIntegerField': return 'BigAutoField' return field_type def get_table_list(self, cursor): """Return a list of table and view names in the current database.""" cursor.execute(""" SELECT c.relname, c.relkind FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r', 'v') AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid)""") return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1])) for row in cursor.fetchall() if row[0] not in self.ignored_tables] def get_table_description(self, cursor, table_name): """ Return a description of the table with the DB-API cursor.description interface. """ # As cursor.description does not return reliably the nullable property, # we have to query the information_schema (#7783) cursor.execute(""" SELECT column_name, is_nullable, column_default FROM information_schema.columns WHERE table_name = %s""", [table_name]) field_map = {line[0]: line[1:] for line in cursor.fetchall()} cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) return [ FieldInfo(*line[0:6], field_map[line.name][0] == 'YES', field_map[line.name][1]) for line in cursor.description ] def get_sequences(self, cursor, table_name, table_fields=()): sequences = [] cursor.execute(""" SELECT s.relname as sequence_name, col.attname FROM pg_class s JOIN pg_namespace sn ON sn.oid = s.relnamespace JOIN pg_depend d ON d.refobjid = s.oid AND d.refclassid='pg_class'::regclass JOIN pg_attrdef ad ON ad.oid = d.objid AND d.classid = 'pg_attrdef'::regclass JOIN pg_attribute col ON col.attrelid = ad.adrelid AND col.attnum = ad.adnum JOIN pg_class tbl ON tbl.oid = ad.adrelid JOIN pg_namespace n ON n.oid = tbl.relnamespace WHERE s.relkind = 'S' AND d.deptype in ('a', 'n') AND n.nspname = 'public' AND tbl.relname = %s """, [table_name]) for row in cursor.fetchall(): sequences.append({'name': row[0], 'table': table_name, 'column': row[1]}) return sequences def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. 
""" cursor.execute(""" SELECT c2.relname, a1.attname, a2.attname FROM pg_constraint con LEFT JOIN pg_class c1 ON con.conrelid = c1.oid LEFT JOIN pg_class c2 ON con.confrelid = c2.oid LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1] LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1] WHERE c1.relname = %s AND con.contype = 'f'""", [table_name]) relations = {} for row in cursor.fetchall(): relations[row[1]] = (row[2], row[0]) return relations def get_key_columns(self, cursor, table_name): key_columns = [] cursor.execute(""" SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column FROM information_schema.constraint_column_usage ccu LEFT JOIN information_schema.key_column_usage kcu ON ccu.constraint_catalog = kcu.constraint_catalog AND ccu.constraint_schema = kcu.constraint_schema AND ccu.constraint_name = kcu.constraint_name LEFT JOIN information_schema.table_constraints tc ON ccu.constraint_catalog = tc.constraint_catalog AND ccu.constraint_schema = tc.constraint_schema AND ccu.constraint_name = tc.constraint_name WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name]) key_columns.extend(cursor.fetchall()) return key_columns def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. Also retrieve the definition of expression-based indexes. """ constraints = {} # Loop over the key table, collecting things as constraints. The column # array must return column names in the same order in which they were # created. cursor.execute(""" SELECT c.conname, array( SELECT attname FROM unnest(c.conkey) WITH ORDINALITY cols(colid, arridx) JOIN pg_attribute AS ca ON cols.colid = ca.attnum WHERE ca.attrelid = c.conrelid ORDER BY cols.arridx ), c.contype, (SELECT fkc.relname || '.' 
|| fka.attname FROM pg_attribute AS fka JOIN pg_class AS fkc ON fka.attrelid = fkc.oid WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]), cl.reloptions FROM pg_constraint AS c JOIN pg_class AS cl ON c.conrelid = cl.oid JOIN pg_namespace AS ns ON cl.relnamespace = ns.oid WHERE ns.nspname = %s AND cl.relname = %s """, ["public", table_name]) for constraint, columns, kind, used_cols, options in cursor.fetchall(): constraints[constraint] = { "columns": columns, "primary_key": kind == "p", "unique": kind in ["p", "u"], "foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None, "check": kind == "c", "index": False, "definition": None, "options": options, } # Now get indexes cursor.execute(""" SELECT indexname, array_agg(attname ORDER BY arridx), indisunique, indisprimary, array_agg(ordering ORDER BY arridx), amname, exprdef, s2.attoptions FROM ( SELECT c2.relname as indexname, idx.*, attr.attname, am.amname, CASE WHEN idx.indexprs IS NOT NULL THEN pg_get_indexdef(idx.indexrelid) END AS exprdef, CASE am.amname WHEN 'btree' THEN CASE (option & 1) WHEN 1 THEN 'DESC' ELSE 'ASC' END END as ordering, c2.reloptions as attoptions FROM ( SELECT * FROM pg_index i, unnest(i.indkey, i.indoption) WITH ORDINALITY koi(key, option, arridx) ) idx LEFT JOIN pg_class c ON idx.indrelid = c.oid LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid LEFT JOIN pg_am am ON c2.relam = am.oid LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key WHERE c.relname = %s ) s2 GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions; """, [table_name]) for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall(): if index not in constraints: constraints[index] = { "columns": columns if columns != [None] else [], "orders": orders if orders != [None] else [], "primary_key": primary, "unique": unique, "foreign_key": None, "check": False, "index": True, "type": Index.suffix if type_ == 'btree' else type_, "definition": definition, "options": options, } return constraints
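
# Hedged sketch of a single entry in the mapping returned by get_constraints()
# above, here for a hypothetical plain btree index on a 'title' column.
EXAMPLE_CONSTRAINT_ENTRY = {
    'myapp_book_title_idx': {
        'columns': ['title'],
        'orders': ['ASC'],
        'primary_key': False,
        'unique': False,
        'foreign_key': None,
        'check': False,
        'index': True,
        'type': 'idx',       # Index.suffix for btree; otherwise the access method name
        'definition': None,  # populated only for expression-based indexes
        'options': None,
    },
}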
2ba56df8287e3c26a8b439630d89b6fd9244df064d61bda5a9bc2521a389ece4
""" PostgreSQL database backend for Django. Requires psycopg 2: http://initd.org/projects/psycopg2 """ import threading import warnings from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import connections from django.db.backends.base.base import BaseDatabaseWrapper from django.db.utils import DatabaseError as WrappedDatabaseError from django.utils.functional import cached_property from django.utils.safestring import SafeText from django.utils.version import get_version_tuple try: import psycopg2 as Database import psycopg2.extensions import psycopg2.extras except ImportError as e: raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e) def psycopg2_version(): version = psycopg2.__version__.split(' ', 1)[0] return get_version_tuple(version) PSYCOPG2_VERSION = psycopg2_version() if PSYCOPG2_VERSION < (2, 5, 4): raise ImproperlyConfigured("psycopg2_version 2.5.4 or newer is required; you have %s" % psycopg2.__version__) # Some of these import psycopg2, so import them after checking if it's installed. from .client import DatabaseClient # NOQA isort:skip from .creation import DatabaseCreation # NOQA isort:skip from .features import DatabaseFeatures # NOQA isort:skip from .introspection import DatabaseIntrospection # NOQA isort:skip from .operations import DatabaseOperations # NOQA isort:skip from .schema import DatabaseSchemaEditor # NOQA isort:skip from .utils import utc_tzinfo_factory # NOQA isort:skip psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString) psycopg2.extras.register_uuid() # Register support for inet[] manually so we don't have to handle the Inet() # object on load all the time. INETARRAY_OID = 1041 INETARRAY = psycopg2.extensions.new_array_type( (INETARRAY_OID,), 'INETARRAY', psycopg2.extensions.UNICODE, ) psycopg2.extensions.register_type(INETARRAY) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'postgresql' display_name = 'PostgreSQL' # This dictionary maps Field objects to their associated PostgreSQL column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. 
data_types = { 'AutoField': 'serial', 'BigAutoField': 'bigserial', 'BinaryField': 'bytea', 'BooleanField': 'boolean', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'timestamp with time zone', 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)', 'DurationField': 'interval', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'double precision', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'inet', 'GenericIPAddressField': 'inet', 'NullBooleanField': 'boolean', 'OneToOneField': 'integer', 'PositiveIntegerField': 'integer', 'PositiveSmallIntegerField': 'smallint', 'SlugField': 'varchar(%(max_length)s)', 'SmallIntegerField': 'smallint', 'TextField': 'text', 'TimeField': 'time', 'UUIDField': 'uuid', } data_type_check_constraints = { 'PositiveIntegerField': '"%(column)s" >= 0', 'PositiveSmallIntegerField': '"%(column)s" >= 0', } operators = { 'exact': '= %s', 'iexact': '= UPPER(%s)', 'contains': 'LIKE %s', 'icontains': 'LIKE UPPER(%s)', 'regex': '~ %s', 'iregex': '~* %s', 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': 'LIKE %s', 'endswith': 'LIKE %s', 'istartswith': 'LIKE UPPER(%s)', 'iendswith': 'LIKE UPPER(%s)', } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': "LIKE '%%' || {} || '%%'", 'icontains': "LIKE '%%' || UPPER({}) || '%%'", 'startswith': "LIKE {} || '%%'", 'istartswith': "LIKE UPPER({}) || '%%'", 'endswith': "LIKE '%%' || {}", 'iendswith': "LIKE '%%' || UPPER({})", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations # PostgreSQL backend-specific attributes. _named_cursor_idx = 0 def get_connection_params(self): settings_dict = self.settings_dict # None may be used to connect to the default 'postgres' db if settings_dict['NAME'] == '': raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value.") if len(settings_dict['NAME'] or '') > self.ops.max_name_length(): raise ImproperlyConfigured( 'Database names longer than %d characters are not supported by ' 'PostgreSQL. Supply a shorter NAME in settings.DATABASES.' 
% self.ops.max_name_length() ) conn_params = { 'database': settings_dict['NAME'] or 'postgres', **settings_dict['OPTIONS'], } conn_params.pop('isolation_level', None) if settings_dict['USER']: conn_params['user'] = settings_dict['USER'] if settings_dict['PASSWORD']: conn_params['password'] = settings_dict['PASSWORD'] if settings_dict['HOST']: conn_params['host'] = settings_dict['HOST'] if settings_dict['PORT']: conn_params['port'] = settings_dict['PORT'] return conn_params def get_new_connection(self, conn_params): connection = Database.connect(**conn_params) # self.isolation_level must be set: # - after connecting to the database in order to obtain the database's # default when no value is explicitly specified in options. # - before calling _set_autocommit() because if autocommit is on, that # will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT. options = self.settings_dict['OPTIONS'] try: self.isolation_level = options['isolation_level'] except KeyError: self.isolation_level = connection.isolation_level else: # Set the isolation level to the value from OPTIONS. if self.isolation_level != connection.isolation_level: connection.set_session(isolation_level=self.isolation_level) return connection def ensure_timezone(self): self.ensure_connection() conn_timezone_name = self.connection.get_parameter_status('TimeZone') timezone_name = self.timezone_name if timezone_name and conn_timezone_name != timezone_name: with self.connection.cursor() as cursor: cursor.execute(self.ops.set_time_zone_sql(), [timezone_name]) return True return False def init_connection_state(self): self.connection.set_client_encoding('UTF8') timezone_changed = self.ensure_timezone() if timezone_changed: # Commit after setting the time zone (see #17062) if not self.get_autocommit(): self.connection.commit() def create_cursor(self, name=None): if name: # In autocommit mode, the cursor will be used outside of a # transaction, hence use a holdable cursor. cursor = self.connection.cursor(name, scrollable=False, withhold=self.connection.autocommit) else: cursor = self.connection.cursor() cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None return cursor def chunked_cursor(self): self._named_cursor_idx += 1 return self._cursor( name='_django_curs_%d_%d' % ( # Avoid reusing name in other threads threading.current_thread().ident, self._named_cursor_idx, ) ) def _set_autocommit(self, autocommit): with self.wrap_database_errors: self.connection.autocommit = autocommit def check_constraints(self, table_names=None): """ Check constraints by setting them to immediate. Return them to deferred afterward. """ self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE') self.cursor().execute('SET CONSTRAINTS ALL DEFERRED') def is_usable(self): try: # Use a psycopg cursor directly, bypassing Django's utilities. self.connection.cursor().execute("SELECT 1") except Database.Error: return False else: return True @property def _nodb_connection(self): nodb_connection = super()._nodb_connection try: nodb_connection.ensure_connection() except (Database.DatabaseError, WrappedDatabaseError): warnings.warn( "Normally Django will use a connection to the 'postgres' database " "to avoid running initialization queries against the production " "database when it's not needed (for example, when running tests). 
" "Django was unable to create a connection to the 'postgres' database " "and will use the first PostgreSQL database instead.", RuntimeWarning ) for connection in connections.all(): if connection.vendor == 'postgresql' and connection.settings_dict['NAME'] != 'postgres': return self.__class__( {**self.settings_dict, 'NAME': connection.settings_dict['NAME']}, alias=self.alias, allow_thread_sharing=False, ) return nodb_connection @cached_property def pg_version(self): with self.temporary_connection(): return self.connection.server_version
8fe7c18d47ee78c99cd33e5a9334c7e6e7f944dd8dd0bc1baffa284ad018261c
from psycopg2.extras import Inet from django.conf import settings from django.db import NotSupportedError from django.db.backends.base.operations import BaseDatabaseOperations class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'varchar' explain_prefix = 'EXPLAIN' def unification_cast_sql(self, output_field): internal_type = output_field.get_internal_type() if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"): # PostgreSQL will resolve a union as type 'text' if input types are # 'unknown'. # https://www.postgresql.org/docs/current/static/typeconv-union-case.html # These fields cannot be implicitly cast back in the default # PostgreSQL configuration so we need to explicitly cast them. # We must also remove components of the type within brackets: # varchar(255) -> varchar. return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0] return '%s' def date_extract_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT if lookup_type == 'week_day': # For consistency across backends, we return Sunday=1, Saturday=7. return "EXTRACT('dow' FROM %s) + 1" % field_name else: return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name) def date_trunc_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ: field_name = "%s AT TIME ZONE '%s'" % (field_name, tzname) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::date' % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::time' % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def time_trunc_sql(self, lookup_type, field_name): return "DATE_TRUNC('%s', %s)::time" % (lookup_type, field_name) def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_ids(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table that has an auto-incrementing ID, return the list of newly created IDs. """ return [item[0] for item in cursor.fetchall()] def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' # Cast text lookups to text to allow things like filter(x__contains=4) if lookup_type in ('iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'): if internal_type in ('IPAddressField', 'GenericIPAddressField'): lookup = "HOST(%s)" elif internal_type in ('CICharField', 'CIEmailField', 'CITextField'): lookup = '%s::citext' else: lookup = "%s::text" # Use UPPER(x) for case-insensitive lookups; it's faster. 
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): lookup = 'UPPER(%s)' % lookup return lookup def no_limit_value(self): return None def prepare_sql_script(self, sql): return [sql] def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def set_time_zone_sql(self): return "SET TIME ZONE %s" def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables: # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows # us to truncate tables referenced by a foreign key in any other # table. tables_sql = ', '.join( style.SQL_FIELD(self.quote_name(table)) for table in tables) if allow_cascade: sql = ['%s %s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, style.SQL_KEYWORD('CASCADE'), )] else: sql = ['%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, )] sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements # to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info['table'] # 'id' will be the case if it's an m2m using an autogenerated # intermediate table (see BaseDatabaseIntrospection.sequence_list). column_name = sequence_info['column'] or 'id' sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(self.quote_name(table_name)), style.SQL_FIELD(column_name), )) return sql def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def sequence_reset_sql(self, style, model_list): from django.db import models output = [] qn = self.quote_name for model in model_list: # Use `coalesce` to set the sequence for each model to the max pk value if there are records, # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true # if there are records (as the max pk value is already in use), otherwise set it to false. # Use pg_get_serial_sequence to get the underlying sequence name from the table name # and column name (available since PostgreSQL 8) for f in model._meta.local_fields: if isinstance(f, models.AutoField): output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(model._meta.db_table)), style.SQL_FIELD(f.column), style.SQL_FIELD(qn(f.column)), style.SQL_FIELD(qn(f.column)), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(model._meta.db_table)), ) ) break # Only one AutoField is allowed per model, so don't bother continuing. for f in model._meta.many_to_many: if not f.remote_field.through: output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(f.m2m_db_table())), style.SQL_FIELD('id'), style.SQL_FIELD(qn('id')), style.SQL_FIELD(qn('id')), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(f.m2m_db_table())) ) ) return output def prep_for_iexact_query(self, x): return x def max_name_length(self): """ Return the maximum length of an identifier. 
The maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h. This implementation returns 63, but can be overridden by a custom database backend that inherits most of its behavior from this one. """ return 63 def distinct_sql(self, fields, params): if fields: params = [param for param_list in params for param in param_list] return (['DISTINCT ON (%s)' % ', '.join(fields)], params) else: return ['DISTINCT'], [] def last_executed_query(self, cursor, sql, params): # http://initd.org/psycopg/docs/cursor.html#cursor.query # The query attribute is a Psycopg extension to the DB API 2.0. if cursor.query is not None: return cursor.query.decode() return None def return_insert_id(self): return "RETURNING %s", () def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def adapt_datefield_value(self, value): return value def adapt_datetimefield_value(self, value): return value def adapt_timefield_value(self, value): return value def adapt_ipaddressfield_value(self, value): if value: return Inet(value) return None def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), lhs_params + rhs_params return super().subtract_temporals(internal_type, lhs, rhs) def window_frame_range_start_end(self, start=None, end=None): start_, end_ = super().window_frame_range_start_end(start, end) if (start and start < 0) or (end and end > 0): raise NotSupportedError( 'PostgreSQL only supports UNBOUNDED together with PRECEDING ' 'and FOLLOWING.' ) return start_, end_ def explain_query_prefix(self, format=None, **options): prefix = super().explain_query_prefix(format) extra = {} if format: extra['FORMAT'] = format if options: extra.update({ name.upper(): 'true' if value else 'false' for name, value in options.items() }) if extra: prefix += ' (%s)' % ', '.join('%s %s' % i for i in extra.items()) return prefix
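

# A minimal standalone sketch (illustration only) of how explain_query_prefix()
# above assembles PostgreSQL's parenthesized EXPLAIN options; the base-class
# format validation is omitted here.
def _demo_explain_prefix(format=None, **options):
    prefix = 'EXPLAIN'
    extra = {}
    if format:
        extra['FORMAT'] = format
    if options:
        extra.update({
            name.upper(): 'true' if value else 'false'
            for name, value in options.items()
        })
    if extra:
        prefix += ' (%s)' % ', '.join('%s %s' % i for i in extra.items())
    return prefix

# _demo_explain_prefix(format='JSON', analyze=True) returns
# 'EXPLAIN (FORMAT JSON, ANALYZE true)'.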
d741ab1d14254398f5a4687abe37718163fbc82872066a62efad396ac500f227
import psycopg2 from django.db.backends.base.schema import BaseDatabaseSchemaEditor class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s USING %(column)s::%(type)s" sql_create_sequence = "CREATE SEQUENCE %(sequence)s" sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE" sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s" sql_create_index = "CREATE INDEX %(name)s ON %(table)s%(using)s (%(columns)s)%(extra)s" sql_create_varchar_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s varchar_pattern_ops)%(extra)s" sql_create_text_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s text_pattern_ops)%(extra)s" sql_delete_index = "DROP INDEX IF EXISTS %(name)s" # Setting the constraint to IMMEDIATE runs any deferred checks to allow # dropping it in the same transaction. sql_delete_fk = "SET CONSTRAINTS %(name)s IMMEDIATE; ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_delete_procedure = 'DROP FUNCTION %(procedure)s(%(param_types)s)' def quote_value(self, value): return psycopg2.extensions.adapt(value) def _field_indexes_sql(self, model, field): output = super()._field_indexes_sql(model, field) like_index_statement = self._create_like_index_sql(model, field) if like_index_statement is not None: output.append(like_index_statement) return output def _create_like_index_sql(self, model, field): """ Return the statement to create an index with varchar operator pattern when the column type is 'varchar' or 'text', otherwise return None. """ db_type = field.db_type(connection=self.connection) if db_type is not None and (field.db_index or field.unique): # Fields with database column types of `varchar` and `text` need # a second index that specifies their operator class, which is # needed when performing correct LIKE queries outside the # C locale. See #12234. # # The same doesn't apply to array fields such as varchar[size] # and text[size], so skip them. 
if '[' in db_type: return None if db_type.startswith('varchar'): return self._create_index_sql(model, [field], suffix='_like', sql=self.sql_create_varchar_index) elif db_type.startswith('text'): return self._create_index_sql(model, [field], suffix='_like', sql=self.sql_create_text_index) return None def _alter_column_type_sql(self, model, old_field, new_field, new_type): """Make ALTER TYPE with SERIAL make sense.""" table = model._meta.db_table if new_type.lower() in ("serial", "bigserial"): column = new_field.column sequence_name = "%s_%s_seq" % (table, column) col_type = "integer" if new_type.lower() == "serial" else "bigint" return ( ( self.sql_alter_column_type % { "column": self.quote_name(column), "type": col_type, }, [], ), [ ( self.sql_delete_sequence % { "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_create_sequence % { "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_alter_column % { "table": self.quote_name(table), "changes": self.sql_alter_column_default % { "column": self.quote_name(column), "default": "nextval('%s')" % self.quote_name(sequence_name), } }, [], ), ( self.sql_set_sequence_max % { "table": self.quote_name(table), "column": self.quote_name(column), "sequence": self.quote_name(sequence_name), }, [], ), ], ) else: return super()._alter_column_type_sql(model, old_field, new_field, new_type) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): # Drop indexes on varchar/text/citext columns that are changing to a # different type. if (old_field.db_index or old_field.unique) and ( (old_type.startswith('varchar') and not new_type.startswith('varchar')) or (old_type.startswith('text') and not new_type.startswith('text')) or (old_type.startswith('citext') and not new_type.startswith('citext')) ): index_name = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like') self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name)) super()._alter_field( model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict, ) # Added an index? Create any PostgreSQL-specific indexes. if ((not (old_field.db_index or old_field.unique) and new_field.db_index) or (not old_field.unique and new_field.unique)): like_index_statement = self._create_like_index_sql(model, new_field) if like_index_statement is not None: self.execute(like_index_statement) # Removed an index? Drop any PostgreSQL-specific indexes. if old_field.unique and not (new_field.db_index or new_field.unique): index_to_remove = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like') self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_to_remove))
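

# A hedged sketch of the SERIAL emulation in _alter_column_type_sql() above,
# expanded for a hypothetical "app_item"."id" column; the table and column
# names are made up, and the statements are shown flattened into one list
# for illustration only.
def _demo_serial_statements(table='app_item', column='id'):
    def quote(name):
        return '"%s"' % name
    sequence = quote('%s_%s_seq' % (table, column))
    return [
        'ALTER COLUMN %s TYPE integer USING %s::integer' % (quote(column), quote(column)),
        'DROP SEQUENCE IF EXISTS %s CASCADE' % sequence,
        'CREATE SEQUENCE %s' % sequence,
        'ALTER TABLE %s ALTER COLUMN %s SET DEFAULT nextval(\'%s\')' % (
            quote(table), quote(column), sequence,
        ),
        "SELECT setval('%s', MAX(%s)) FROM %s" % (sequence, quote(column), quote(table)),
    ]

# i.e. SERIAL is emulated as an integer column plus an owned sequence whose
# nextval() becomes the column default, then the sequence is fast-forwarded
# past the existing maximum value.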
d2ecd93d5b23b07dbca747819fa8564b7bb685e5507c1256ce6d2c401e3d8599
import os import signal import subprocess from django.core.files.temp import NamedTemporaryFile from django.db.backends.base.client import BaseDatabaseClient def _escape_pgpass(txt): """ Escape a fragment of a PostgreSQL .pgpass file. """ return txt.replace('\\', '\\\\').replace(':', '\\:') class DatabaseClient(BaseDatabaseClient): executable_name = 'psql' @classmethod def runshell_db(cls, conn_params): args = [cls.executable_name] host = conn_params.get('host', '') port = conn_params.get('port', '') dbname = conn_params.get('database', '') user = conn_params.get('user', '') passwd = conn_params.get('password', '') if user: args += ['-U', user] if host: args += ['-h', host] if port: args += ['-p', str(port)] args += [dbname] temp_pgpass = None sigint_handler = signal.getsignal(signal.SIGINT) try: if passwd: # Create temporary .pgpass file. temp_pgpass = NamedTemporaryFile(mode='w+') try: print( _escape_pgpass(host) or '*', str(port) or '*', _escape_pgpass(dbname) or '*', _escape_pgpass(user) or '*', _escape_pgpass(passwd), file=temp_pgpass, sep=':', flush=True, ) os.environ['PGPASSFILE'] = temp_pgpass.name except UnicodeEncodeError: # If the current locale can't encode the data, let the # user input the password manually. pass # Allow SIGINT to pass to psql to abort queries. signal.signal(signal.SIGINT, signal.SIG_IGN) subprocess.check_call(args) finally: # Restore the original SIGINT handler. signal.signal(signal.SIGINT, sigint_handler) if temp_pgpass: temp_pgpass.close() if 'PGPASSFILE' in os.environ: # unit tests need cleanup del os.environ['PGPASSFILE'] def runshell(self): DatabaseClient.runshell_db(self.connection.get_connection_params())
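

# A tiny self-contained sketch of the .pgpass line that runshell_db() above
# writes, for hypothetical connection parameters; empty fields fall back to
# the '*' wildcard, and ':' and '\' are the only characters needing escapes.
def _demo_pgpass_line(host='', port=5432, dbname='mydb', user='me', passwd='s:cret'):
    return ':'.join([
        _escape_pgpass(host) or '*',
        str(port) or '*',
        _escape_pgpass(dbname) or '*',
        _escape_pgpass(user) or '*',
        _escape_pgpass(passwd),
    ])

# _demo_pgpass_line() returns '*:5432:mydb:me:s\\:cret' (the colon in the
# password is escaped as \: so it isn't mistaken for a field separator).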
dc65ee4c4a323cda9f51c5ce0959c2fa0c3bc5d015d7b66f6586f842ee9fecc7
from django.utils.timezone import utc


def utc_tzinfo_factory(offset):
    """
    Return the UTC tzinfo for TIMESTAMP WITH TIME ZONE values, asserting
    that the database connection is actually set to UTC (offset 0).
    """
    if offset != 0:
        raise AssertionError("database connection isn't set to UTC")
    return utc
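

# Hedged usage note: psycopg2 cursors expose a `tzinfo_factory` attribute
# that is called with the session's UTC offset for TIMESTAMP WITH TIME ZONE
# values. A backend that keeps its session in UTC can install the factory
# above on each cursor it creates, e.g. (sketch, assuming a psycopg2 cursor):
#
#   cursor = connection.cursor()
#   cursor.tzinfo_factory = utc_tzinfo_factory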
80713795632872623f3a7acbd9a90496aa9b120de3b8fded9c1c85ac08e77dc2
import sys

from psycopg2 import errorcodes

from django.db.backends.base.creation import BaseDatabaseCreation


class DatabaseCreation(BaseDatabaseCreation):

    def _quote_name(self, name):
        return self.connection.ops.quote_name(name)

    def _get_database_create_suffix(self, encoding=None, template=None):
        suffix = ""
        if encoding:
            suffix += " ENCODING '{}'".format(encoding)
        if template:
            suffix += " TEMPLATE {}".format(self._quote_name(template))
        return suffix and "WITH" + suffix

    def sql_table_creation_suffix(self):
        test_settings = self.connection.settings_dict['TEST']
        assert test_settings['COLLATION'] is None, (
            "PostgreSQL does not support collation setting at database creation time."
        )
        return self._get_database_create_suffix(
            encoding=test_settings['CHARSET'],
            template=test_settings.get('TEMPLATE'),
        )

    def _execute_create_test_db(self, cursor, parameters, keepdb=False):
        try:
            super()._execute_create_test_db(cursor, parameters, keepdb)
        except Exception as e:
            if getattr(e.__cause__, 'pgcode', '') != errorcodes.DUPLICATE_DATABASE:
                # All errors except "database already exists" cancel tests.
                sys.stderr.write('Got an error creating the test database: %s\n' % e)
                sys.exit(2)
            elif not keepdb:
                # The test database already exists; re-raise unless the
                # database should be kept, in which case the error is ignored
                # and the existing database is reused.
                raise e

    def _clone_test_db(self, suffix, verbosity, keepdb=False):
        # CREATE DATABASE ... WITH TEMPLATE ... requires closing connections
        # to the template database.
        self.connection.close()

        source_database_name = self.connection.settings_dict['NAME']
        target_database_name = self.get_test_db_clone_settings(suffix)['NAME']
        test_db_params = {
            'dbname': self._quote_name(target_database_name),
            'suffix': self._get_database_create_suffix(template=source_database_name),
        }
        with self._nodb_connection.cursor() as cursor:
            try:
                self._execute_create_test_db(cursor, test_db_params, keepdb)
            except Exception:
                try:
                    if verbosity >= 1:
                        print("Destroying old test database for alias %s..." % (
                            self._get_database_display_str(verbosity, target_database_name),
                        ))
                    cursor.execute('DROP DATABASE %(dbname)s' % test_db_params)
                    self._execute_create_test_db(cursor, test_db_params, keepdb)
                except Exception as e:
                    sys.stderr.write("Got an error cloning the test database: %s\n" % e)
                    sys.exit(2)
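

# A standalone sketch of _get_database_create_suffix() above (illustration
# only; `_demo_create_suffix` mirrors the method with the backend's
# double-quote name quoting inlined for the template database).
def _demo_create_suffix(encoding=None, template=None):
    suffix = ""
    if encoding:
        suffix += " ENCODING '{}'".format(encoding)
    if template:
        suffix += ' TEMPLATE "{}"'.format(template)
    return suffix and "WITH" + suffix

# _demo_create_suffix('UTF8', 'template0') returns:
#   WITH ENCODING 'UTF8' TEMPLATE "template0"
# which CREATE DATABASE accepts verbatim as its trailing clause.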
d6b855dbaec9d8ebaba781009a03230fee644ef539fc506805649fc1cf2f84c8
from django.db import utils from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): # SQLite cannot handle us only partially reading from a cursor's result set # and then writing the same rows to the database in another cursor. This # setting ensures we always read result sets fully into memory all in one # go. can_use_chunked_reads = False test_db_allows_multiple_connections = False supports_unspecified_pk = True supports_timezones = False max_query_params = 999 supports_mixed_date_datetime_comparisons = False autocommits_when_autocommit_is_off = True can_introspect_decimal_field = False can_introspect_positive_integer_field = True can_introspect_small_integer_field = True supports_transactions = True atomic_transactions = False can_rollback_ddl = True supports_atomic_references_rename = False supports_paramstyle_pyformat = False supports_sequence_reset = False can_clone_databases = True supports_temporal_subtraction = True ignores_table_name_case = True supports_cast_with_precision = False uses_savepoints = True can_release_savepoints = True @cached_property def supports_stddev(self): """ Confirm support for STDDEV and related stats functions. SQLite supports STDDEV as an extension package; so connection.ops.check_expression_support() can't unilaterally rule out support for STDDEV. Manually check whether the call works. """ with self.connection.cursor() as cursor: cursor.execute('CREATE TABLE STDDEV_TEST (X INT)') try: cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST') has_support = True except utils.DatabaseError: has_support = False cursor.execute('DROP TABLE STDDEV_TEST') return has_support
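

# A self-contained version of the supports_stddev probe above, runnable
# against the standard library's sqlite3 module (which ships without the
# stats extension, so this normally returns False). Illustration only: it
# catches sqlite3.OperationalError directly instead of Django's wrapped
# utils.DatabaseError.
def _demo_stddev_probe():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE STDDEV_TEST (X INT)')
    try:
        conn.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
        has_support = True
    except sqlite3.OperationalError:
        has_support = False
    conn.execute('DROP TABLE STDDEV_TEST')
    conn.close()
    return has_support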
ba135ac54e4781daffe93ece3e031e5a72e840a7ad40692fb0283353b2c5ab91
import re from django.db.backends.base.introspection import ( BaseDatabaseIntrospection, FieldInfo, TableInfo, ) from django.db.models.indexes import Index field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$') def get_field_size(name): """ Extract the size number from a "varchar(11)" type name """ m = field_size_re.search(name) return int(m.group(1)) if m else None # This light wrapper "fakes" a dictionary interface, because some SQLite data # types include variables in them -- e.g. "varchar(30)" -- and can't be matched # as a simple dictionary lookup. class FlexibleFieldLookupDict: # Maps SQL types to Django Field types. Some of the SQL types have multiple # entries here because SQLite allows for anything and doesn't normalize the # field type; it uses whatever was given. base_data_types_reverse = { 'bool': 'BooleanField', 'boolean': 'BooleanField', 'smallint': 'SmallIntegerField', 'smallint unsigned': 'PositiveSmallIntegerField', 'smallinteger': 'SmallIntegerField', 'int': 'IntegerField', 'integer': 'IntegerField', 'bigint': 'BigIntegerField', 'integer unsigned': 'PositiveIntegerField', 'decimal': 'DecimalField', 'real': 'FloatField', 'text': 'TextField', 'char': 'CharField', 'blob': 'BinaryField', 'date': 'DateField', 'datetime': 'DateTimeField', 'time': 'TimeField', } def __getitem__(self, key): key = key.lower() try: return self.base_data_types_reverse[key] except KeyError: size = get_field_size(key) if size is not None: return ('CharField', {'max_length': size}) raise KeyError class DatabaseIntrospection(BaseDatabaseIntrospection): data_types_reverse = FlexibleFieldLookupDict() def get_table_list(self, cursor): """Return a list of table and view names in the current database.""" # Skip the sqlite_sequence system table used for autoincrement key # generation. cursor.execute(""" SELECT name, type FROM sqlite_master WHERE type in ('table', 'view') AND NOT name='sqlite_sequence' ORDER BY name""") return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): """ Return a description of the table with the DB-API cursor.description interface. """ return [ FieldInfo( info['name'], info['type'], None, info['size'], None, None, info['null_ok'], info['default'], ) for info in self._table_info(cursor, table_name) ] def get_sequences(self, cursor, table_name, table_fields=()): pk_col = self.get_primary_key_column(cursor, table_name) return [{'table': table_name, 'column': pk_col}] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. """ # Dictionary of relations to return relations = {} # Schema for this table cursor.execute( "SELECT sql, type FROM sqlite_master " "WHERE tbl_name = %s AND type IN ('table', 'view')", [table_name] ) create_sql, table_type = cursor.fetchone() if table_type == 'view': # It might be a view, then no results will be returned return relations results = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')] # Walk through and look for references to other tables. SQLite doesn't # really have enforced references, but since it echoes out the SQL used # to create the table we can look for REFERENCES statements used there. 
for field_desc in results.split(','): field_desc = field_desc.strip() if field_desc.startswith("UNIQUE"): continue m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I) if not m: continue table, column = [s.strip('"') for s in m.groups()] if field_desc.startswith("FOREIGN KEY"): # Find name of the target FK field m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I) field_name = m.groups()[0].strip('"') else: field_name = field_desc.split()[0].strip('"') cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table]) result = cursor.fetchall()[0] other_table_results = result[0].strip() li, ri = other_table_results.index('('), other_table_results.rindex(')') other_table_results = other_table_results[li + 1:ri] for other_desc in other_table_results.split(','): other_desc = other_desc.strip() if other_desc.startswith('UNIQUE'): continue other_name = other_desc.split(' ', 1)[0].strip('"') if other_name == column: relations[field_name] = (other_name, table) break return relations def get_key_columns(self, cursor, table_name): """ Return a list of (column_name, referenced_table_name, referenced_column_name) for all key columns in given table. """ key_columns = [] # Schema for this table cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) results = cursor.fetchone()[0].strip() results = results[results.index('(') + 1:results.rindex(')')] # Walk through and look for references to other tables. SQLite doesn't # really have enforced references, but since it echoes out the SQL used # to create the table we can look for REFERENCES statements used there. for field_index, field_desc in enumerate(results.split(',')): field_desc = field_desc.strip() if field_desc.startswith("UNIQUE"): continue m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I) if not m: continue # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns key_columns.append(tuple(s.strip('"') for s in m.groups())) return key_columns def get_primary_key_column(self, cursor, table_name): """Return the column name of the primary key for the given table.""" # Don't use PRAGMA because that causes issues with some transactions cursor.execute( "SELECT sql, type FROM sqlite_master " "WHERE tbl_name = %s AND type IN ('table', 'view')", [table_name] ) row = cursor.fetchone() if row is None: raise ValueError("Table %s does not exist" % table_name) create_sql, table_type = row if table_type == 'view': # Views don't have a primary key. return None fields_sql = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')] for field_desc in fields_sql.split(','): field_desc = field_desc.strip() m = re.match(r'(?:(?:["`\[])(.*)(?:["`\]])|(\w+)).*PRIMARY KEY.*', field_desc) if m: return m.group(1) if m.group(1) else m.group(2) return None def _table_info(self, cursor, name): cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name)) # cid, name, type, notnull, default_value, pk return [{ 'name': field[1], 'type': field[2], 'size': get_field_size(field[2]), 'null_ok': not field[3], 'default': field[4], 'pk': field[5], # undocumented } for field in cursor.fetchall()] def _get_foreign_key_constraints(self, cursor, table_name): constraints = {} cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name)) for row in cursor.fetchall(): # Remaining on_update/on_delete/match values are of no interest. 
id_, _, table, from_, to = row[:5] constraints['fk_%d' % id_] = { 'columns': [from_], 'primary_key': False, 'unique': False, 'foreign_key': (table, to), 'check': False, 'index': False, } return constraints def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. """ constraints = {} # Find inline check constraints. try: table_schema = cursor.execute( "SELECT sql FROM sqlite_master WHERE type='table' and name=%s" % ( self.connection.ops.quote_name(table_name), ) ).fetchone()[0] except TypeError: # table_name is a view. pass else: fields_with_check_constraints = [ schema_row.strip().split(' ')[0][1:-1] for schema_row in table_schema.split(',') if schema_row.find('CHECK') >= 0 ] for field_name in fields_with_check_constraints: # An arbitrary made up name. constraints['__check__%s' % field_name] = { 'columns': [field_name], 'primary_key': False, 'unique': False, 'foreign_key': False, 'check': True, 'index': False, } # Get the index info cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name)) for row in cursor.fetchall(): # Sqlite3 3.8.9+ has 5 columns, however older versions only give 3 # columns. Discard last 2 columns if there. number, index, unique = row[:3] # Get the index info for that index cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index)) for index_rank, column_rank, column in cursor.fetchall(): if index not in constraints: constraints[index] = { "columns": [], "primary_key": False, "unique": bool(unique), "foreign_key": False, "check": False, "index": True, } constraints[index]['columns'].append(column) # Add type and column orders for indexes if constraints[index]['index'] and not constraints[index]['unique']: # SQLite doesn't support any index type other than b-tree constraints[index]['type'] = Index.suffix cursor.execute( "SELECT sql FROM sqlite_master " "WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index) ) orders = [] # There would be only 1 row to loop over for sql, in cursor.fetchall(): order_info = sql.split('(')[-1].split(')')[0].split(',') orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info] constraints[index]['orders'] = orders # Get the PK pk_column = self.get_primary_key_column(cursor, table_name) if pk_column: # SQLite doesn't actually give a name to the PK constraint, # so we invent one. This is fine, as the SQLite backend never # deletes PK constraints by name, as you can't delete constraints # in SQLite; we remake the table with a new PK instead. constraints["__primary__"] = { "columns": [pk_column], "primary_key": True, "unique": False, # It's not actually a unique constraint. "foreign_key": False, "check": False, "index": False, } constraints.update(self._get_foreign_key_constraints(cursor, table_name)) return constraints
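

# A runnable sketch of the PRAGMA-based _table_info() parsing above, using
# the standard library's sqlite3 against a throwaway in-memory table;
# illustration only.
def _demo_table_info():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE demo (id integer PRIMARY KEY, name varchar(30) NOT NULL)')
    rows = conn.execute('PRAGMA table_info(demo)').fetchall()
    conn.close()
    # Each row is (cid, name, type, notnull, default_value, pk); e.g. the
    # "name" column comes back as (1, 'name', 'varchar(30)', 1, None, 0),
    # from which get_field_size('varchar(30)') extracts max_length=30.
    return rows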
3c445951f81cdf1188600c8543f3e86f66a7fe6feffee50dfffd72240d37807d
""" SQLite3 backend for the sqlite3 module in the standard library. """ import datetime import decimal import math import operator import re import warnings from sqlite3 import dbapi2 as Database import pytz from django.core.exceptions import ImproperlyConfigured from django.db import utils from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils import timezone from django.utils.dateparse import parse_datetime, parse_time from django.utils.duration import duration_microseconds from .client import DatabaseClient # isort:skip from .creation import DatabaseCreation # isort:skip from .features import DatabaseFeatures # isort:skip from .introspection import DatabaseIntrospection # isort:skip from .operations import DatabaseOperations # isort:skip from .schema import DatabaseSchemaEditor # isort:skip def decoder(conv_func): """ Convert bytestrings from Python's sqlite3 interface to a regular string. """ return lambda s: conv_func(s.decode()) Database.register_converter("bool", b'1'.__eq__) Database.register_converter("time", decoder(parse_time)) Database.register_converter("datetime", decoder(parse_datetime)) Database.register_converter("timestamp", decoder(parse_datetime)) Database.register_converter("TIMESTAMP", decoder(parse_datetime)) Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'sqlite' display_name = 'SQLite' # SQLite doesn't actually support most of these types, but it "does the right # thing" given more verbose field definitions, so leave them as is so that # schema inspection is more useful. data_types = { 'AutoField': 'integer', 'BigAutoField': 'integer', 'BinaryField': 'BLOB', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime', 'DecimalField': 'decimal', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'real', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveIntegerField': 'integer unsigned', 'PositiveSmallIntegerField': 'smallint unsigned', 'SlugField': 'varchar(%(max_length)s)', 'SmallIntegerField': 'smallint', 'TextField': 'text', 'TimeField': 'time', 'UUIDField': 'char(32)', } data_type_check_constraints = { 'PositiveIntegerField': '"%(column)s" >= 0', 'PositiveSmallIntegerField': '"%(column)s" >= 0', } data_types_suffix = { 'AutoField': 'AUTOINCREMENT', 'BigAutoField': 'AUTOINCREMENT', } # SQLite requires LIKE statements to include an ESCAPE clause if the value # being escaped has a percent or underscore in it. # See http://www.sqlite.org/lang_expr.html for an explanation. operators = { 'exact': '= %s', 'iexact': "LIKE %s ESCAPE '\\'", 'contains': "LIKE %s ESCAPE '\\'", 'icontains': "LIKE %s ESCAPE '\\'", 'regex': 'REGEXP %s', 'iregex': "REGEXP '(?i)' || %s", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE %s ESCAPE '\\'", 'endswith': "LIKE %s ESCAPE '\\'", 'istartswith': "LIKE %s ESCAPE '\\'", 'iendswith': "LIKE %s ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. 
\, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'", 'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", 'startswith': r"LIKE {} || '%%' ESCAPE '\'", 'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'", 'endswith': r"LIKE '%%' || {} ESCAPE '\'", 'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict['NAME']: raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value.") kwargs = { 'database': settings_dict['NAME'], 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, **settings_dict['OPTIONS'], } # Always allow the underlying SQLite connection to be shareable # between multiple threads. The safe-guarding will be handled at a # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` # property. This is necessary as the shareability is disabled by # default in pysqlite and it cannot be changed once a connection is # opened. if 'check_same_thread' in kwargs and kwargs['check_same_thread']: warnings.warn( 'The `check_same_thread` option was provided and set to ' 'True. It will be overridden with False. Use the ' '`DatabaseWrapper.allow_thread_sharing` property instead ' 'for controlling thread shareability.', RuntimeWarning ) kwargs.update({'check_same_thread': False, 'uri': True}) return kwargs def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) conn.create_function("django_date_extract", 2, _sqlite_date_extract) conn.create_function("django_date_trunc", 2, _sqlite_date_trunc) conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date) conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time) conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract) conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc) conn.create_function("django_time_extract", 2, _sqlite_time_extract) conn.create_function("django_time_trunc", 2, _sqlite_time_trunc) conn.create_function("django_time_diff", 2, _sqlite_time_diff) conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff) conn.create_function("regexp", 2, _sqlite_regexp) conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta) conn.create_function("django_power", 2, _sqlite_power) conn.create_function('LPAD', 3, _sqlite_lpad) conn.create_function('REPEAT', 2, operator.mul) conn.create_function('RPAD', 3, _sqlite_rpad) conn.execute('PRAGMA foreign_keys = ON') return conn def init_connection_state(self): pass def create_cursor(self, name=None): return self.connection.cursor(factory=SQLiteCursorWrapper) def close(self): self.validate_thread_sharing() # If database is in memory, closing the connection destroys the # database. To prevent accidental data loss, ignore close requests on # an in-memory db. 
if not self.is_in_memory_db(): BaseDatabaseWrapper.close(self) def _savepoint_allowed(self): # Two conditions are required here: # - A sufficiently recent version of SQLite to support savepoints, # - Being in a transaction, which can only happen inside 'atomic'. # When 'isolation_level' is not None, sqlite3 commits before each # savepoint; it's a bug. When it is None, savepoints don't make sense # because autocommit is enabled. The only exception is inside 'atomic' # blocks. To work around that bug, on SQLite, 'atomic' starts a # transaction explicitly rather than simply disable autocommit. return self.features.uses_savepoints and self.in_atomic_block def _set_autocommit(self, autocommit): if autocommit: level = None else: # sqlite3's internal default is ''. It's different from None. # See Modules/_sqlite/connection.c. level = '' # 'isolation_level' is a misleading API. # SQLite always runs at the SERIALIZABLE isolation level. with self.wrap_database_errors: self.connection.isolation_level = level def disable_constraint_checking(self): if self.in_atomic_block: # sqlite3 cannot disable constraint checking inside a transaction. return False self.cursor().execute('PRAGMA foreign_keys = OFF') return True def enable_constraint_checking(self): self.cursor().execute('PRAGMA foreign_keys = ON') def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. """ with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise utils.IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." % ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): return True def _start_transaction_under_autocommit(self): """ Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled. """ self.cursor().execute("BEGIN") def is_in_memory_db(self): return self.creation.is_in_memory_db(self.settings_dict['NAME']) FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s') class SQLiteCursorWrapper(Database.Cursor): """ Django uses "format" style placeholders, but pysqlite2 uses "qmark" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". 
""" def execute(self, query, params=None): if params is None: return Database.Cursor.execute(self, query) query = self.convert_query(query) return Database.Cursor.execute(self, query, params) def executemany(self, query, param_list): query = self.convert_query(query) return Database.Cursor.executemany(self, query, param_list) def convert_query(self, query): return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%') def _sqlite_date_extract(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_timestamp(dt) except (ValueError, TypeError): return None if lookup_type == 'week_day': return (dt.isoweekday() % 7) + 1 elif lookup_type == 'week': return dt.isocalendar()[1] elif lookup_type == 'quarter': return math.ceil(dt.month / 3) else: return getattr(dt, lookup_type) def _sqlite_date_trunc(lookup_type, dt): try: dt = backend_utils.typecast_timestamp(dt) except (ValueError, TypeError): return None if lookup_type == 'year': return "%i-01-01" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) def _sqlite_time_trunc(lookup_type, dt): try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None if lookup_type == 'hour': return "%02i:00:00" % dt.hour elif lookup_type == 'minute': return "%02i:%02i:00" % (dt.hour, dt.minute) elif lookup_type == 'second': return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second) def _sqlite_datetime_parse(dt, tzname): if dt is None: return None try: dt = backend_utils.typecast_timestamp(dt) except (ValueError, TypeError): return None if tzname is not None: dt = timezone.localtime(dt, pytz.timezone(tzname)) return dt def _sqlite_datetime_cast_date(dt, tzname): dt = _sqlite_datetime_parse(dt, tzname) if dt is None: return None return dt.date().isoformat() def _sqlite_datetime_cast_time(dt, tzname): dt = _sqlite_datetime_parse(dt, tzname) if dt is None: return None return dt.time().isoformat() def _sqlite_datetime_extract(lookup_type, dt, tzname): dt = _sqlite_datetime_parse(dt, tzname) if dt is None: return None if lookup_type == 'week_day': return (dt.isoweekday() % 7) + 1 elif lookup_type == 'week': return dt.isocalendar()[1] elif lookup_type == 'quarter': return math.ceil(dt.month / 3) else: return getattr(dt, lookup_type) def _sqlite_datetime_trunc(lookup_type, dt, tzname): dt = _sqlite_datetime_parse(dt, tzname) if dt is None: return None if lookup_type == 'year': return "%i-01-01 00:00:00" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01 00:00:00" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'hour': return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour) elif lookup_type == 'minute': return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) elif lookup_type == 'second': return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, 
dt.minute, dt.second) def _sqlite_time_extract(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None return getattr(dt, lookup_type) def _sqlite_format_dtdelta(conn, lhs, rhs): """ LHS and RHS can be either: - An integer number of microseconds - A string representing a datetime """ try: real_lhs = datetime.timedelta(0, 0, lhs) if isinstance(lhs, int) else backend_utils.typecast_timestamp(lhs) real_rhs = datetime.timedelta(0, 0, rhs) if isinstance(rhs, int) else backend_utils.typecast_timestamp(rhs) if conn.strip() == '+': out = real_lhs + real_rhs else: out = real_lhs - real_rhs except (ValueError, TypeError): return None # typecast_timestamp returns a date or a datetime without timezone. # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]" return str(out) def _sqlite_time_diff(lhs, rhs): left = backend_utils.typecast_time(lhs) right = backend_utils.typecast_time(rhs) return ( (left.hour * 60 * 60 * 1000000) + (left.minute * 60 * 1000000) + (left.second * 1000000) + (left.microsecond) - (right.hour * 60 * 60 * 1000000) - (right.minute * 60 * 1000000) - (right.second * 1000000) - (right.microsecond) ) def _sqlite_timestamp_diff(lhs, rhs): left = backend_utils.typecast_timestamp(lhs) right = backend_utils.typecast_timestamp(rhs) return duration_microseconds(left - right) def _sqlite_regexp(re_pattern, re_string): return bool(re.search(re_pattern, str(re_string))) if re_string is not None else False def _sqlite_lpad(text, length, fill_text): if len(text) >= length: return text[:length] return (fill_text * length)[:length - len(text)] + text def _sqlite_rpad(text, length, fill_text): return (text + fill_text * length)[:length] def _sqlite_power(x, y): return x ** y
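

# A small self-contained illustration of SQLiteCursorWrapper.convert_query()
# above: Django's "format" style %s placeholders become the "qmark" style ?
# that pysqlite expects, while an escaped %%s survives as a literal %s.
# (FORMAT_QMARK_REGEX is the module-level pattern defined above.)
def _demo_convert_query(query):
    return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')

# _demo_convert_query("SELECT %s WHERE name LIKE '%%s'") returns
# "SELECT ? WHERE name LIKE '%s'".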
7598ef076dce5ba430c1b62ffa2f353b4aa3aacbccd19d0701520d67e116f706
import datetime import decimal import uuid from django.conf import settings from django.core.exceptions import FieldError from django.db import utils from django.db.backends.base.operations import BaseDatabaseOperations from django.db.models import aggregates, fields from django.db.models.expressions import Col from django.utils import timezone from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.duration import duration_microseconds class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'text' cast_data_types = { 'DateField': 'TEXT', 'DateTimeField': 'TEXT', } explain_prefix = 'EXPLAIN QUERY PLAN' def bulk_batch_size(self, fields, objs): """ SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of 999 variables per query. If there's only a single field to insert, the limit is 500 (SQLITE_MAX_COMPOUND_SELECT). """ if len(fields) == 1: return 500 elif len(fields) > 1: return self.connection.features.max_query_params // len(fields) else: return len(objs) def check_expression_support(self, expression): bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField) bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev) if isinstance(expression, bad_aggregates): for expr in expression.get_source_expressions(): try: output_field = expr.output_field except FieldError: # Not every subexpression has an output_field which is fine # to ignore. pass else: if isinstance(output_field, bad_fields): raise utils.NotSupportedError( 'You cannot use Sum, Avg, StdDev, and Variance ' 'aggregations on date/time fields in sqlite3 ' 'since date/time is saved as text.' ) def date_extract_sql(self, lookup_type, field_name): """ Support EXTRACT with a user-defined function django_date_extract() that's registered in connect(). Use single quotes because this is a string and could otherwise cause a collision with a field name. """ return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name) def date_interval_sql(self, timedelta): return str(duration_microseconds(timedelta)) def format_for_duration_arithmetic(self, sql): """Do nothing since formatting is handled in the custom function.""" return sql def date_trunc_sql(self, lookup_type, field_name): return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name) def time_trunc_sql(self, lookup_type, field_name): return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name) def _convert_tzname_to_sql(self, tzname): return "'%s'" % tzname if settings.USE_TZ else 'NULL' def datetime_cast_date_sql(self, field_name, tzname): return "django_datetime_cast_date(%s, %s)" % ( field_name, self._convert_tzname_to_sql(tzname), ) def datetime_cast_time_sql(self, field_name, tzname): return "django_datetime_cast_time(%s, %s)" % ( field_name, self._convert_tzname_to_sql(tzname), ) def datetime_extract_sql(self, lookup_type, field_name, tzname): return "django_datetime_extract('%s', %s, %s)" % ( lookup_type.lower(), field_name, self._convert_tzname_to_sql(tzname), ) def datetime_trunc_sql(self, lookup_type, field_name, tzname): return "django_datetime_trunc('%s', %s, %s)" % ( lookup_type.lower(), field_name, self._convert_tzname_to_sql(tzname), ) def time_extract_sql(self, lookup_type, field_name): return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name) def pk_default_value(self): return "NULL" def _quote_params_for_last_executed_query(self, params): """ Only for last_executed_query! 
Don't use this to execute SQL queries! """ # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the # number of return values, default = 2000). Since Python's sqlite3 # module doesn't expose the get_limit() C API, assume the default # limits are in effect and split the work in batches if needed. BATCH_SIZE = 999 if len(params) > BATCH_SIZE: results = () for index in range(0, len(params), BATCH_SIZE): chunk = params[index:index + BATCH_SIZE] results += self._quote_params_for_last_executed_query(chunk) return results sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params)) # Bypass Django's wrappers and use the underlying sqlite3 connection # to avoid logging this query - it would trigger infinite recursion. cursor = self.connection.connection.cursor() # Native sqlite3 cursors cannot be used as context managers. try: return cursor.execute(sql, params).fetchone() finally: cursor.close() def last_executed_query(self, cursor, sql, params): # Python substitutes parameters in Modules/_sqlite/cursor.c with: # pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars); # Unfortunately there is no way to reach self->statement from Python, # so we quote and substitute parameters manually. if params: if isinstance(params, (list, tuple)): params = self._quote_params_for_last_executed_query(params) else: values = tuple(params.values()) values = self._quote_params_for_last_executed_query(values) params = dict(zip(params, values)) return sql % params # For consistency with SQLiteCursorWrapper.execute(), just return sql # when there are no parameters. See #13648 and #17158. else: return sql def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def no_limit_value(self): return -1 def sql_flush(self, style, tables, sequences, allow_cascade=False): sql = ['%s %s %s;' % ( style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'), style.SQL_FIELD(self.quote_name(table)) ) for table in tables] # Note: No requirement for reset of auto-incremented indices (cf. other # sql_flush() implementations). Just return SQL at this point return sql def execute_sql_flush(self, using, sql_list): # To prevent possible violation of foreign key constraints, deactivate # constraints outside of the transaction created in super(). with self.connection.constraint_checks_disabled(): super().execute_sql_flush(using, sql_list) def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. 
if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): raise ValueError("SQLite backend does not support timezone-aware times.") return str(value) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'DateTimeField': converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'DecimalField': converters.append(self.get_decimalfield_converter(expression)) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) elif internal_type in ('NullBooleanField', 'BooleanField'): converters.append(self.convert_booleanfield_value) return converters def convert_datetimefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.datetime): value = parse_datetime(value) if settings.USE_TZ and not timezone.is_aware(value): value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.date): value = parse_date(value) return value def convert_timefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.time): value = parse_time(value) return value def get_decimalfield_converter(self, expression): # SQLite stores only 15 significant digits. Digits coming from # float inaccuracy must be removed. create_decimal = decimal.Context(prec=15).create_decimal_from_float if isinstance(expression, Col): quantize_value = decimal.Decimal(1).scaleb(-expression.output_field.decimal_places) def converter(value, expression, connection): if value is not None: return create_decimal(value).quantize(quantize_value, context=expression.output_field.context) else: def converter(value, expression, connection): if value is not None: return create_decimal(value) return converter def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def convert_booleanfield_value(self, value, expression, connection): return bool(value) if value in (1, 0) else value def bulk_insert_sql(self, fields, placeholder_rows): return " UNION ALL ".join( "SELECT %s" % ", ".join(row) for row in placeholder_rows ) def combine_expression(self, connector, sub_expressions): # SQLite doesn't have a power function, so we fake it with a # user-defined function django_power that's registered in connect(). if connector == '^': return 'django_power(%s)' % ','.join(sub_expressions) return super().combine_expression(connector, sub_expressions) def combine_duration_expression(self, connector, sub_expressions): if connector not in ['+', '-']: raise utils.DatabaseError('Invalid connector for timedelta: %s.' 
% connector) fn_params = ["'%s'" % connector] + sub_expressions if len(fn_params) > 3: raise ValueError('Too many params for timedelta operations.') return "django_format_dtdelta(%s)" % ', '.join(fn_params) def integer_field_range(self, internal_type): # SQLite doesn't enforce any integer constraints return (None, None) def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs if internal_type == 'TimeField': return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
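

# Worked example of the batching math in bulk_batch_size() above, assuming
# SQLite's default compile-time limits (999 query parameters per statement,
# 500 compound SELECTs); illustration only.
def _demo_bulk_batch_size(num_fields, num_objs, max_query_params=999):
    if num_fields == 1:
        return 500
    elif num_fields > 1:
        return max_query_params // num_fields
    return num_objs

# _demo_bulk_batch_size(3, 1000) == 333, so a three-field bulk_create() of
# 1000 objects is split into batches of at most 333 rows (999 parameters).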
7ece12cb4fa675a4afeab58a0e80f946f8b51b6a4cd18aa9ad925ce615fcfc5c
import contextlib import copy from decimal import Decimal from django.apps.registry import Apps from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.ddl_references import Statement from django.db.transaction import atomic from django.db.utils import NotSupportedError class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_delete_table = "DROP TABLE %(table)s" sql_create_fk = None sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED" sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)" sql_delete_unique = "DROP INDEX %(name)s" def __enter__(self): # Some SQLite schema alterations need foreign key constraints to be # disabled. Enforce it here for the duration of the transaction. self.connection.disable_constraint_checking() return super().__enter__() def __exit__(self, exc_type, exc_value, traceback): super().__exit__(exc_type, exc_value, traceback) self.connection.enable_constraint_checking() def quote_value(self, value): # The backend "mostly works" without this function and there are use # cases for compiling Python without the sqlite3 libraries (e.g. # security hardening). try: import sqlite3 value = sqlite3.adapt(value) except ImportError: pass except sqlite3.ProgrammingError: pass # Manual emulation of SQLite parameter quoting if isinstance(value, bool): return str(int(value)) elif isinstance(value, (Decimal, float, int)): return str(value) elif isinstance(value, str): return "'%s'" % value.replace("\'", "\'\'") elif value is None: return "NULL" elif isinstance(value, (bytes, bytearray, memoryview)): # Bytes are only allowed for BLOB fields, encoded as string # literals containing hexadecimal data and preceded by a single "X" # character. return "X'%s'" % value.hex() else: raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value))) def _is_referenced_by_fk_constraint(self, table_name, column_name=None, ignore_self=False): """ Return whether or not the provided table name is referenced by another one. If `column_name` is specified, only references pointing to that column are considered. If `ignore_self` is True, self-referential constraints are ignored. """ with self.connection.cursor() as cursor: for other_table in self.connection.introspection.get_table_list(cursor): if ignore_self and other_table.name == table_name: continue constraints = self.connection.introspection._get_foreign_key_constraints(cursor, other_table.name) for constraint in constraints.values(): constraint_table, constraint_column = constraint['foreign_key'] if (constraint_table == table_name and (column_name is None or constraint_column == column_name)): return True return False def alter_db_table(self, model, old_db_table, new_db_table, disable_constraints=True): if disable_constraints and self._is_referenced_by_fk_constraint(old_db_table): if self.connection.in_atomic_block: raise NotSupportedError(( 'Renaming the %r table while in a transaction is not ' 'supported on SQLite because it would break referential ' 'integrity. Try adding `atomic = False` to the Migration class.' 
) % old_db_table) self.connection.enable_constraint_checking() super().alter_db_table(model, old_db_table, new_db_table) self.connection.disable_constraint_checking() else: super().alter_db_table(model, old_db_table, new_db_table) def alter_field(self, model, old_field, new_field, strict=False): old_field_name = old_field.name table_name = model._meta.db_table _, old_column_name = old_field.get_attname_column() if (new_field.name != old_field_name and self._is_referenced_by_fk_constraint(table_name, old_column_name, ignore_self=True)): if self.connection.in_atomic_block: raise NotSupportedError(( 'Renaming the %r.%r column while in a transaction is not ' 'supported on SQLite because it would break referential ' 'integrity. Try adding `atomic = False` to the Migration class.' ) % (model._meta.db_table, old_field_name)) with atomic(self.connection.alias): super().alter_field(model, old_field, new_field, strict=strict) # Follow SQLite's documented procedure for performing changes # that don't affect the on-disk content. # https://sqlite.org/lang_altertable.html#otheralter with self.connection.cursor() as cursor: schema_version = cursor.execute('PRAGMA schema_version').fetchone()[0] cursor.execute('PRAGMA writable_schema = 1') references_template = ' REFERENCES "%s" ("%%s") ' % table_name new_column_name = new_field.get_attname_column()[1] search = references_template % old_column_name replacement = references_template % new_column_name cursor.execute('UPDATE sqlite_master SET sql = replace(sql, %s, %s)', (search, replacement)) cursor.execute('PRAGMA schema_version = %d' % (schema_version + 1)) cursor.execute('PRAGMA writable_schema = 0') # The integrity check will raise an exception and rollback # the transaction if the sqlite_master updates corrupt the # database. cursor.execute('PRAGMA integrity_check') # Perform a VACUUM to refresh the database representation from # the sqlite_master table. with self.connection.cursor() as cursor: cursor.execute('VACUUM') else: super().alter_field(model, old_field, new_field, strict=strict) def _remake_table(self, model, create_field=None, delete_field=None, alter_field=None): """ Shortcut to transform a model from old_model into new_model The essential steps are: 1. rename the model's existing table, e.g. "app_model" to "app_model__old" 2. create a table with the updated definition called "app_model" 3. copy the data from the old renamed table to the new table 4. delete the "app_model__old" table """ # Self-referential fields must be recreated rather than copied from # the old model to ensure their remote_field.field_name doesn't refer # to an altered field. def is_self_referential(f): return f.is_relation and f.remote_field.model is model # Work out the new fields dict / mapping body = { f.name: f.clone() if is_self_referential(f) else f for f in model._meta.local_concrete_fields } # Since mapping might mix column names and default values, # its values must be already quoted. 
mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields} # This maps field names (not columns) for things like unique_together rename_mapping = {} # If any of the new or altered fields is introducing a new PK, # remove the old one restore_pk_field = None if getattr(create_field, 'primary_key', False) or ( alter_field and getattr(alter_field[1], 'primary_key', False)): for name, field in list(body.items()): if field.primary_key: field.primary_key = False restore_pk_field = field if field.auto_created: del body[name] del mapping[field.column] # Add in any created fields if create_field: body[create_field.name] = create_field # Choose a default and insert it into the copy map if not create_field.many_to_many and create_field.concrete: mapping[create_field.column] = self.quote_value( self.effective_default(create_field) ) # Add in any altered fields if alter_field: old_field, new_field = alter_field body.pop(old_field.name, None) mapping.pop(old_field.column, None) body[new_field.name] = new_field if old_field.null and not new_field.null: case_sql = "coalesce(%(col)s, %(default)s)" % { 'col': self.quote_name(old_field.column), 'default': self.quote_value(self.effective_default(new_field)) } mapping[new_field.column] = case_sql else: mapping[new_field.column] = self.quote_name(old_field.column) rename_mapping[old_field.name] = new_field.name # Remove any deleted fields if delete_field: del body[delete_field.name] del mapping[delete_field.column] # Remove any implicit M2M tables if delete_field.many_to_many and delete_field.remote_field.through._meta.auto_created: return self.delete_model(delete_field.remote_field.through) # Work inside a new app registry apps = Apps() # Provide isolated instances of the fields to the new model body so # that the existing model's internals aren't interfered with when # the dummy model is constructed. body = copy.deepcopy(body) # Work out the new value of unique_together, taking renames into # account unique_together = [ [rename_mapping.get(n, n) for n in unique] for unique in model._meta.unique_together ] # Work out the new value for index_together, taking renames into # account index_together = [ [rename_mapping.get(n, n) for n in index] for index in model._meta.index_together ] indexes = model._meta.indexes if delete_field: indexes = [ index for index in indexes if delete_field.name not in index.fields ] # Construct a new model for the new state meta_contents = { 'app_label': model._meta.app_label, 'db_table': model._meta.db_table, 'unique_together': unique_together, 'index_together': index_together, 'indexes': indexes, 'apps': apps, } meta = type("Meta", (), meta_contents) body['Meta'] = meta body['__module__'] = model.__module__ temp_model = type(model._meta.object_name, model.__bases__, body) # We need to modify model._meta.db_table, but everything explodes # if the change isn't reversed before the end of this method. This # context manager helps us avoid that situation. @contextlib.contextmanager def altered_table_name(model, temporary_table_name): original_table_name = model._meta.db_table model._meta.db_table = temporary_table_name yield model._meta.db_table = original_table_name with altered_table_name(model, model._meta.db_table + "__old"): # Rename the old table to make way for the new self.alter_db_table( model, temp_model._meta.db_table, model._meta.db_table, disable_constraints=False, ) # Create a new table with the updated schema. 
self.create_model(temp_model) # Copy data from the old table into the new table field_maps = list(mapping.items()) self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % ( self.quote_name(temp_model._meta.db_table), ', '.join(self.quote_name(x) for x, y in field_maps), ', '.join(y for x, y in field_maps), self.quote_name(model._meta.db_table), )) # Delete the old table self.delete_model(model, handle_autom2m=False) # Run deferred SQL on correct table for sql in self.deferred_sql: self.execute(sql) self.deferred_sql = [] # Fix any PK-removed field if restore_pk_field: restore_pk_field.primary_key = True def delete_model(self, model, handle_autom2m=True): if handle_autom2m: super().delete_model(model) else: # Delete the table (and only that) self.execute(self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), }) # Remove all deferred statements referencing the deleted table. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_table(model._meta.db_table): self.deferred_sql.remove(sql) def add_field(self, model, field): """ Create a field on a model. Usually involves adding a column, but may involve adding a table instead (for M2M fields). """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.create_model(field.remote_field.through) self._remake_table(model, create_field=field) def remove_field(self, model, field): """ Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # M2M fields are a special case if field.many_to_many: # For implicit M2M tables, delete the auto-created table if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # For explicit "through" M2M fields, do nothing # For everything else, remake. else: # It might not actually have a column behind it if field.db_parameters(connection=self.connection)['type'] is None: return self._remake_table(model, delete_field=field) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): """Perform a "physical" (non-ManyToMany) field update.""" # Alter by remaking table self._remake_table(model, alter_field=(old_field, new_field)) # Rebuild tables with FKs pointing to this field if the PK type changed. if old_field.primary_key and new_field.primary_key and old_type != new_type: for rel in new_field.model._meta.related_objects: if not rel.many_to_many: self._remake_table(rel.related_model) def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table: # The field name didn't change, but some options did; we have to propagate this altering. 
self._remake_table( old_field.remote_field.through, alter_field=( # We need the field that points to the target model, so we can tell alter_field to change it - # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()), ), ) return # Make a new through table self.create_model(new_field.remote_field.through) # Copy the data across self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % ( self.quote_name(new_field.remote_field.through._meta.db_table), ', '.join([ "id", new_field.m2m_column_name(), new_field.m2m_reverse_name(), ]), ', '.join([ "id", old_field.m2m_column_name(), old_field.m2m_reverse_name(), ]), self.quote_name(old_field.remote_field.through._meta.db_table), )) # Delete the old through table self.delete_model(old_field.remote_field.through)
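
# Example (an illustrative sketch, not part of the schema editor above): the
# NotSupportedError messages raised here point at the fix on the user's side.
# A migration that renames a column referenced by a foreign key must opt out
# of the transaction wrapper on SQLite. The app label, model, and field names
# below are hypothetical.
from django.db import migrations


class Migration(migrations.Migration):
    # SQLite can't toggle PRAGMA foreign_keys inside a transaction, so the
    # rename has to run non-atomically.
    atomic = False

    dependencies = [('myapp', '0001_initial')]

    operations = [
        migrations.RenameField('author', 'name', 'full_name'),
    ]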
import subprocess from django.db.backends.base.client import BaseDatabaseClient class DatabaseClient(BaseDatabaseClient): executable_name = 'sqlite3' def runshell(self): args = [self.executable_name, self.connection.settings_dict['NAME']] subprocess.check_call(args)
import os import shutil import sys from django.db.backends.base.creation import BaseDatabaseCreation class DatabaseCreation(BaseDatabaseCreation): @staticmethod def is_in_memory_db(database_name): return database_name == ':memory:' or 'mode=memory' in database_name def _get_test_db_name(self): test_database_name = self.connection.settings_dict['TEST']['NAME'] or ':memory:' if test_database_name == ':memory:': return 'file:memorydb_%s?mode=memory&cache=shared' % self.connection.alias return test_database_name def _create_test_db(self, verbosity, autoclobber, keepdb=False): test_database_name = self._get_test_db_name() if keepdb: return test_database_name if not self.is_in_memory_db(test_database_name): # Erase the old test database if verbosity >= 1: print("Destroying old test database for alias %s..." % ( self._get_database_display_str(verbosity, test_database_name), )) if os.access(test_database_name, os.F_OK): if not autoclobber: confirm = input( "Type 'yes' if you would like to try deleting the test " "database '%s', or 'no' to cancel: " % test_database_name ) if autoclobber or confirm == 'yes': try: os.remove(test_database_name) except Exception as e: sys.stderr.write("Got an error deleting the old test database: %s\n" % e) sys.exit(2) else: print("Tests cancelled.") sys.exit(1) return test_database_name def get_test_db_clone_settings(self, suffix): orig_settings_dict = self.connection.settings_dict source_database_name = orig_settings_dict['NAME'] if self.is_in_memory_db(source_database_name): return orig_settings_dict else: root, ext = os.path.splitext(orig_settings_dict['NAME']) return {**orig_settings_dict, 'NAME': '{}_{}.{}'.format(root, suffix, ext)} def _clone_test_db(self, suffix, verbosity, keepdb=False): source_database_name = self.connection.settings_dict['NAME'] target_database_name = self.get_test_db_clone_settings(suffix)['NAME'] # Forking automatically makes a copy of an in-memory database. if not self.is_in_memory_db(source_database_name): # Erase the old test database if os.access(target_database_name, os.F_OK): if keepdb: return if verbosity >= 1: print("Destroying old test database for alias %s..." % ( self._get_database_display_str(verbosity, target_database_name), )) try: os.remove(target_database_name) except Exception as e: sys.stderr.write("Got an error deleting the old test database: %s\n" % e) sys.exit(2) try: shutil.copy(source_database_name, target_database_name) except Exception as e: sys.stderr.write("Got an error cloning the test database: %s\n" % e) sys.exit(2) def _destroy_test_db(self, test_database_name, verbosity): if test_database_name and not self.is_in_memory_db(test_database_name): # Remove the SQLite database file os.remove(test_database_name) def test_db_signature(self): """ Return a tuple that uniquely identifies a test database. This takes into account the special cases of ":memory:" and "" for SQLite since the databases will be distinct despite having the same TEST NAME. See http://www.sqlite.org/inmemorydb.html """ test_database_name = self._get_test_db_name() sig = [self.connection.settings_dict['NAME']] if self.is_in_memory_db(test_database_name): sig.append(self.connection.alias) return tuple(sig)
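
# Example (illustrative assertions, not part of the module above): how the
# in-memory detection behaves. A bare ':memory:' test name is rewritten by
# _get_test_db_name() into a shared-cache URI so that every connection for
# the alias sees the same database.
assert DatabaseCreation.is_in_memory_db(':memory:')
assert DatabaseCreation.is_in_memory_db('file:demo?mode=memory&cache=shared')
assert not DatabaseCreation.is_in_memory_db('/tmp/test_db.sqlite3')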
""" HTTP server that implements the Python WSGI protocol (PEP 333, rev 1.21). Based on wsgiref.simple_server which is part of the standard library since 2.5. This is a simple server for use in testing or debugging Django apps. It hasn't been reviewed for security issues. DON'T USE IT FOR PRODUCTION USE! """ import logging import socket import socketserver import sys from wsgiref import simple_server from django.core.exceptions import ImproperlyConfigured from django.core.wsgi import get_wsgi_application from django.utils.module_loading import import_string __all__ = ('WSGIServer', 'WSGIRequestHandler') logger = logging.getLogger('django.server') def get_internal_wsgi_application(): """ Load and return the WSGI application as configured by the user in ``settings.WSGI_APPLICATION``. With the default ``startproject`` layout, this will be the ``application`` object in ``projectname/wsgi.py``. This function, and the ``WSGI_APPLICATION`` setting itself, are only useful for Django's internal server (runserver); external WSGI servers should just be configured to point to the correct application object directly. If settings.WSGI_APPLICATION is not set (is ``None``), return whatever ``django.core.wsgi.get_wsgi_application`` returns. """ from django.conf import settings app_path = getattr(settings, 'WSGI_APPLICATION') if app_path is None: return get_wsgi_application() try: return import_string(app_path) except ImportError as err: raise ImproperlyConfigured( "WSGI application '%s' could not be loaded; " "Error importing module." % app_path ) from err def is_broken_pipe_error(): exc_type, exc_value = sys.exc_info()[:2] return issubclass(exc_type, socket.error) and exc_value.args[0] == 32 class WSGIServer(simple_server.WSGIServer): """BaseHTTPServer that implements the Python WSGI protocol""" request_queue_size = 10 def __init__(self, *args, ipv6=False, allow_reuse_address=True, **kwargs): if ipv6: self.address_family = socket.AF_INET6 self.allow_reuse_address = allow_reuse_address super().__init__(*args, **kwargs) def handle_error(self, request, client_address): if is_broken_pipe_error(): logger.info("- Broken pipe from %s\n", client_address) else: super().handle_error(request, client_address) class ThreadedWSGIServer(socketserver.ThreadingMixIn, WSGIServer): """A threaded version of the WSGIServer""" pass class ServerHandler(simple_server.ServerHandler): http_version = '1.1' def handle_error(self): # Ignore broken pipe errors, otherwise pass on if not is_broken_pipe_error(): super().handle_error() class WSGIRequestHandler(simple_server.WSGIRequestHandler): protocol_version = 'HTTP/1.1' def address_string(self): # Short-circuit parent method to not call socket.getfqdn return self.client_address[0] def log_message(self, format, *args): extra = { 'request': self.request, 'server_time': self.log_date_time_string(), } if args[1][0] == '4': # 0x16 = Handshake, 0x03 = SSL 3.0 or TLS 1.x if args[0].startswith('\x16\x03'): extra['status_code'] = 500 logger.error( "You're accessing the development server over HTTPS, but " "it only supports HTTP.\n", extra=extra, ) return if args[1].isdigit() and len(args[1]) == 3: status_code = int(args[1]) extra['status_code'] = status_code if status_code >= 500: level = logger.error elif status_code >= 400: level = logger.warning else: level = logger.info else: level = logger.info level(format, *args, extra=extra) def get_environ(self): # Strip all headers with underscores in the name before constructing # the WSGI environ. 
This prevents header-spoofing based on ambiguity # between underscores and dashes both normalized to underscores in WSGI # env vars. Nginx and Apache 2.4+ both do this as well. for k in self.headers: if '_' in k: del self.headers[k] return super().get_environ() def handle(self): """Copy of WSGIRequestHandler.handle() but with different ServerHandler""" self.raw_requestline = self.rfile.readline(65537) if len(self.raw_requestline) > 65536: self.requestline = '' self.request_version = '' self.command = '' self.send_error(414) return if not self.parse_request(): # An error code has been sent, just exit return handler = ServerHandler( self.rfile, self.wfile, self.get_stderr(), self.get_environ() ) handler.request_handler = self # backpointer for logging handler.run(self.server.get_app()) def run(addr, port, wsgi_handler, ipv6=False, threading=False, server_cls=WSGIServer): server_address = (addr, port) if threading: httpd_cls = type('WSGIServer', (socketserver.ThreadingMixIn, server_cls), {}) else: httpd_cls = server_cls httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=ipv6) if threading: # ThreadingMixIn.daemon_threads indicates how threads will behave on an # abrupt shutdown; like quitting the server by the user or restarting # by the auto-reloader. True means the server will not wait for thread # termination before it quits. This will make auto-reloader faster # and will prevent the need to kill the server manually if a thread # isn't terminating correctly. httpd.daemon_threads = True httpd.set_app(wsgi_handler) httpd.serve_forever()
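
# Example (an illustrative sketch, not part of the module above): run() can
# serve any WSGI callable, which is handy for exercising WSGIRequestHandler's
# logging without a full Django project. The address and port are arbitrary,
# and the serving call blocks, so it is left commented out.
def example_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'Hello from the development server\n']

# run('127.0.0.1', 8000, example_app, threading=True)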
""" The temp module provides a NamedTemporaryFile that can be reopened in the same process on any platform. Most platforms use the standard Python tempfile.NamedTemporaryFile class, but Windows users are given a custom class. This is needed because the Python implementation of NamedTemporaryFile uses the O_TEMPORARY flag under Windows, which prevents the file from being reopened if the same flag is not provided [1][2]. Note that this does not address the more general issue of opening a file for writing and reading in multiple processes in a manner that works across platforms. The custom version of NamedTemporaryFile doesn't support the same keyword arguments available in tempfile.NamedTemporaryFile. 1: https://mail.python.org/pipermail/python-list/2005-December/336957.html 2: http://bugs.python.org/issue14243 """ import os import tempfile from django.core.files.utils import FileProxyMixin __all__ = ('NamedTemporaryFile', 'gettempdir',) if os.name == 'nt': class TemporaryFile(FileProxyMixin): """ Temporary file object constructor that supports reopening of the temporary file in Windows. Unlike tempfile.NamedTemporaryFile from the standard library, __init__() doesn't support the 'delete', 'buffering', 'encoding', or 'newline' keyword arguments. """ def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='', dir=None): fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir) self.name = name self.file = os.fdopen(fd, mode, bufsize) self.close_called = False # Because close can be called during shutdown # we need to cache os.unlink and access it # as self.unlink only unlink = os.unlink def close(self): if not self.close_called: self.close_called = True try: self.file.close() except (OSError, IOError): pass try: self.unlink(self.name) except OSError: pass def __del__(self): self.close() def __enter__(self): self.file.__enter__() return self def __exit__(self, exc, value, tb): self.file.__exit__(exc, value, tb) NamedTemporaryFile = TemporaryFile else: NamedTemporaryFile = tempfile.NamedTemporaryFile gettempdir = tempfile.gettempdir
from django.core.files.base import File __all__ = ['File']
""" Base file upload handler classes, and the built-in concrete subclasses """ from io import BytesIO from django.conf import settings from django.core.files.uploadedfile import ( InMemoryUploadedFile, TemporaryUploadedFile, ) from django.utils.module_loading import import_string __all__ = [ 'UploadFileException', 'StopUpload', 'SkipFile', 'FileUploadHandler', 'TemporaryFileUploadHandler', 'MemoryFileUploadHandler', 'load_handler', 'StopFutureHandlers' ] class UploadFileException(Exception): """ Any error having to do with uploading files. """ pass class StopUpload(UploadFileException): """ This exception is raised when an upload must abort. """ def __init__(self, connection_reset=False): """ If ``connection_reset`` is ``True``, Django knows will halt the upload without consuming the rest of the upload. This will cause the browser to show a "connection reset" error. """ self.connection_reset = connection_reset def __str__(self): if self.connection_reset: return 'StopUpload: Halt current upload.' else: return 'StopUpload: Consume request data, then halt.' class SkipFile(UploadFileException): """ This exception is raised by an upload handler that wants to skip a given file. """ pass class StopFutureHandlers(UploadFileException): """ Upload handers that have handled a file and do not want future handlers to run should raise this exception instead of returning None. """ pass class FileUploadHandler: """ Base class for streaming upload handlers. """ chunk_size = 64 * 2 ** 10 # : The default chunk size is 64 KB. def __init__(self, request=None): self.file_name = None self.content_type = None self.content_length = None self.charset = None self.content_type_extra = None self.request = request def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): """ Handle the raw input from the client. Parameters: :input_data: An object that supports reading via .read(). :META: ``request.META``. :content_length: The (integer) value of the Content-Length header from the client. :boundary: The boundary from the Content-Type header. Be sure to prepend two '--'. """ pass def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None): """ Signal that a new file has been started. Warning: As with any data from the client, you should not trust content_length (and sometimes won't even get it). """ self.field_name = field_name self.file_name = file_name self.content_type = content_type self.content_length = content_length self.charset = charset self.content_type_extra = content_type_extra def receive_data_chunk(self, raw_data, start): """ Receive data from the streamed upload parser. ``start`` is the position in the file of the chunk. """ raise NotImplementedError('subclasses of FileUploadHandler must provide a receive_data_chunk() method') def file_complete(self, file_size): """ Signal that a file has completed. File size corresponds to the actual size accumulated by all the chunks. Subclasses should return a valid ``UploadedFile`` object. """ raise NotImplementedError('subclasses of FileUploadHandler must provide a file_complete() method') def upload_complete(self): """ Signal that the upload is complete. Subclasses should perform cleanup that is necessary for this handler. """ pass class TemporaryFileUploadHandler(FileUploadHandler): """ Upload handler that streams data into a temporary file. """ def new_file(self, *args, **kwargs): """ Create the file object to append to as data is coming in. 
""" super().new_file(*args, **kwargs) self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra) def receive_data_chunk(self, raw_data, start): self.file.write(raw_data) def file_complete(self, file_size): self.file.seek(0) self.file.size = file_size return self.file class MemoryFileUploadHandler(FileUploadHandler): """ File upload handler to stream uploads into memory (used for small files). """ def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): """ Use the content_length to signal whether or not this handler should be used. """ # Check the content-length header to see if we should # If the post is too large, we cannot use the Memory handler. self.activated = content_length <= settings.FILE_UPLOAD_MAX_MEMORY_SIZE def new_file(self, *args, **kwargs): super().new_file(*args, **kwargs) if self.activated: self.file = BytesIO() raise StopFutureHandlers() def receive_data_chunk(self, raw_data, start): """Add the data to the BytesIO file.""" if self.activated: self.file.write(raw_data) else: return raw_data def file_complete(self, file_size): """Return a file object if this handler is activated.""" if not self.activated: return self.file.seek(0) return InMemoryUploadedFile( file=self.file, field_name=self.field_name, name=self.file_name, content_type=self.content_type, size=file_size, charset=self.charset, content_type_extra=self.content_type_extra ) def load_handler(path, *args, **kwargs): """ Given a path to a handler, return an instance of that handler. E.g.:: >>> from django.http import HttpRequest >>> request = HttpRequest() >>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request) <TemporaryFileUploadHandler object at 0x...> """ return import_string(path)(*args, **kwargs)
import os
from io import BytesIO, StringIO, UnsupportedOperation

from django.core.files.utils import FileProxyMixin
from django.utils.functional import cached_property


class File(FileProxyMixin):
    DEFAULT_CHUNK_SIZE = 64 * 2 ** 10

    def __init__(self, file, name=None):
        self.file = file
        if name is None:
            name = getattr(file, 'name', None)
        self.name = name
        if hasattr(file, 'mode'):
            self.mode = file.mode

    def __str__(self):
        return self.name or ''

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self or "None")

    def __bool__(self):
        return bool(self.name)

    def __len__(self):
        return self.size

    @cached_property
    def size(self):
        if hasattr(self.file, 'size'):
            return self.file.size
        if hasattr(self.file, 'name'):
            try:
                return os.path.getsize(self.file.name)
            except (OSError, TypeError):
                pass
        if hasattr(self.file, 'tell') and hasattr(self.file, 'seek'):
            pos = self.file.tell()
            self.file.seek(0, os.SEEK_END)
            size = self.file.tell()
            self.file.seek(pos)
            return size
        raise AttributeError("Unable to determine the file's size.")

    def chunks(self, chunk_size=None):
        """
        Read the file and yield chunks of ``chunk_size`` bytes (defaults to
        ``File.DEFAULT_CHUNK_SIZE``).
        """
        chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
        try:
            self.seek(0)
        except (AttributeError, UnsupportedOperation):
            pass

        while True:
            data = self.read(chunk_size)
            if not data:
                break
            yield data

    def multiple_chunks(self, chunk_size=None):
        """
        Return ``True`` if you can expect multiple chunks.

        NB: If a particular file representation is in memory, subclasses
        should always return ``False`` -- there's no good reason to read from
        memory in chunks.
        """
        return self.size > (chunk_size or self.DEFAULT_CHUNK_SIZE)

    def __iter__(self):
        # Iterate over this file-like object by newlines
        buffer_ = None
        for chunk in self.chunks():
            for line in chunk.splitlines(True):
                if buffer_:
                    if endswith_cr(buffer_) and not equals_lf(line):
                        # Line split after a \r newline; yield buffer_.
                        yield buffer_
                        # Continue with line.
                    else:
                        # Line either split without a newline (line
                        # continues after buffer_) or with \r\n
                        # newline (line == b'\n').
                        line = buffer_ + line
                    # buffer_ handled, clear it.
                    buffer_ = None

                # If this is the end of a \n or \r\n line, yield.
                if endswith_lf(line):
                    yield line
                else:
                    buffer_ = line

        if buffer_ is not None:
            yield buffer_

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close()

    def open(self, mode=None):
        if not self.closed:
            self.seek(0)
        elif self.name and os.path.exists(self.name):
            self.file = open(self.name, mode or self.mode)
        else:
            raise ValueError("The file cannot be reopened.")
        return self

    def close(self):
        self.file.close()


class ContentFile(File):
    """
    A File-like object that takes just raw content, rather than an actual
    file.
    """
    def __init__(self, content, name=None):
        stream_class = StringIO if isinstance(content, str) else BytesIO
        super().__init__(stream_class(content), name=name)
        self.size = len(content)

    def __str__(self):
        return 'Raw content'

    def __bool__(self):
        return True

    def open(self, mode=None):
        self.seek(0)
        return self

    def close(self):
        pass

    def write(self, data):
        self.__dict__.pop('size', None)  # Clear the computed size.
return self.file.write(data) def endswith_cr(line): """Return True if line (a text or byte string) ends with '\r'.""" return line.endswith('\r' if isinstance(line, str) else b'\r') def endswith_lf(line): """Return True if line (a text or byte string) ends with '\n'.""" return line.endswith('\n' if isinstance(line, str) else b'\n') def equals_lf(line): """Return True if line (a text or byte string) equals '\n'.""" return line == ('\n' if isinstance(line, str) else b'\n')
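
# Example (illustrative assertions, not part of the module above):
# ContentFile wraps raw bytes (or text) in the File interface, so it can be
# passed anywhere a File is expected.
f = ContentFile(b'line one\nline two\n', name='notes.txt')
assert f.size == 18
assert list(f) == [b'line one\n', b'line two\n']  # __iter__ yields lines
assert b''.join(f.chunks(chunk_size=4)) == b'line one\nline two\n'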
""" Move a file in the safest way possible:: >>> from django.core.files.move import file_move_safe >>> file_move_safe("/tmp/old_file", "/tmp/new_file") """ import errno import os from shutil import copystat from django.core.files import locks __all__ = ['file_move_safe'] def _samefile(src, dst): # Macintosh, Unix. if hasattr(os.path, 'samefile'): try: return os.path.samefile(src, dst) except OSError: return False # All other platforms: check for same pathname. return (os.path.normcase(os.path.abspath(src)) == os.path.normcase(os.path.abspath(dst))) def file_move_safe(old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False): """ Move a file from one location to another in the safest way possible. First, try ``os.rename``, which is simple but will break across filesystems. If that fails, stream manually from one file to another in pure Python. If the destination file exists and ``allow_overwrite`` is ``False``, raise ``IOError``. """ # There's no reason to move if we don't have to. if _samefile(old_file_name, new_file_name): return try: if not allow_overwrite and os.access(new_file_name, os.F_OK): raise IOError("Destination file %s exists and allow_overwrite is False" % new_file_name) os.rename(old_file_name, new_file_name) return except OSError: # OSError happens with os.rename() if moving to another filesystem or # when moving opened files on certain operating systems. pass # first open the old file, so that it won't go away with open(old_file_name, 'rb') as old_file: # now open the new file, not forgetting allow_overwrite fd = os.open(new_file_name, (os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0) | (os.O_EXCL if not allow_overwrite else 0))) try: locks.lock(fd, locks.LOCK_EX) current_chunk = None while current_chunk != b'': current_chunk = old_file.read(chunk_size) os.write(fd, current_chunk) finally: locks.unlock(fd) os.close(fd) try: copystat(old_file_name, new_file_name) except PermissionError as e: # Certain filesystems (e.g. CIFS) fail to copy the file's metadata if # the type of the destination filesystem isn't the same as the source # filesystem; ignore that. if e.errno != errno.EPERM: raise try: os.remove(old_file_name) except PermissionError as e: # Certain operating systems (Cygwin and Windows) # fail when deleting opened files, ignore it. (For the # systems where this happens, temporary files will be auto-deleted # on close anyway.) if getattr(e, 'winerror', 0) != 32: raise
""" Utility functions for handling images. Requires Pillow as you might imagine. """ import struct import zlib from django.core.files import File class ImageFile(File): """ A mixin for use alongside django.core.files.base.File, which provides additional features for dealing with images. """ @property def width(self): return self._get_image_dimensions()[0] @property def height(self): return self._get_image_dimensions()[1] def _get_image_dimensions(self): if not hasattr(self, '_dimensions_cache'): close = self.closed self.open() self._dimensions_cache = get_image_dimensions(self, close=close) return self._dimensions_cache def get_image_dimensions(file_or_path, close=False): """ Return the (width, height) of an image, given an open file or a path. Set 'close' to True to close the file at the end if it is initially in an open state. """ from PIL import ImageFile as PillowImageFile p = PillowImageFile.Parser() if hasattr(file_or_path, 'read'): file = file_or_path file_pos = file.tell() file.seek(0) else: file = open(file_or_path, 'rb') close = True try: # Most of the time Pillow only needs a small chunk to parse the image # and get the dimensions, but with some TIFF files Pillow needs to # parse the whole file. chunk_size = 1024 while 1: data = file.read(chunk_size) if not data: break try: p.feed(data) except zlib.error as e: # ignore zlib complaining on truncated stream, just feed more # data to parser (ticket #19457). if e.args[0].startswith("Error -5"): pass else: raise except struct.error: # Ignore PIL failing on a too short buffer when reads return # less bytes than expected. Skip and feed more data to the # parser (ticket #24544). pass if p.image: return p.image.size chunk_size *= 2 return (None, None) finally: if close: file.close() else: file.seek(file_pos)
class FileProxyMixin:
    """
    A mixin class used to forward file methods to an underlying
    file object. The internal file object has to be called "file"::

        class FileProxy(FileProxyMixin):
            def __init__(self, file):
                self.file = file
    """

    encoding = property(lambda self: self.file.encoding)
    fileno = property(lambda self: self.file.fileno)
    flush = property(lambda self: self.file.flush)
    isatty = property(lambda self: self.file.isatty)
    newlines = property(lambda self: self.file.newlines)
    read = property(lambda self: self.file.read)
    readinto = property(lambda self: self.file.readinto)
    readline = property(lambda self: self.file.readline)
    readlines = property(lambda self: self.file.readlines)
    seek = property(lambda self: self.file.seek)
    tell = property(lambda self: self.file.tell)
    truncate = property(lambda self: self.file.truncate)
    write = property(lambda self: self.file.write)
    writelines = property(lambda self: self.file.writelines)

    @property
    def closed(self):
        return not self.file or self.file.closed

    def readable(self):
        if self.closed:
            return False
        if hasattr(self.file, 'readable'):
            return self.file.readable()
        return True

    def writable(self):
        if self.closed:
            return False
        if hasattr(self.file, 'writable'):
            return self.file.writable()
        return 'w' in getattr(self.file, 'mode', '')

    def seekable(self):
        if self.closed:
            return False
        if hasattr(self.file, 'seekable'):
            return self.file.seekable()
        return True

    def __iter__(self):
        return iter(self.file)
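
# Example (illustrative, not part of the module above): the pattern from the
# docstring, wrapping an in-memory stream and using it through the proxied
# file methods.
from io import BytesIO


class FileProxy(FileProxyMixin):
    def __init__(self, file):
        self.file = file


proxy = FileProxy(BytesIO(b'abc'))
assert proxy.read() == b'abc'
assert proxy.seekable() and proxy.readable() and not proxy.closed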
""" Classes representing uploaded files. """ import os from io import BytesIO from django.conf import settings from django.core.files import temp as tempfile from django.core.files.base import File __all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile', 'SimpleUploadedFile') class UploadedFile(File): """ An abstract uploaded file (``TemporaryUploadedFile`` and ``InMemoryUploadedFile`` are the built-in concrete subclasses). An ``UploadedFile`` object behaves somewhat like a file object and represents some file data that the user submitted with a form. """ def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None): super().__init__(file, name) self.size = size self.content_type = content_type self.charset = charset self.content_type_extra = content_type_extra def __repr__(self): return "<%s: %s (%s)>" % (self.__class__.__name__, self.name, self.content_type) def _get_name(self): return self._name def _set_name(self, name): # Sanitize the file name so that it can't be dangerous. if name is not None: # Just use the basename of the file -- anything else is dangerous. name = os.path.basename(name) # File names longer than 255 characters can cause problems on older OSes. if len(name) > 255: name, ext = os.path.splitext(name) ext = ext[:255] name = name[:255 - len(ext)] + ext self._name = name name = property(_get_name, _set_name) class TemporaryUploadedFile(UploadedFile): """ A file uploaded to a temporary location (i.e. stream-to-disk). """ def __init__(self, name, content_type, size, charset, content_type_extra=None): _, ext = os.path.splitext(name) file = tempfile.NamedTemporaryFile(suffix='.upload' + ext, dir=settings.FILE_UPLOAD_TEMP_DIR) super().__init__(file, name, content_type, size, charset, content_type_extra) def temporary_file_path(self): """Return the full path of this file.""" return self.file.name def close(self): try: return self.file.close() except FileNotFoundError: # The file was moved or deleted before the tempfile could unlink # it. Still sets self.file.close_called and calls # self.file.file.close() before the exception. pass class InMemoryUploadedFile(UploadedFile): """ A file uploaded into memory (i.e. stream-to-memory). """ def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None): super().__init__(file, name, content_type, size, charset, content_type_extra) self.field_name = field_name def open(self, mode=None): self.file.seek(0) return self def chunks(self, chunk_size=None): self.file.seek(0) yield self.read() def multiple_chunks(self, chunk_size=None): # Since it's in memory, we'll never have multiple chunks. return False class SimpleUploadedFile(InMemoryUploadedFile): """ A simple representation of a file, which just has content, size, and a name. """ def __init__(self, name, content, content_type='text/plain'): content = content or b'' super().__init__(BytesIO(content), None, name, content_type, len(content), None, None) @classmethod def from_dict(cls, file_dict): """ Create a SimpleUploadedFile object from a dictionary with keys: - filename - content-type - content """ return cls(file_dict['filename'], file_dict['content'], file_dict.get('content-type', 'text/plain'))
import os from datetime import datetime from urllib.parse import urljoin from django.conf import settings from django.core.exceptions import SuspiciousFileOperation from django.core.files import File, locks from django.core.files.move import file_move_safe from django.core.signals import setting_changed from django.utils import timezone from django.utils._os import safe_join from django.utils.crypto import get_random_string from django.utils.deconstruct import deconstructible from django.utils.encoding import filepath_to_uri from django.utils.functional import LazyObject, cached_property from django.utils.module_loading import import_string from django.utils.text import get_valid_filename __all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage') class Storage: """ A base storage class, providing some default behaviors that all other storage systems can inherit or override, as necessary. """ # The following methods represent a public interface to private methods. # These shouldn't be overridden by subclasses unless absolutely necessary. def open(self, name, mode='rb'): """Retrieve the specified file from storage.""" return self._open(name, mode) def save(self, name, content, max_length=None): """ Save new content to the file specified by name. The content should be a proper File object or any python file-like object, ready to be read from the beginning. """ # Get the proper name for the file, as it will actually be saved. if name is None: name = content.name if not hasattr(content, 'chunks'): content = File(content, name) name = self.get_available_name(name, max_length=max_length) return self._save(name, content) # These methods are part of the public API, with default implementations. def get_valid_name(self, name): """ Return a filename, based on the provided filename, that's suitable for use in the target storage system. """ return get_valid_filename(name) def get_available_name(self, name, max_length=None): """ Return a filename that's free on the target storage system and available for new content to be written to. """ dir_name, file_name = os.path.split(name) file_root, file_ext = os.path.splitext(file_name) # If the filename already exists, add an underscore and a random 7 # character alphanumeric string (before the file extension, if one # exists) to the filename until the generated filename doesn't exist. # Truncate original name if required, so the new filename does not # exceed the max_length. while self.exists(name) or (max_length and len(name) > max_length): # file_ext includes the dot. name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext)) if max_length is None: continue # Truncate file_root if max_length exceeded. truncation = len(name) - max_length if truncation > 0: file_root = file_root[:-truncation] # Entire file_root was truncated in attempt to find an available filename. if not file_root: raise SuspiciousFileOperation( 'Storage can not find an available filename for "%s". ' 'Please make sure that the corresponding file field ' 'allows sufficient "max_length".' % name ) name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext)) return name def generate_filename(self, filename): """ Validate the filename by calling get_valid_name() and return a filename to be passed to the save() method. """ # `filename` may include a path as returned by FileField.upload_to. 
dirname, filename = os.path.split(filename) return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename))) def path(self, name): """ Return a local filesystem path where the file can be retrieved using Python's built-in open() function. Storage systems that can't be accessed using open() should *not* implement this method. """ raise NotImplementedError("This backend doesn't support absolute paths.") # The following methods form the public API for storage systems, but with # no default implementations. Subclasses must implement *all* of these. def delete(self, name): """ Delete the specified file from the storage system. """ raise NotImplementedError('subclasses of Storage must provide a delete() method') def exists(self, name): """ Return True if a file referenced by the given name already exists in the storage system, or False if the name is available for a new file. """ raise NotImplementedError('subclasses of Storage must provide an exists() method') def listdir(self, path): """ List the contents of the specified path. Return a 2-tuple of lists: the first item being directories, the second item being files. """ raise NotImplementedError('subclasses of Storage must provide a listdir() method') def size(self, name): """ Return the total size, in bytes, of the file specified by name. """ raise NotImplementedError('subclasses of Storage must provide a size() method') def url(self, name): """ Return an absolute URL where the file's contents can be accessed directly by a Web browser. """ raise NotImplementedError('subclasses of Storage must provide a url() method') def get_accessed_time(self, name): """ Return the last accessed time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True. """ raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method') def get_created_time(self, name): """ Return the creation time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True. """ raise NotImplementedError('subclasses of Storage must provide a get_created_time() method') def get_modified_time(self, name): """ Return the last modified time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True. 
""" raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method') @deconstructible class FileSystemStorage(Storage): """ Standard filesystem storage """ def __init__(self, location=None, base_url=None, file_permissions_mode=None, directory_permissions_mode=None): self._location = location self._base_url = base_url self._file_permissions_mode = file_permissions_mode self._directory_permissions_mode = directory_permissions_mode setting_changed.connect(self._clear_cached_properties) def _clear_cached_properties(self, setting, **kwargs): """Reset setting based property values.""" if setting == 'MEDIA_ROOT': self.__dict__.pop('base_location', None) self.__dict__.pop('location', None) elif setting == 'MEDIA_URL': self.__dict__.pop('base_url', None) elif setting == 'FILE_UPLOAD_PERMISSIONS': self.__dict__.pop('file_permissions_mode', None) elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS': self.__dict__.pop('directory_permissions_mode', None) def _value_or_setting(self, value, setting): return setting if value is None else value @cached_property def base_location(self): return self._value_or_setting(self._location, settings.MEDIA_ROOT) @cached_property def location(self): return os.path.abspath(self.base_location) @cached_property def base_url(self): if self._base_url is not None and not self._base_url.endswith('/'): self._base_url += '/' return self._value_or_setting(self._base_url, settings.MEDIA_URL) @cached_property def file_permissions_mode(self): return self._value_or_setting(self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS) @cached_property def directory_permissions_mode(self): return self._value_or_setting(self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS) def _open(self, name, mode='rb'): return File(open(self.path(name), mode)) def _save(self, name, content): full_path = self.path(name) # Create any intermediate directories that do not exist. directory = os.path.dirname(full_path) if not os.path.exists(directory): try: if self.directory_permissions_mode is not None: # os.makedirs applies the global umask, so we reset it, # for consistency with file_permissions_mode behavior. old_umask = os.umask(0) try: os.makedirs(directory, self.directory_permissions_mode) finally: os.umask(old_umask) else: os.makedirs(directory) except FileNotFoundError: # There's a race between os.path.exists() and os.makedirs(). # If os.makedirs() fails with FileNotFoundError, the directory # was created concurrently. pass if not os.path.isdir(directory): raise IOError("%s exists and is not a directory." % directory) # There's a potential race condition between get_available_name and # saving the file; it's possible that two threads might return the # same name, at which point all sorts of fun happens. So we need to # try to create the file, but if it already exists we have to go back # to get_available_name() and try again. while True: try: # This file has a file path that we can move. if hasattr(content, 'temporary_file_path'): file_move_safe(content.temporary_file_path(), full_path) # This is a normal uploadedfile that we can stream. else: # This fun binary flag incantation makes os.open throw an # OSError if the file already exists before we open it. flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0)) # The current umask value is masked out by os.open! 
fd = os.open(full_path, flags, 0o666) _file = None try: locks.lock(fd, locks.LOCK_EX) for chunk in content.chunks(): if _file is None: mode = 'wb' if isinstance(chunk, bytes) else 'wt' _file = os.fdopen(fd, mode) _file.write(chunk) finally: locks.unlock(fd) if _file is not None: _file.close() else: os.close(fd) except FileExistsError: # A new name is needed if the file exists. name = self.get_available_name(name) full_path = self.path(name) else: # OK, the file save worked. Break out of the loop. break if self.file_permissions_mode is not None: os.chmod(full_path, self.file_permissions_mode) # Store filenames with forward slashes, even on Windows. return name.replace('\\', '/') def delete(self, name): assert name, "The name argument is not allowed to be empty." name = self.path(name) # If the file or directory exists, delete it from the filesystem. try: if os.path.isdir(name): os.rmdir(name) else: os.remove(name) except FileNotFoundError: # FileNotFoundError is raised if the file or directory was removed # concurrently. pass def exists(self, name): return os.path.exists(self.path(name)) def listdir(self, path): path = self.path(path) directories, files = [], [] for entry in os.listdir(path): if os.path.isdir(os.path.join(path, entry)): directories.append(entry) else: files.append(entry) return directories, files def path(self, name): return safe_join(self.location, name) def size(self, name): return os.path.getsize(self.path(name)) def url(self, name): if self.base_url is None: raise ValueError("This file is not accessible via a URL.") url = filepath_to_uri(name) if url is not None: url = url.lstrip('/') return urljoin(self.base_url, url) def _datetime_from_timestamp(self, ts): """ If timezone support is enabled, make an aware datetime object in UTC; otherwise make a naive one in the local timezone. """ return datetime.fromtimestamp(ts, timezone.utc if settings.USE_TZ else None) def get_accessed_time(self, name): return self._datetime_from_timestamp(os.path.getatime(self.path(name))) def get_created_time(self, name): return self._datetime_from_timestamp(os.path.getctime(self.path(name))) def get_modified_time(self, name): return self._datetime_from_timestamp(os.path.getmtime(self.path(name))) def get_storage_class(import_path=None): return import_string(import_path or settings.DEFAULT_FILE_STORAGE) class DefaultStorage(LazyObject): def _setup(self): self._wrapped = get_storage_class()() default_storage = DefaultStorage()
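
# Example (an illustrative sketch, not part of the module above): end-to-end
# use of FileSystemStorage. Passing an explicit location and base_url keeps
# the demo independent of MEDIA_ROOT/MEDIA_URL, but the permission lookups
# still read settings, so configure the global defaults if nothing is
# configured yet.
import tempfile

from django.core.files.base import ContentFile

if not settings.configured:
    settings.configure()  # the global defaults suffice for this sketch

example_storage = FileSystemStorage(location=tempfile.mkdtemp(), base_url='/media/')
name = example_storage.save('greeting.txt', ContentFile(b'hi'))
assert example_storage.exists(name) and example_storage.size(name) == 2
assert example_storage.url(name) == '/media/greeting.txt'
example_storage.delete(name)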
""" Portable file locking utilities. Based partially on an example by Jonathan Feignberg in the Python Cookbook [1] (licensed under the Python Software License) and a ctypes port by Anatoly Techtonik for Roundup [2] (license [3]). [1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203 [2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py [3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt Example Usage:: >>> from django.core.files import locks >>> with open('./file', 'wb') as f: ... locks.lock(f, locks.LOCK_EX) ... f.write('Django') """ import os __all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock') def _fd(f): """Get a filedescriptor from something which could be a file or an fd.""" return f.fileno() if hasattr(f, 'fileno') else f if os.name == 'nt': import msvcrt from ctypes import (sizeof, c_ulong, c_void_p, c_int64, Structure, Union, POINTER, windll, byref) from ctypes.wintypes import BOOL, DWORD, HANDLE LOCK_SH = 0 # the default LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK # --- Adapted from the pyserial project --- # detect size of ULONG_PTR if sizeof(c_ulong) != sizeof(c_void_p): ULONG_PTR = c_int64 else: ULONG_PTR = c_ulong PVOID = c_void_p # --- Union inside Structure by stackoverflow:3480240 --- class _OFFSET(Structure): _fields_ = [ ('Offset', DWORD), ('OffsetHigh', DWORD)] class _OFFSET_UNION(Union): _anonymous_ = ['_offset'] _fields_ = [ ('_offset', _OFFSET), ('Pointer', PVOID)] class OVERLAPPED(Structure): _anonymous_ = ['_offset_union'] _fields_ = [ ('Internal', ULONG_PTR), ('InternalHigh', ULONG_PTR), ('_offset_union', _OFFSET_UNION), ('hEvent', HANDLE)] LPOVERLAPPED = POINTER(OVERLAPPED) # --- Define function prototypes for extra safety --- LockFileEx = windll.kernel32.LockFileEx LockFileEx.restype = BOOL LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED] UnlockFileEx = windll.kernel32.UnlockFileEx UnlockFileEx.restype = BOOL UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED] def lock(f, flags): hfile = msvcrt.get_osfhandle(_fd(f)) overlapped = OVERLAPPED() ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped)) return bool(ret) def unlock(f): hfile = msvcrt.get_osfhandle(_fd(f)) overlapped = OVERLAPPED() ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped)) return bool(ret) else: try: import fcntl LOCK_SH = fcntl.LOCK_SH # shared lock LOCK_NB = fcntl.LOCK_NB # non-blocking LOCK_EX = fcntl.LOCK_EX except (ImportError, AttributeError): # File locking is not supported. LOCK_EX = LOCK_SH = LOCK_NB = 0 # Dummy functions that don't do anything. def lock(f, flags): # File is not locked return False def unlock(f): # File is unlocked return True else: def lock(f, flags): ret = fcntl.flock(_fd(f), flags) return ret == 0 def unlock(f): ret = fcntl.flock(_fd(f), fcntl.LOCK_UN) return ret == 0
from itertools import chain from django.utils.itercompat import is_iterable class Tags: """ Built-in tags for internal checks. """ admin = 'admin' caches = 'caches' compatibility = 'compatibility' database = 'database' models = 'models' security = 'security' signals = 'signals' templates = 'templates' urls = 'urls' class CheckRegistry: def __init__(self): self.registered_checks = set() self.deployment_checks = set() def register(self, check=None, *tags, **kwargs): """ Can be used as a function or a decorator. Register given function `f` labeled with given `tags`. The function should receive **kwargs and return list of Errors and Warnings. Example:: registry = CheckRegistry() @registry.register('mytag', 'anothertag') def my_check(apps, **kwargs): # ... perform checks and collect `errors` ... return errors # or registry.register(my_check, 'mytag', 'anothertag') """ def inner(check): check.tags = tags checks = self.deployment_checks if kwargs.get('deploy') else self.registered_checks checks.add(check) return check if callable(check): return inner(check) else: if check: tags += (check,) return inner def run_checks(self, app_configs=None, tags=None, include_deployment_checks=False): """ Run all registered checks and return list of Errors and Warnings. """ errors = [] checks = self.get_checks(include_deployment_checks) if tags is not None: checks = [check for check in checks if not set(check.tags).isdisjoint(tags)] else: # By default, 'database'-tagged checks are not run as they do more # than mere static code analysis. checks = [check for check in checks if Tags.database not in check.tags] for check in checks: new_errors = check(app_configs=app_configs) assert is_iterable(new_errors), ( "The function %r did not return a list. All functions registered " "with the checks registry must return a list." % check) errors.extend(new_errors) return errors def tag_exists(self, tag, include_deployment_checks=False): return tag in self.tags_available(include_deployment_checks) def tags_available(self, deployment_checks=False): return set(chain.from_iterable( check.tags for check in self.get_checks(deployment_checks) )) def get_checks(self, include_deployment_checks=False): checks = list(self.registered_checks) if include_deployment_checks: checks.extend(self.deployment_checks) return checks registry = CheckRegistry() register = registry.register run_checks = registry.run_checks tag_exists = registry.tag_exists
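
# Example (an illustrative sketch, not part of the module above): the
# registry API in action, using a private CheckRegistry instance so the
# global registry stays untouched. The tag and message id are made up for
# the example.
from django.core.checks.messages import Warning

example_registry = CheckRegistry()


@example_registry.register('example')
def example_check(app_configs=None, **kwargs):
    return [Warning('Always warns.', id='example.W001')]


found = example_registry.run_checks(tags=['example'])
assert len(found) == 1 and found[0].id == 'example.W001'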
import inspect import types from itertools import chain from django.apps import apps from django.core.checks import Error, Tags, register @register(Tags.models) def check_all_models(app_configs=None, **kwargs): errors = [] if app_configs is None: models = apps.get_models() else: models = chain.from_iterable(app_config.get_models() for app_config in app_configs) for model in models: if not inspect.ismethod(model.check): errors.append( Error( "The '%s.check()' class method is currently overridden by %r." % (model.__name__, model.check), obj=model, id='models.E020' ) ) else: errors.extend(model.check(**kwargs)) return errors def _check_lazy_references(apps, ignore=None): """ Ensure all lazy (i.e. string) model references have been resolved. Lazy references are used in various places throughout Django, primarily in related fields and model signals. Identify those common cases and provide more helpful error messages for them. The ignore parameter is used by StateApps to exclude swappable models from this check. """ pending_models = set(apps._pending_operations) - (ignore or set()) # Short circuit if there aren't any errors. if not pending_models: return [] from django.db.models import signals model_signals = { signal: name for name, signal in vars(signals).items() if isinstance(signal, signals.ModelSignal) } def extract_operation(obj): """ Take a callable found in Apps._pending_operations and identify the original callable passed to Apps.lazy_model_operation(). If that callable was a partial, return the inner, non-partial function and any arguments and keyword arguments that were supplied with it. obj is a callback defined locally in Apps.lazy_model_operation() and annotated there with a `func` attribute so as to imitate a partial. """ operation, args, keywords = obj, [], {} while hasattr(operation, 'func'): # The or clauses are redundant but work around a bug (#25945) in # functools.partial in Python <= 3.5.1. args.extend(getattr(operation, 'args', []) or []) keywords.update(getattr(operation, 'keywords', {}) or {}) operation = operation.func return operation, args, keywords def app_model_error(model_key): try: apps.get_app_config(model_key[0]) model_error = "app '%s' doesn't provide model '%s'" % model_key except LookupError: model_error = "app '%s' isn't installed" % model_key[0] return model_error # Here are several functions which return CheckMessage instances for the # most common usages of lazy operations throughout Django. These functions # take the model that was being waited on as an (app_label, modelname) # pair, the original lazy function, and its positional and keyword args as # determined by extract_operation(). def field_error(model_key, func, args, keywords): error_msg = ( "The field %(field)s was declared with a lazy reference " "to '%(model)s', but %(model_error)s." ) params = { 'model': '.'.join(model_key), 'field': keywords['field'], 'model_error': app_model_error(model_key), } return Error(error_msg % params, obj=keywords['field'], id='fields.E307') def signal_connect_error(model_key, func, args, keywords): error_msg = ( "%(receiver)s was connected to the '%(signal)s' signal with a " "lazy reference to the sender '%(model)s', but %(model_error)s." ) receiver = args[0] # The receiver is either a function or an instance of class # defining a `__call__` method. 
if isinstance(receiver, types.FunctionType): description = "The function '%s'" % receiver.__name__ elif isinstance(receiver, types.MethodType): description = "Bound method '%s.%s'" % (receiver.__self__.__class__.__name__, receiver.__name__) else: description = "An instance of class '%s'" % receiver.__class__.__name__ signal_name = model_signals.get(func.__self__, 'unknown') params = { 'model': '.'.join(model_key), 'receiver': description, 'signal': signal_name, 'model_error': app_model_error(model_key), } return Error(error_msg % params, obj=receiver.__module__, id='signals.E001') def default_error(model_key, func, args, keywords): error_msg = "%(op)s contains a lazy reference to %(model)s, but %(model_error)s." params = { 'op': func, 'model': '.'.join(model_key), 'model_error': app_model_error(model_key), } return Error(error_msg % params, obj=func, id='models.E022') # Maps common uses of lazy operations to corresponding error functions # defined above. If a key maps to None, no error will be produced. # default_error() will be used for usages that don't appear in this dict. known_lazy = { ('django.db.models.fields.related', 'resolve_related_class'): field_error, ('django.db.models.fields.related', 'set_managed'): None, ('django.dispatch.dispatcher', 'connect'): signal_connect_error, } def build_error(model_key, func, args, keywords): key = (func.__module__, func.__name__) error_fn = known_lazy.get(key, default_error) return error_fn(model_key, func, args, keywords) if error_fn else None return sorted(filter(None, ( build_error(model_key, *extract_operation(func)) for model_key in pending_models for func in apps._pending_operations[model_key] )), key=lambda error: error.msg) @register(Tags.models) def check_lazy_references(app_configs=None, **kwargs): return _check_lazy_references(apps)
from django.conf import settings from django.core.cache import DEFAULT_CACHE_ALIAS from . import Error, Tags, register E001 = Error( "You must define a '%s' cache in your CACHES setting." % DEFAULT_CACHE_ALIAS, id='caches.E001', ) @register(Tags.caches) def check_default_cache_is_configured(app_configs, **kwargs): if DEFAULT_CACHE_ALIAS not in settings.CACHES: return [E001] return []
from .messages import ( CRITICAL, DEBUG, ERROR, INFO, WARNING, CheckMessage, Critical, Debug, Error, Info, Warning, ) from .registry import Tags, register, run_checks, tag_exists # Import these to force registration of checks import django.core.checks.caches # NOQA isort:skip import django.core.checks.database # NOQA isort:skip import django.core.checks.model_checks # NOQA isort:skip import django.core.checks.security.base # NOQA isort:skip import django.core.checks.security.csrf # NOQA isort:skip import django.core.checks.security.sessions # NOQA isort:skip import django.core.checks.templates # NOQA isort:skip import django.core.checks.urls # NOQA isort:skip __all__ = [ 'CheckMessage', 'Debug', 'Info', 'Warning', 'Error', 'Critical', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'register', 'run_checks', 'tag_exists', 'Tags', ]
# Levels DEBUG = 10 INFO = 20 WARNING = 30 ERROR = 40 CRITICAL = 50 class CheckMessage: def __init__(self, level, msg, hint=None, obj=None, id=None): assert isinstance(level, int), "The first argument should be level." self.level = level self.msg = msg self.hint = hint self.obj = obj self.id = id def __eq__(self, other): return ( isinstance(other, self.__class__) and all(getattr(self, attr) == getattr(other, attr) for attr in ['level', 'msg', 'hint', 'obj', 'id']) ) def __str__(self): from django.db import models if self.obj is None: obj = "?" elif isinstance(self.obj, models.base.ModelBase): # We need to hardcode ModelBase and Field cases because its __str__ # method doesn't return "applabel.modellabel" and cannot be changed. obj = self.obj._meta.label else: obj = str(self.obj) id = "(%s) " % self.id if self.id else "" hint = "\n\tHINT: %s" % self.hint if self.hint else '' return "%s: %s%s%s" % (obj, id, self.msg, hint) def __repr__(self): return "<%s: level=%r, msg=%r, hint=%r, obj=%r, id=%r>" % \ (self.__class__.__name__, self.level, self.msg, self.hint, self.obj, self.id) def is_serious(self, level=ERROR): return self.level >= level def is_silenced(self): from django.conf import settings return self.id in settings.SILENCED_SYSTEM_CHECKS class Debug(CheckMessage): def __init__(self, *args, **kwargs): super().__init__(DEBUG, *args, **kwargs) class Info(CheckMessage): def __init__(self, *args, **kwargs): super().__init__(INFO, *args, **kwargs) class Warning(CheckMessage): def __init__(self, *args, **kwargs): super().__init__(WARNING, *args, **kwargs) class Error(CheckMessage): def __init__(self, *args, **kwargs): super().__init__(ERROR, *args, **kwargs) class Critical(CheckMessage): def __init__(self, *args, **kwargs): super().__init__(CRITICAL, *args, **kwargs)
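# Illustrative usage sketch, not part of Django: the message id, hint, and
# setting name below are hypothetical. With no obj, __str__ renders '?'.
#
# w = Warning('Setting FOO is deprecated.', hint='Remove it.', id='myapp.W001')
# str(w)          # "?: (myapp.W001) Setting FOO is deprecated.\n\tHINT: Remove it."
# w.is_serious()  # False, because WARNING < ERROR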
940f306f0d960c2f9ee1902bfbda28116b46beb372321d46f8c65ba231aaf56f
from collections import Counter from django.conf import settings from . import Error, Tags, Warning, register @register(Tags.urls) def check_url_config(app_configs, **kwargs): if getattr(settings, 'ROOT_URLCONF', None): from django.urls import get_resolver resolver = get_resolver() return check_resolver(resolver) return [] def check_resolver(resolver): """ Recursively check the resolver. """ check_method = getattr(resolver, 'check', None) if check_method is not None: return check_method() elif not hasattr(resolver, 'resolve'): return get_warning_for_invalid_pattern(resolver) else: return [] @register(Tags.urls) def check_url_namespaces_unique(app_configs, **kwargs): """ Warn if URL namespaces used in applications aren't unique. """ if not getattr(settings, 'ROOT_URLCONF', None): return [] from django.urls import get_resolver resolver = get_resolver() all_namespaces = _load_all_namespaces(resolver) counter = Counter(all_namespaces) non_unique_namespaces = [n for n, count in counter.items() if count > 1] errors = [] for namespace in non_unique_namespaces: errors.append(Warning( "URL namespace '{}' isn't unique. You may not be able to reverse " "all URLs in this namespace".format(namespace), id="urls.W005", )) return errors def _load_all_namespaces(resolver, parents=()): """ Recursively load all namespaces from URL patterns. """ url_patterns = getattr(resolver, 'url_patterns', []) namespaces = [ ':'.join(parents + (url.namespace,)) for url in url_patterns if getattr(url, 'namespace', None) is not None ] for pattern in url_patterns: namespace = getattr(pattern, 'namespace', None) current = parents if namespace is not None: current += (namespace,) namespaces.extend(_load_all_namespaces(pattern, current)) return namespaces def get_warning_for_invalid_pattern(pattern): """ Return a list containing a warning that the pattern is invalid. describe_pattern() cannot be used here, because we cannot rely on the urlpattern having regex or name attributes. """ if isinstance(pattern, str): hint = ( "Try removing the string '{}'. The list of urlpatterns should not " "have a prefix string as the first element.".format(pattern) ) elif isinstance(pattern, tuple): hint = "Try using path() instead of a tuple." else: hint = None return [Error( "Your URL pattern {!r} is invalid. Ensure that urlpatterns is a list " "of path() and/or re_path() instances.".format(pattern), hint=hint, id="urls.E004", )] @register(Tags.urls) def check_url_settings(app_configs, **kwargs): errors = [] for name in ('STATIC_URL', 'MEDIA_URL'): value = getattr(settings, name) if value and not value.endswith('/'): errors.append(E006(name)) return errors def E006(name): return Error( 'The {} setting must end with a slash.'.format(name), id='urls.E006', )
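# Illustrative sketch, not part of Django: check_url_settings() above flags
# URL settings that lack a trailing slash.
#
# E006('STATIC_URL').msg   # 'The STATIC_URL setting must end with a slash.'
# # STATIC_URL = '/static' fails the check; STATIC_URL = '/static/' passes.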
2295f2212f930d31f8a74dfb686da5d2a5a701516da7ca33244b1732f6a822ef
from django.db import connections from . import Tags, register @register(Tags.database) def check_database_backends(*args, **kwargs): issues = [] for conn in connections.all(): issues.extend(conn.validation.check(**kwargs)) return issues
f7fa999ff3165fde22db4f4c56edae4bae8d3d181b296b64fd7c5eb4a733c9f5
import copy from django.conf import settings from . import Error, Tags, register E001 = Error( "You have 'APP_DIRS': True in your TEMPLATES but also specify 'loaders' " "in OPTIONS. Either remove APP_DIRS or remove the 'loaders' option.", id='templates.E001', ) E002 = Error( "'string_if_invalid' in TEMPLATES OPTIONS must be a string but got: {} ({}).", id="templates.E002", ) @register(Tags.templates) def check_setting_app_dirs_loaders(app_configs, **kwargs): return [E001] if any( conf.get('APP_DIRS') and 'loaders' in conf.get('OPTIONS', {}) for conf in settings.TEMPLATES ) else [] @register(Tags.templates) def check_string_if_invalid_is_string(app_configs, **kwargs): errors = [] for conf in settings.TEMPLATES: string_if_invalid = conf.get('OPTIONS', {}).get('string_if_invalid', '') if not isinstance(string_if_invalid, str): error = copy.copy(E002) error.msg = error.msg.format(string_if_invalid, type(string_if_invalid).__name__) errors.append(error) return errors
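# Illustrative sketch, not part of Django: a TEMPLATES entry that
# check_setting_app_dirs_loaders() rejects with E001, because APP_DIRS and an
# explicit 'loaders' option are mutually exclusive.
#
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'APP_DIRS': True,
#     'OPTIONS': {'loaders': ['django.template.loaders.app_directories.Loader']},
# }]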
2e357bb48c717bbecf795559db10e0efb80356188374f1c91ddd9ae41359a6c7
import functools import os import pkgutil import sys from collections import OrderedDict, defaultdict from difflib import get_close_matches from importlib import import_module import django from django.apps import apps from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management.base import ( BaseCommand, CommandError, CommandParser, handle_default_options, ) from django.core.management.color import color_style from django.utils import autoreload def find_commands(management_dir): """ Given a path to a management directory, return a list of all the command names that are available. """ command_dir = os.path.join(management_dir, 'commands') return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir]) if not is_pkg and not name.startswith('_')] def load_command_class(app_name, name): """ Given a command name and an application name, return the Command class instance. Allow all errors raised by the import process (ImportError, AttributeError) to propagate. """ module = import_module('%s.management.commands.%s' % (app_name, name)) return module.Command() @functools.lru_cache(maxsize=None) def get_commands(): """ Return a dictionary mapping command names to their callback applications. Look for a management.commands package in django.core, and in each installed application -- if a commands package exists, register all commands in that package. Core commands are always included. If a settings module has been specified, also include user-defined commands. The dictionary is in the format {command_name: app_name}. Key-value pairs from this dictionary can then be used in calls to load_command_class(app_name, command_name) If a specific version of a command must be loaded (e.g., with the startapp command), the instantiated module can be placed in the dictionary in place of the application name. The dictionary is cached on the first call and reused on subsequent calls. """ commands = {name: 'django.core' for name in find_commands(__path__[0])} if not settings.configured: return commands for app_config in reversed(list(apps.get_app_configs())): path = os.path.join(app_config.path, 'management') commands.update({name: app_config.name for name in find_commands(path)}) return commands def call_command(command_name, *args, **options): """ Call the given command, with the given options and args/kwargs. This is the primary API you should use for calling specific commands. `command_name` may be a string or a command object. Using a string is preferred unless the command object is required for further processing or testing. Some examples: call_command('migrate') call_command('shell', plain=True) call_command('sqlmigrate', 'myapp') from django.core.management.commands import flush cmd = flush.Command() call_command(cmd, verbosity=0, interactive=False) # Do something with cmd ... """ if isinstance(command_name, BaseCommand): # Command object passed in. command = command_name command_name = command.__class__.__module__.split('.')[-1] else: # Load the command object by name. try: app_name = get_commands()[command_name] except KeyError: raise CommandError("Unknown command: %r" % command_name) if isinstance(app_name, BaseCommand): # If the command is already loaded, use it directly. command = app_name else: command = load_command_class(app_name, command_name) # Simulate argument parsing to get the option defaults (see #10080 for details). 
parser = command.create_parser('', command_name)
    # Use the `dest` option name from the parser option
    opt_mapping = {
        min(s_opt.option_strings).lstrip('-').replace('-', '_'): s_opt.dest
        for s_opt in parser._actions if s_opt.option_strings
    }
    arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
    parse_args = [str(a) for a in args]
    # Any required arguments which are passed in via **options must be
    # passed to parse_args().
    parse_args += [
        '{}={}'.format(min(opt.option_strings), arg_options[opt.dest])
        for opt in parser._actions if opt.required and opt.dest in options
    ]
    defaults = parser.parse_args(args=parse_args)
    defaults = dict(defaults._get_kwargs(), **arg_options)
    # Raise an error if any unknown options were passed.
    stealth_options = set(command.base_stealth_options + command.stealth_options)
    dest_parameters = {action.dest for action in parser._actions}
    valid_options = (dest_parameters | stealth_options).union(opt_mapping)
    unknown_options = set(options) - valid_options
    if unknown_options:
        raise TypeError(
            "Unknown option(s) for %s command: %s. "
            "Valid options are: %s." % (
                command_name,
                ', '.join(sorted(unknown_options)),
                ', '.join(sorted(valid_options)),
            )
        )
    # Move positional args out of options to mimic legacy optparse
    args = defaults.pop('args', ())
    if 'skip_checks' not in options:
        defaults['skip_checks'] = True

    return command.execute(*args, **defaults)


class ManagementUtility:
    """
    Encapsulate the logic of the django-admin and manage.py utilities.
    """
    def __init__(self, argv=None):
        self.argv = argv or sys.argv[:]
        self.prog_name = os.path.basename(self.argv[0])
        if self.prog_name == '__main__.py':
            self.prog_name = 'python -m django'
        self.settings_exception = None

    def main_help_text(self, commands_only=False):
        """Return the script's main help text, as a string."""
        if commands_only:
            usage = sorted(get_commands())
        else:
            usage = [
                "",
                "Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
                "",
                "Available subcommands:",
            ]
            commands_dict = defaultdict(lambda: [])
            for name, app in get_commands().items():
                if app == 'django.core':
                    app = 'django'
                else:
                    app = app.rpartition('.')[-1]
                commands_dict[app].append(name)
            style = color_style()
            for app in sorted(commands_dict):
                usage.append("")
                usage.append(style.NOTICE("[%s]" % app))
                for name in sorted(commands_dict[app]):
                    usage.append("    %s" % name)
            # Output an extra note if settings are not properly configured
            if self.settings_exception is not None:
                usage.append(style.NOTICE(
                    "Note that only Django core commands are listed "
                    "as settings are not properly configured (error: %s)."
                    % self.settings_exception))

        return '\n'.join(usage)

    def fetch_command(self, subcommand):
        """
        Try to fetch the given subcommand, printing a message with the
        appropriate command called from the command line (usually
        "django-admin" or "manage.py") if it can't be found.
        """
        # Get commands outside of try block to prevent swallowing exceptions
        commands = get_commands()
        try:
            app_name = commands[subcommand]
        except KeyError:
            if os.environ.get('DJANGO_SETTINGS_MODULE'):
                # If `subcommand` is missing due to misconfigured settings, the
                # following line will retrigger an ImproperlyConfigured exception
                # (get_commands() swallows the original one) so the user is
                # informed about it.
                settings.INSTALLED_APPS
            else:
                sys.stderr.write("No Django settings specified.\n")
            possible_matches = get_close_matches(subcommand, commands)
            sys.stderr.write('Unknown command: %r' % subcommand)
            if possible_matches:
                sys.stderr.write('. Did you mean %s?'
 % possible_matches[0])
            sys.stderr.write("\nType '%s help' for usage.\n" % self.prog_name)
            sys.exit(1)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, subcommand)
        return klass

    def autocomplete(self):
        """
        Output completion suggestions for BASH.

        The output of this function is passed to BASH's `COMPREPLY` variable
        and treated as completion suggestions. `COMPREPLY` expects a space
        separated string as the result.

        The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the cli input. Please refer to the BASH
        man-page for more information about these variables.

        Subcommand options are saved as pairs. A pair consists of the long
        option string (e.g. '--exclude') and a boolean value indicating if
        the option requires arguments. When printing to stdout, an equal
        sign is appended to options which require arguments.

        Note: If debugging this function, it is recommended to write the debug
        output in a separate file. Otherwise the debug output will be treated
        and formatted as potential completion suggestions.
        """
        # Don't complete if user hasn't sourced bash_completion file.
        if 'DJANGO_AUTO_COMPLETE' not in os.environ:
            return

        cwords = os.environ['COMP_WORDS'].split()[1:]
        cword = int(os.environ['COMP_CWORD'])

        try:
            curr = cwords[cword - 1]
        except IndexError:
            curr = ''

        subcommands = list(get_commands()) + ['help']
        options = [('--help', False)]

        # subcommand
        if cword == 1:
            print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
        # subcommand options
        # special case: the 'help' subcommand has no options
        elif cwords[0] in subcommands and cwords[0] != 'help':
            subcommand_cls = self.fetch_command(cwords[0])
            # special case: add the names of installed apps to options
            if cwords[0] in ('dumpdata', 'sqlmigrate', 'sqlsequencereset', 'test'):
                try:
                    app_configs = apps.get_app_configs()
                    # Get the last part of the dotted path as the app name.
                    options.extend((app_config.label, 0) for app_config in app_configs)
                except ImportError:
                    # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                    # user will find out once they execute the command.
                    pass
            parser = subcommand_cls.create_parser('', cwords[0])
            options.extend(
                (min(s_opt.option_strings), s_opt.nargs != 0)
                for s_opt in parser._actions if s_opt.option_strings
            )
            # filter out previously specified options from available options
            prev_opts = {x.split('=')[0] for x in cwords[1:cword - 1]}
            options = (opt for opt in options if opt[0] not in prev_opts)

            # filter options by current input
            options = sorted((k, v) for k, v in options if k.startswith(curr))
            for opt_label, require_arg in options:
                # append '=' to options which require args
                if require_arg:
                    opt_label += '='
                print(opt_label)
        # Exit code of the bash completion function is never passed back to
        # the user, so it's safe to always exit with 0.
        # For more details see #25420.
        sys.exit(0)

    def execute(self):
        """
        Given the command-line arguments, figure out which subcommand is being
        run, create a parser appropriate to that command, and run it.
        """
        try:
            subcommand = self.argv[1]
        except IndexError:
            subcommand = 'help'  # Display help if no arguments were given.

        # Preprocess options to extract --settings and --pythonpath.
        # These options could affect the commands that are available, so they
        # must be processed early.
parser = CommandParser(usage='%(prog)s subcommand [options] [args]', add_help=False, allow_abbrev=False) parser.add_argument('--settings') parser.add_argument('--pythonpath') parser.add_argument('args', nargs='*') # catch-all try: options, args = parser.parse_known_args(self.argv[2:]) handle_default_options(options) except CommandError: pass # Ignore any option errors at this point. try: settings.INSTALLED_APPS except ImproperlyConfigured as exc: self.settings_exception = exc except ImportError as exc: self.settings_exception = exc if settings.configured: # Start the auto-reloading dev server even if the code is broken. # The hardcoded condition is a code smell but we can't rely on a # flag on the command class because we haven't located it yet. if subcommand == 'runserver' and '--noreload' not in self.argv: try: autoreload.check_errors(django.setup)() except Exception: # The exception will be raised later in the child process # started by the autoreloader. Pretend it didn't happen by # loading an empty list of applications. apps.all_models = defaultdict(OrderedDict) apps.app_configs = OrderedDict() apps.apps_ready = apps.models_ready = apps.ready = True # Remove options not compatible with the built-in runserver # (e.g. options for the contrib.staticfiles' runserver). # Changes here require manually testing as described in # #27522. _parser = self.fetch_command('runserver').create_parser('django', 'runserver') _options, _args = _parser.parse_known_args(self.argv[2:]) for _arg in _args: self.argv.remove(_arg) # In all other cases, django.setup() is required to succeed. else: django.setup() self.autocomplete() if subcommand == 'help': if '--commands' in args: sys.stdout.write(self.main_help_text(commands_only=True) + '\n') elif not options.args: sys.stdout.write(self.main_help_text() + '\n') else: self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0]) # Special-cases: We want 'django-admin --version' and # 'django-admin --help' to work, for backwards compatibility. elif subcommand == 'version' or self.argv[1:] == ['--version']: sys.stdout.write(django.get_version() + '\n') elif self.argv[1:] in (['--help'], ['-h']): sys.stdout.write(self.main_help_text() + '\n') else: self.fetch_command(subcommand).run_from_argv(self.argv) def execute_from_command_line(argv=None): """Run a ManagementUtility.""" utility = ManagementUtility(argv) utility.execute()
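# Illustrative sketch, not part of this module: a project's manage.py is the
# usual entry point into ManagementUtility ('myproject.settings' is a
# hypothetical settings module).
#
# import os
# import sys
# from django.core.management import execute_from_command_line
#
# if __name__ == '__main__':
#     os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')
#     execute_from_command_line(sys.argv)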
a5851b033620e5bc09062853ebb4a6db414e5147ecba54cf8765c6462d9841cb
""" Base classes for writing management commands (named commands which can be executed through ``django-admin`` or ``manage.py``). """ import os import sys from argparse import ArgumentParser, HelpFormatter from io import TextIOBase import django from django.core import checks from django.core.exceptions import ImproperlyConfigured from django.core.management.color import color_style, no_style from django.db import DEFAULT_DB_ALIAS, connections class CommandError(Exception): """ Exception class indicating a problem while executing a management command. If this exception is raised during the execution of a management command, it will be caught and turned into a nicely-printed error message to the appropriate output stream (i.e., stderr); as a result, raising this exception (with a sensible description of the error) is the preferred way to indicate that something has gone wrong in the execution of a command. """ pass class SystemCheckError(CommandError): """ The system check framework detected unrecoverable errors. """ pass class CommandParser(ArgumentParser): """ Customized ArgumentParser class to improve some error messages and prevent SystemExit in several occasions, as SystemExit is unacceptable when a command is called programmatically. """ def __init__(self, **kwargs): self.missing_args_message = kwargs.pop('missing_args_message', None) self.called_from_command_line = kwargs.pop('called_from_command_line', None) super().__init__(**kwargs) def parse_args(self, args=None, namespace=None): # Catch missing argument for a better error message if (self.missing_args_message and not (args or any(not arg.startswith('-') for arg in args))): self.error(self.missing_args_message) return super().parse_args(args, namespace) def error(self, message): if self.called_from_command_line: super().error(message) else: raise CommandError("Error: %s" % message) def handle_default_options(options): """ Include any default options that all commands should accept here so that ManagementUtility can handle them before searching for user commands. """ if options.settings: os.environ['DJANGO_SETTINGS_MODULE'] = options.settings if options.pythonpath: sys.path.insert(0, options.pythonpath) def no_translations(handle_func): """Decorator that forces a command to run with translations deactivated.""" def wrapped(*args, **kwargs): from django.utils import translation saved_locale = translation.get_language() translation.deactivate_all() try: res = handle_func(*args, **kwargs) finally: if saved_locale is not None: translation.activate(saved_locale) return res return wrapped class DjangoHelpFormatter(HelpFormatter): """ Customized formatter so that command-specific arguments appear in the --help output before arguments common to all commands. 
""" show_last = { '--version', '--verbosity', '--traceback', '--settings', '--pythonpath', '--no-color', } def _reordered_actions(self, actions): return sorted( actions, key=lambda a: set(a.option_strings) & self.show_last != set() ) def add_usage(self, usage, actions, *args, **kwargs): super().add_usage(usage, self._reordered_actions(actions), *args, **kwargs) def add_arguments(self, actions): super().add_arguments(self._reordered_actions(actions)) class OutputWrapper(TextIOBase): """ Wrapper around stdout/stderr """ @property def style_func(self): return self._style_func @style_func.setter def style_func(self, style_func): if style_func and self.isatty(): self._style_func = style_func else: self._style_func = lambda x: x def __init__(self, out, style_func=None, ending='\n'): self._out = out self.style_func = None self.ending = ending def __getattr__(self, name): return getattr(self._out, name) def isatty(self): return hasattr(self._out, 'isatty') and self._out.isatty() def write(self, msg, style_func=None, ending=None): ending = self.ending if ending is None else ending if ending and not msg.endswith(ending): msg += ending style_func = style_func or self.style_func self._out.write(style_func(msg)) class BaseCommand: """ The base class from which all management commands ultimately derive. Use this class if you want access to all of the mechanisms which parse the command-line arguments and work out what code to call in response; if you don't need to change any of that behavior, consider using one of the subclasses defined in this file. If you are interested in overriding/customizing various aspects of the command-parsing and -execution behavior, the normal flow works as follows: 1. ``django-admin`` or ``manage.py`` loads the command class and calls its ``run_from_argv()`` method. 2. The ``run_from_argv()`` method calls ``create_parser()`` to get an ``ArgumentParser`` for the arguments, parses them, performs any environment changes requested by options like ``pythonpath``, and then calls the ``execute()`` method, passing the parsed arguments. 3. The ``execute()`` method attempts to carry out the command by calling the ``handle()`` method with the parsed arguments; any output produced by ``handle()`` will be printed to standard output and, if the command is intended to produce a block of SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``. 4. If ``handle()`` or ``execute()`` raised any exception (e.g. ``CommandError``), ``run_from_argv()`` will instead print an error message to ``stderr``. Thus, the ``handle()`` method is typically the starting point for subclasses; many built-in commands and command types either place all of their logic in ``handle()``, or perform some additional parsing work in ``handle()`` and then delegate from it to more specialized methods as needed. Several attributes affect behavior at various steps along the way: ``help`` A short description of the command, which will be printed in help messages. ``output_transaction`` A boolean indicating whether the command outputs SQL statements; if ``True``, the output will automatically be wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is ``False``. ``requires_migrations_checks`` A boolean; if ``True``, the command prints a warning if the set of migrations on disk don't match the migrations in the database. ``requires_system_checks`` A boolean; if ``True``, entire Django project will be checked for errors prior to executing the command. Default value is ``True``. 
To validate an individual application's models rather than all applications' models, call ``self.check(app_configs)`` from ``handle()``, where ``app_configs`` is the list of application's configuration provided by the app registry. ``stealth_options`` A tuple of any options the command uses which aren't defined by the argument parser. """ # Metadata about this command. help = '' # Configuration shortcuts that alter various logic. _called_from_command_line = False output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;" requires_migrations_checks = False requires_system_checks = True # Arguments, common to all commands, which aren't defined by the argument # parser. base_stealth_options = ('skip_checks', 'stderr', 'stdout') # Command-specific options not defined by the argument parser. stealth_options = () def __init__(self, stdout=None, stderr=None, no_color=False): self.stdout = OutputWrapper(stdout or sys.stdout) self.stderr = OutputWrapper(stderr or sys.stderr) if no_color: self.style = no_style() else: self.style = color_style() self.stderr.style_func = self.style.ERROR def get_version(self): """ Return the Django version, which should be correct for all built-in Django commands. User-supplied commands can override this method to return their own version. """ return django.get_version() def create_parser(self, prog_name, subcommand, **kwargs): """ Create and return the ``ArgumentParser`` which will be used to parse the arguments to this command. """ parser = CommandParser( prog='%s %s' % (os.path.basename(prog_name), subcommand), description=self.help or None, formatter_class=DjangoHelpFormatter, missing_args_message=getattr(self, 'missing_args_message', None), called_from_command_line=getattr(self, '_called_from_command_line', None), **kwargs ) parser.add_argument('--version', action='version', version=self.get_version()) parser.add_argument( '-v', '--verbosity', action='store', dest='verbosity', default=1, type=int, choices=[0, 1, 2, 3], help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output', ) parser.add_argument( '--settings', help=( 'The Python path to a settings module, e.g. ' '"myproject.settings.main". If this isn\'t provided, the ' 'DJANGO_SETTINGS_MODULE environment variable will be used.' ), ) parser.add_argument( '--pythonpath', help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".', ) parser.add_argument('--traceback', action='store_true', help='Raise on CommandError exceptions') parser.add_argument( '--no-color', action='store_true', dest='no_color', help="Don't colorize the command output.", ) self.add_arguments(parser) return parser def add_arguments(self, parser): """ Entry point for subclassed commands to add custom arguments. """ pass def print_help(self, prog_name, subcommand): """ Print the help message for this command, derived from ``self.usage()``. """ parser = self.create_parser(prog_name, subcommand) parser.print_help() def run_from_argv(self, argv): """ Set up any environment changes requested (e.g., Python path and Django settings), then run this command. If the command raises a ``CommandError``, intercept it and print it sensibly to stderr. If the ``--traceback`` option is present or the raised ``Exception`` is not ``CommandError``, raise it. 
""" self._called_from_command_line = True parser = self.create_parser(argv[0], argv[1]) options = parser.parse_args(argv[2:]) cmd_options = vars(options) # Move positional args out of options to mimic legacy optparse args = cmd_options.pop('args', ()) handle_default_options(options) try: self.execute(*args, **cmd_options) except Exception as e: if options.traceback or not isinstance(e, CommandError): raise # SystemCheckError takes care of its own formatting. if isinstance(e, SystemCheckError): self.stderr.write(str(e), lambda x: x) else: self.stderr.write('%s: %s' % (e.__class__.__name__, e)) sys.exit(1) finally: try: connections.close_all() except ImproperlyConfigured: # Ignore if connections aren't setup at this point (e.g. no # configured settings). pass def execute(self, *args, **options): """ Try to execute this command, performing system checks if needed (as controlled by the ``requires_system_checks`` attribute, except if force-skipped). """ if options['no_color']: self.style = no_style() self.stderr.style_func = None if options.get('stdout'): self.stdout = OutputWrapper(options['stdout']) if options.get('stderr'): self.stderr = OutputWrapper(options['stderr'], self.stderr.style_func) if self.requires_system_checks and not options.get('skip_checks'): self.check() if self.requires_migrations_checks: self.check_migrations() output = self.handle(*args, **options) if output: if self.output_transaction: connection = connections[options.get('database', DEFAULT_DB_ALIAS)] output = '%s\n%s\n%s' % ( self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()), output, self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()), ) self.stdout.write(output) return output def _run_checks(self, **kwargs): return checks.run_checks(**kwargs) def check(self, app_configs=None, tags=None, display_num_errors=False, include_deployment_checks=False, fail_level=checks.ERROR): """ Use the system check framework to validate entire Django project. Raise CommandError for any serious message (error or critical errors). If there are only light messages (like warnings), print them to stderr and don't raise an exception. """ all_issues = self._run_checks( app_configs=app_configs, tags=tags, include_deployment_checks=include_deployment_checks, ) header, body, footer = "", "", "" visible_issue_count = 0 # excludes silenced warnings if all_issues: debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()] infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()] warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()] errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()] criticals = [e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()] sorted_issues = [ (criticals, 'CRITICALS'), (errors, 'ERRORS'), (warnings, 'WARNINGS'), (infos, 'INFOS'), (debugs, 'DEBUGS'), ] for issues, group_name in sorted_issues: if issues: visible_issue_count += len(issues) formatted = ( self.style.ERROR(str(e)) if e.is_serious() else self.style.WARNING(str(e)) for e in issues) formatted = "\n".join(sorted(formatted)) body += '\n%s:\n%s\n' % (group_name, formatted) if visible_issue_count: header = "System check identified some issues:\n" if display_num_errors: if visible_issue_count: footer += '\n' footer += "System check identified %s (%s silenced)." 
% (
                "no issues" if visible_issue_count == 0 else
                "1 issue" if visible_issue_count == 1 else
                "%s issues" % visible_issue_count,
                len(all_issues) - visible_issue_count,
            )

        if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
            msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
            raise SystemCheckError(msg)
        else:
            msg = header + body + footer

        if msg:
            if visible_issue_count:
                self.stderr.write(msg, lambda x: x)
            else:
                self.stdout.write(msg)

    def check_migrations(self):
        """
        Print a warning if the set of migrations on disk don't match the
        migrations in the database.
        """
        from django.db.migrations.executor import MigrationExecutor
        try:
            executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
        except ImproperlyConfigured:
            # No databases are configured (or the dummy one)
            return

        plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
        if plan:
            apps_waiting_migration = sorted({migration.app_label for migration, backwards in plan})
            self.stdout.write(
                self.style.NOTICE(
                    "\nYou have %(unapplied_migration_count)s unapplied migration(s). "
                    "Your project may not work properly until you apply the "
                    "migrations for app(s): %(apps_waiting_migration)s." % {
                        "unapplied_migration_count": len(plan),
                        "apps_waiting_migration": ", ".join(apps_waiting_migration),
                    }
                )
            )
            self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))

    def handle(self, *args, **options):
        """
        The actual logic of the command. Subclasses must implement
        this method.
        """
        raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')


class AppCommand(BaseCommand):
    """
    A management command which takes one or more installed application labels
    as arguments, and does something with each of them.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_app_config()``, which will be called once for each application.
    """
    missing_args_message = "Enter at least one application label."

    def add_arguments(self, parser):
        parser.add_argument('args', metavar='app_label', nargs='+', help='One or more application labels.')

    def handle(self, *app_labels, **options):
        from django.apps import apps
        try:
            app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
        except (LookupError, ImportError) as e:
            raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
        output = []
        for app_config in app_configs:
            app_output = self.handle_app_config(app_config, **options)
            if app_output:
                output.append(app_output)
        return '\n'.join(output)

    def handle_app_config(self, app_config, **options):
        """
        Perform the command's actions for app_config, an AppConfig instance
        corresponding to an application label given on the command line.
        """
        raise NotImplementedError(
            "Subclasses of AppCommand must provide "
            "a handle_app_config() method.")


class LabelCommand(BaseCommand):
    """
    A management command which takes one or more arbitrary arguments
    (labels) on the command line, and does something with each of
    them.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_label()``, which will be called once for each label.

    If the arguments should be names of installed applications, use
    ``AppCommand`` instead.
    """
    label = 'label'

    missing_args_message = "Enter at least one %s."
% label def add_arguments(self, parser): parser.add_argument('args', metavar=self.label, nargs='+') def handle(self, *labels, **options): output = [] for label in labels: label_output = self.handle_label(label, **options) if label_output: output.append(label_output) return '\n'.join(output) def handle_label(self, label, **options): """ Perform the command's actions for ``label``, which will be the string as given on the command line. """ raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
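# Illustrative sketch, not part of Django: a minimal custom command following
# the BaseCommand contract documented above. The app ('myapp') and command
# name are hypothetical; the file would live at
# myapp/management/commands/greet.py.
#
# from django.core.management.base import BaseCommand
#
# class Command(BaseCommand):
#     help = 'Print a greeting for each given name.'
#
#     def add_arguments(self, parser):
#         parser.add_argument('names', nargs='+')
#         parser.add_argument('--shout', action='store_true')
#
#     def handle(self, *args, **options):
#         for name in options['names']:
#             msg = 'Hello, %s!' % name
#             self.stdout.write(msg.upper() if options['shout'] else msg)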
b744ede0db3eadaf072da93f314097fd5aee83077413d6ffa9c11ec28239fbf5
import os from subprocess import PIPE, Popen from django.apps import apps as installed_apps from django.utils.crypto import get_random_string from django.utils.encoding import DEFAULT_LOCALE_ENCODING from .base import CommandError def popen_wrapper(args, stdout_encoding='utf-8'): """ Friendly wrapper around Popen. Return stdout output, stderr output, and OS status code. """ try: p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt') except OSError as err: raise CommandError('Error executing %s' % args[0]) from err output, errors = p.communicate() return ( output.decode(stdout_encoding), errors.decode(DEFAULT_LOCALE_ENCODING, errors='replace'), p.returncode ) def handle_extensions(extensions): """ Organize multiple extensions that are separated with commas or passed by using --extension/-e multiple times. For example: running 'django-admin makemessages -e js,txt -e xhtml -a' would result in an extension list: ['.js', '.txt', '.xhtml'] >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']) {'.html', '.js', '.py'} >>> handle_extensions(['.html, txt,.tpl']) {'.html', '.tpl', '.txt'} """ ext_list = [] for ext in extensions: ext_list.extend(ext.replace(' ', '').split(',')) for i, ext in enumerate(ext_list): if not ext.startswith('.'): ext_list[i] = '.%s' % ext_list[i] return set(ext_list) def find_command(cmd, path=None, pathext=None): if path is None: path = os.environ.get('PATH', '').split(os.pathsep) if isinstance(path, str): path = [path] # check if there are funny path extensions for executables, e.g. Windows if pathext is None: pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep) # don't use extensions if the command ends with one of them for ext in pathext: if cmd.endswith(ext): pathext = [''] break # check if we find the command on PATH for p in path: f = os.path.join(p, cmd) if os.path.isfile(f): return f for ext in pathext: fext = f + ext if os.path.isfile(fext): return fext return None def get_random_secret_key(): """ Return a 50 character random string usable as a SECRET_KEY setting value. """ chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)' return get_random_string(50, chars) def parse_apps_and_model_labels(labels): """ Parse a list of "app_label.ModelName" or "app_label" strings into actual objects and return a two-element tuple: (set of model classes, set of app_configs). Raise a CommandError if some specified models or apps don't exist. """ apps = set() models = set() for label in labels: if '.' in label: try: model = installed_apps.get_model(label) except LookupError: raise CommandError('Unknown model: %s' % label) models.add(model) else: try: app_config = installed_apps.get_app_config(label) except LookupError as e: raise CommandError(str(e)) apps.add(app_config) return models, apps
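# Illustrative usage sketch, not part of Django:
#
# get_random_secret_key()                # 50-character string for SECRET_KEY
# handle_extensions(['html', 'js,py'])   # {'.html', '.js', '.py'}
# find_command('python')                 # path to the executable, or None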
8c8f4a2d5f3000b6ff41ce19464fa373703136bd42bc4bc6d7914a2994d0d2e9
import cgi import mimetypes import os import posixpath import shutil import stat import tempfile from importlib import import_module from os import path from urllib.request import urlretrieve import django from django.conf import settings from django.core.management.base import BaseCommand, CommandError from django.core.management.utils import handle_extensions from django.template import Context, Engine from django.utils import archive from django.utils.version import get_docs_version class TemplateCommand(BaseCommand): """ Copy either a Django application layout template or a Django project layout template into the specified directory. :param style: A color style object (see django.core.management.color). :param app_or_project: The string 'app' or 'project'. :param name: The name of the application or project. :param directory: The directory to which the template should be copied. :param options: The additional variables passed to project or app templates """ requires_system_checks = False # The supported URL schemes url_schemes = ['http', 'https', 'ftp'] # Rewrite the following suffixes when determining the target filename. rewrite_template_suffixes = ( # Allow shipping invalid .py files without byte-compilation. ('.py-tpl', '.py'), ) def add_arguments(self, parser): parser.add_argument('name', help='Name of the application or project.') parser.add_argument('directory', nargs='?', help='Optional destination directory') parser.add_argument('--template', help='The path or URL to load the template from.') parser.add_argument( '--extension', '-e', dest='extensions', action='append', default=['py'], help='The file extension(s) to render (default: "py"). ' 'Separate multiple extensions with commas, or use ' '-e multiple times.' ) parser.add_argument( '--name', '-n', dest='files', action='append', default=[], help='The file name(s) to render. Separate multiple file names ' 'with commas, or use -n multiple times.' ) def handle(self, app_or_project, name, target=None, **options): self.app_or_project = app_or_project self.paths_to_remove = [] self.verbosity = options['verbosity'] self.validate_name(name, app_or_project) # if some directory is given, make sure it's nicely expanded if target is None: top_dir = path.join(os.getcwd(), name) try: os.makedirs(top_dir) except FileExistsError: raise CommandError("'%s' already exists" % top_dir) except OSError as e: raise CommandError(e) else: top_dir = os.path.abspath(path.expanduser(target)) if not os.path.exists(top_dir): raise CommandError("Destination directory '%s' does not " "exist, please create it first." 
% top_dir) extensions = tuple(handle_extensions(options['extensions'])) extra_files = [] for file in options['files']: extra_files.extend(map(lambda x: x.strip(), file.split(','))) if self.verbosity >= 2: self.stdout.write("Rendering %s template files with " "extensions: %s\n" % (app_or_project, ', '.join(extensions))) self.stdout.write("Rendering %s template files with " "filenames: %s\n" % (app_or_project, ', '.join(extra_files))) base_name = '%s_name' % app_or_project base_subdir = '%s_template' % app_or_project base_directory = '%s_directory' % app_or_project camel_case_name = 'camel_case_%s_name' % app_or_project camel_case_value = ''.join(x for x in name.title() if x != '_') context = Context({ **options, base_name: name, base_directory: top_dir, camel_case_name: camel_case_value, 'docs_version': get_docs_version(), 'django_version': django.__version__, }, autoescape=False) # Setup a stub settings environment for template rendering if not settings.configured: settings.configure() django.setup() template_dir = self.handle_template(options['template'], base_subdir) prefix_length = len(template_dir) + 1 for root, dirs, files in os.walk(template_dir): path_rest = root[prefix_length:] relative_dir = path_rest.replace(base_name, name) if relative_dir: target_dir = path.join(top_dir, relative_dir) if not path.exists(target_dir): os.mkdir(target_dir) for dirname in dirs[:]: if dirname.startswith('.') or dirname == '__pycache__': dirs.remove(dirname) for filename in files: if filename.endswith(('.pyo', '.pyc', '.py.class')): # Ignore some files as they cause various breakages. continue old_path = path.join(root, filename) new_path = path.join(top_dir, relative_dir, filename.replace(base_name, name)) for old_suffix, new_suffix in self.rewrite_template_suffixes: if new_path.endswith(old_suffix): new_path = new_path[:-len(old_suffix)] + new_suffix break # Only rewrite once if path.exists(new_path): raise CommandError("%s already exists, overlaying a " "project or app into an existing " "directory won't replace conflicting " "files" % new_path) # Only render the Python files, as we don't want to # accidentally render Django templates files if new_path.endswith(extensions) or filename in extra_files: with open(old_path, 'r', encoding='utf-8') as template_file: content = template_file.read() template = Engine().from_string(content) content = template.render(context) with open(new_path, 'w', encoding='utf-8') as new_file: new_file.write(content) else: shutil.copyfile(old_path, new_path) if self.verbosity >= 2: self.stdout.write("Creating %s\n" % new_path) try: shutil.copymode(old_path, new_path) self.make_writeable(new_path) except OSError: self.stderr.write( "Notice: Couldn't set permission bits on %s. You're " "probably using an uncommon filesystem setup. No " "problem." % new_path, self.style.NOTICE) if self.paths_to_remove: if self.verbosity >= 2: self.stdout.write("Cleaning up temporary files.\n") for path_to_remove in self.paths_to_remove: if path.isfile(path_to_remove): os.remove(path_to_remove) else: shutil.rmtree(path_to_remove) def handle_template(self, template, subdir): """ Determine where the app or project templates are. Use django.__path__[0] as the default because the Django install directory isn't known. 
""" if template is None: return path.join(django.__path__[0], 'conf', subdir) else: if template.startswith('file://'): template = template[7:] expanded_template = path.expanduser(template) expanded_template = path.normpath(expanded_template) if path.isdir(expanded_template): return expanded_template if self.is_url(template): # downloads the file and returns the path absolute_path = self.download(template) else: absolute_path = path.abspath(expanded_template) if path.exists(absolute_path): return self.extract(absolute_path) raise CommandError("couldn't handle %s template %s." % (self.app_or_project, template)) def validate_name(self, name, app_or_project): a_or_an = 'an' if app_or_project == 'app' else 'a' if name is None: raise CommandError('you must provide {an} {app} name'.format( an=a_or_an, app=app_or_project, )) # Check it's a valid directory name. if not name.isidentifier(): raise CommandError( "'{name}' is not a valid {app} name. Please make sure the " "name is a valid identifier.".format( name=name, app=app_or_project, ) ) # Check it cannot be imported. try: import_module(name) except ImportError: pass else: raise CommandError( "'{name}' conflicts with the name of an existing Python " "module and cannot be used as {an} {app} name. Please try " "another name.".format( name=name, an=a_or_an, app=app_or_project, ) ) def download(self, url): """ Download the given URL and return the file name. """ def cleanup_url(url): tmp = url.rstrip('/') filename = tmp.split('/')[-1] if url.endswith('/'): display_url = tmp + '/' else: display_url = url return filename, display_url prefix = 'django_%s_template_' % self.app_or_project tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download') self.paths_to_remove.append(tempdir) filename, display_url = cleanup_url(url) if self.verbosity >= 2: self.stdout.write("Downloading %s\n" % display_url) try: the_path, info = urlretrieve(url, path.join(tempdir, filename)) except IOError as e: raise CommandError("couldn't download URL %s to %s: %s" % (url, filename, e)) used_name = the_path.split('/')[-1] # Trying to get better name from response headers content_disposition = info.get('content-disposition') if content_disposition: _, params = cgi.parse_header(content_disposition) guessed_filename = params.get('filename') or used_name else: guessed_filename = used_name # Falling back to content type guessing ext = self.splitext(guessed_filename)[1] content_type = info.get('content-type') if not ext and content_type: ext = mimetypes.guess_extension(content_type) if ext: guessed_filename += ext # Move the temporary file to a filename that has better # chances of being recognized by the archive utils if used_name != guessed_filename: guessed_path = path.join(tempdir, guessed_filename) shutil.move(the_path, guessed_path) return guessed_path # Giving up return the_path def splitext(self, the_path): """ Like os.path.splitext, but takes off .tar, too """ base, ext = posixpath.splitext(the_path) if base.lower().endswith('.tar'): ext = base[-4:] + ext base = base[:-4] return base, ext def extract(self, filename): """ Extract the given file to a temporarily and return the path of the directory with the extracted content. 
""" prefix = 'django_%s_template_' % self.app_or_project tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract') self.paths_to_remove.append(tempdir) if self.verbosity >= 2: self.stdout.write("Extracting %s\n" % filename) try: archive.extract(filename, tempdir) return tempdir except (archive.ArchiveException, IOError) as e: raise CommandError("couldn't extract file %s to %s: %s" % (filename, tempdir, e)) def is_url(self, template): """Return True if the name looks like a URL.""" if ':' not in template: return False scheme = template.split(':', 1)[0].lower() return scheme in self.url_schemes def make_writeable(self, filename): """ Make sure that the file is writeable. Useful if our source is read-only. """ if not os.access(filename, os.W_OK): st = os.stat(filename) new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR os.chmod(filename, new_permissions)
6e7c4451420949c8fa03aeecd2dc69ab8453f2c8749fe452e59c5c36ac926805
from django.apps import apps
from django.db import models


def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
    """
    Return a list of the SQL statements used to flush the database.

    If only_django is True, only include the table names that have associated
    Django models and are in INSTALLED_APPS.
    """
    if only_django:
        tables = connection.introspection.django_table_names(only_existing=True, include_views=False)
    else:
        tables = connection.introspection.table_names(include_views=False)
    seqs = connection.introspection.sequence_list() if reset_sequences else ()
    statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
    return statements


def emit_pre_migrate_signal(verbosity, interactive, db, **kwargs):
    # Emit the pre_migrate signal for every application.
    for app_config in apps.get_app_configs():
        if app_config.models_module is None:
            continue
        if verbosity >= 2:
            print("Running pre-migrate handlers for application %s" % app_config.label)
        models.signals.pre_migrate.send(
            sender=app_config,
            app_config=app_config,
            verbosity=verbosity,
            interactive=interactive,
            using=db,
            **kwargs
        )


def emit_post_migrate_signal(verbosity, interactive, db, **kwargs):
    # Emit the post_migrate signal for every application.
    for app_config in apps.get_app_configs():
        if app_config.models_module is None:
            continue
        if verbosity >= 2:
            print("Running post-migrate handlers for application %s" % app_config.label)
        models.signals.post_migrate.send(
            sender=app_config,
            app_config=app_config,
            verbosity=verbosity,
            interactive=interactive,
            using=db,
            **kwargs
        )
4c10d42322d791952880afc5bc8cb713b9e97086d7a69acd28218683a25f1108
""" Sets up the terminal color scheme. """ import functools import os import sys from django.utils import termcolors def supports_color(): """ Return True if the running system's terminal supports color, and False otherwise. """ plat = sys.platform supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ) # isatty is not always implemented, #6223. is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() return supported_platform and is_a_tty class Style: pass def make_style(config_string=''): """ Create a Style object from the given config_string. If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used. """ style = Style() color_settings = termcolors.parse_color_setting(config_string) # The nocolor palette has all available roles. # Use that palette as the basis for populating # the palette as defined in the environment. for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]: if color_settings: format = color_settings.get(role, {}) style_func = termcolors.make_style(**format) else: def style_func(x): return x setattr(style, role, style_func) # For backwards compatibility, # set style for ERROR_OUTPUT == ERROR style.ERROR_OUTPUT = style.ERROR return style @functools.lru_cache(maxsize=None) def no_style(): """ Return a Style object with no color scheme. """ return make_style('nocolor') def color_style(): """ Return a Style object from the Django color scheme. """ if not supports_color(): return no_style() return make_style(os.environ.get('DJANGO_COLORS', ''))
1b16cb073153dffeaba96f84dbabb7d3ffc1624aa168287aa7384cd4aca4644f
""" Caching framework. This package defines set of cache backends that all conform to a simple API. In a nutshell, a cache is a set of values -- which can be any object that may be pickled -- identified by string keys. For the complete API, see the abstract BaseCache class in django.core.cache.backends.base. Client code should use the `cache` variable defined here to access the default cache backend and look up non-default cache backends in the `caches` dict-like object. See docs/topics/cache.txt for information on the public API. """ from threading import local from django.conf import settings from django.core import signals from django.core.cache.backends.base import ( BaseCache, CacheKeyWarning, InvalidCacheBackendError, ) from django.utils.module_loading import import_string __all__ = [ 'cache', 'caches', 'DEFAULT_CACHE_ALIAS', 'InvalidCacheBackendError', 'CacheKeyWarning', 'BaseCache', ] DEFAULT_CACHE_ALIAS = 'default' def _create_cache(backend, **kwargs): try: # Try to get the CACHES entry for the given backend name first try: conf = settings.CACHES[backend] except KeyError: try: # Trying to import the given backend, in case it's a dotted path import_string(backend) except ImportError as e: raise InvalidCacheBackendError("Could not find backend '%s': %s" % ( backend, e)) location = kwargs.pop('LOCATION', '') params = kwargs else: params = {**conf, **kwargs} backend = params.pop('BACKEND') location = params.pop('LOCATION', '') backend_cls = import_string(backend) except ImportError as e: raise InvalidCacheBackendError( "Could not find backend '%s': %s" % (backend, e)) return backend_cls(location, params) class CacheHandler: """ A Cache Handler to manage access to Cache instances. Ensure only one instance of each alias exists per thread. """ def __init__(self): self._caches = local() def __getitem__(self, alias): try: return self._caches.caches[alias] except AttributeError: self._caches.caches = {} except KeyError: pass if alias not in settings.CACHES: raise InvalidCacheBackendError( "Could not find config for '%s' in settings.CACHES" % alias ) cache = _create_cache(alias) self._caches.caches[alias] = cache return cache def all(self): return getattr(self._caches, 'caches', {}).values() caches = CacheHandler() class DefaultCacheProxy: """ Proxy access to the default Cache object's attributes. This allows the legacy `cache` object to be thread-safe using the new ``caches`` API. """ def __getattr__(self, name): return getattr(caches[DEFAULT_CACHE_ALIAS], name) def __setattr__(self, name, value): return setattr(caches[DEFAULT_CACHE_ALIAS], name, value) def __delattr__(self, name): return delattr(caches[DEFAULT_CACHE_ALIAS], name) def __contains__(self, key): return key in caches[DEFAULT_CACHE_ALIAS] def __eq__(self, other): return caches[DEFAULT_CACHE_ALIAS] == other cache = DefaultCacheProxy() def close_caches(**kwargs): # Some caches -- python-memcached in particular -- need to do a cleanup at the # end of a request cycle. If not implemented in a particular backend # cache.close is a no-op for cache in caches.all(): cache.close() signals.request_finished.connect(close_caches)
7dd49398cbe755333b761c1ffaa6c746784b2405755c825efd4b791fe106281c
import hashlib from urllib.parse import quote TEMPLATE_FRAGMENT_KEY_TEMPLATE = 'template.cache.%s.%s' def make_template_fragment_key(fragment_name, vary_on=None): if vary_on is None: vary_on = () key = ':'.join(quote(str(var)) for var in vary_on) args = hashlib.md5(key.encode()) return TEMPLATE_FRAGMENT_KEY_TEMPLATE % (fragment_name, args.hexdigest())
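# Illustrative usage sketch, not part of Django: equal fragment names and
# vary_on values always map to the same cache key, since the vary_on part is
# an MD5 of the quoted values.
#
# key = make_template_fragment_key('sidebar', ['en', 42])
# key.startswith('template.cache.sidebar.')   # True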
4c42531061ec5b9bd40095aa433ec57650ff34418d686941378cc901edb5c9ea
""" Interfaces for serializing Django objects. Usage:: from django.core import serializers json = serializers.serialize("json", some_queryset) objects = list(serializers.deserialize("json", json)) To add your own serializers, use the SERIALIZATION_MODULES setting:: SERIALIZATION_MODULES = { "csv": "path.to.csv.serializer", "txt": "path.to.txt.serializer", } """ import importlib from django.apps import apps from django.conf import settings from django.core.serializers.base import SerializerDoesNotExist # Built-in serializers BUILTIN_SERIALIZERS = { "xml": "django.core.serializers.xml_serializer", "python": "django.core.serializers.python", "json": "django.core.serializers.json", "yaml": "django.core.serializers.pyyaml", } _serializers = {} class BadSerializer: """ Stub serializer to hold exception raised during registration This allows the serializer registration to cache serializers and if there is an error raised in the process of creating a serializer it will be raised and passed along to the caller when the serializer is used. """ internal_use_only = False def __init__(self, exception): self.exception = exception def __call__(self, *args, **kwargs): raise self.exception def register_serializer(format, serializer_module, serializers=None): """Register a new serializer. ``serializer_module`` should be the fully qualified module name for the serializer. If ``serializers`` is provided, the registration will be added to the provided dictionary. If ``serializers`` is not provided, the registration will be made directly into the global register of serializers. Adding serializers directly is not a thread-safe operation. """ if serializers is None and not _serializers: _load_serializers() try: module = importlib.import_module(serializer_module) except ImportError as exc: bad_serializer = BadSerializer(exc) module = type('BadSerializerModule', (), { 'Deserializer': bad_serializer, 'Serializer': bad_serializer, }) if serializers is None: _serializers[format] = module else: serializers[format] = module def unregister_serializer(format): "Unregister a given serializer. This is not a thread-safe operation." if not _serializers: _load_serializers() if format not in _serializers: raise SerializerDoesNotExist(format) del _serializers[format] def get_serializer(format): if not _serializers: _load_serializers() if format not in _serializers: raise SerializerDoesNotExist(format) return _serializers[format].Serializer def get_serializer_formats(): if not _serializers: _load_serializers() return list(_serializers) def get_public_serializer_formats(): if not _serializers: _load_serializers() return [k for k, v in _serializers.items() if not v.Serializer.internal_use_only] def get_deserializer(format): if not _serializers: _load_serializers() if format not in _serializers: raise SerializerDoesNotExist(format) return _serializers[format].Deserializer def serialize(format, queryset, **options): """ Serialize a queryset (or any iterator that returns database objects) using a certain serializer. """ s = get_serializer(format)() s.serialize(queryset, **options) return s.getvalue() def deserialize(format, stream_or_string, **options): """ Deserialize a stream or a string. Return an iterator that yields ``(obj, m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* -- object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name : list_of_related_objects}``. 
""" d = get_deserializer(format) return d(stream_or_string, **options) def _load_serializers(): """ Register built-in and settings-defined serializers. This is done lazily so that user code has a chance to (e.g.) set up custom settings without needing to be careful of import order. """ global _serializers serializers = {} for format in BUILTIN_SERIALIZERS: register_serializer(format, BUILTIN_SERIALIZERS[format], serializers) if hasattr(settings, "SERIALIZATION_MODULES"): for format in settings.SERIALIZATION_MODULES: register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers) _serializers = serializers def sort_dependencies(app_list): """Sort a list of (app_config, models) pairs into a single list of models. The single list of models is sorted so that any model with a natural key is serialized before a normal model, and any model with a natural key dependency has it's dependencies serialized first. """ # Process the list of models, and get the list of dependencies model_dependencies = [] models = set() for app_config, model_list in app_list: if model_list is None: model_list = app_config.get_models() for model in model_list: models.add(model) # Add any explicitly defined dependencies if hasattr(model, 'natural_key'): deps = getattr(model.natural_key, 'dependencies', []) if deps: deps = [apps.get_model(dep) for dep in deps] else: deps = [] # Now add a dependency for any FK relation with a model that # defines a natural key for field in model._meta.fields: if field.remote_field: rel_model = field.remote_field.model if hasattr(rel_model, 'natural_key') and rel_model != model: deps.append(rel_model) # Also add a dependency for any simple M2M relation with a model # that defines a natural key. M2M relations with explicit through # models don't count as dependencies. for field in model._meta.many_to_many: if field.remote_field.through._meta.auto_created: rel_model = field.remote_field.model if hasattr(rel_model, 'natural_key') and rel_model != model: deps.append(rel_model) model_dependencies.append((model, deps)) model_dependencies.reverse() # Now sort the models to ensure that dependencies are met. This # is done by repeatedly iterating over the input list of models. # If all the dependencies of a given model are in the final list, # that model is promoted to the end of the final list. This process # continues until the input list is empty, or we do a full iteration # over the input models without promoting a model to the final list. # If we do a full iteration without a promotion, that means there are # circular dependencies in the list. model_list = [] while model_dependencies: skipped = [] changed = False while model_dependencies: model, deps = model_dependencies.pop() # If all of the models in the dependency list are either already # on the final model list, or not on the original serialization list, # then we've found another model with all it's dependencies satisfied. if all(d not in models or d in model_list for d in deps): model_list.append(model) changed = True else: skipped.append((model, deps)) if not changed: raise RuntimeError( "Can't resolve dependencies for %s in serialized app list." % ', '.join( '%s.%s' % (model._meta.app_label, model._meta.object_name) for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__) ) ) model_dependencies = skipped return model_list
b0a7f302b4632b6ccd69153161105b5169a841f0d42b3c8a7482aed6005e2a77
""" Serialize data to/from JSON """ import datetime import decimal import json import uuid from django.core.serializers.base import DeserializationError from django.core.serializers.python import ( Deserializer as PythonDeserializer, Serializer as PythonSerializer, ) from django.utils.duration import duration_iso_string from django.utils.functional import Promise from django.utils.timezone import is_aware class Serializer(PythonSerializer): """Convert a queryset to JSON.""" internal_use_only = False def _init_options(self): self._current = None self.json_kwargs = self.options.copy() self.json_kwargs.pop('stream', None) self.json_kwargs.pop('fields', None) if self.options.get('indent'): # Prevent trailing spaces self.json_kwargs['separators'] = (',', ': ') self.json_kwargs.setdefault('cls', DjangoJSONEncoder) def start_serialization(self): self._init_options() self.stream.write("[") def end_serialization(self): if self.options.get("indent"): self.stream.write("\n") self.stream.write("]") if self.options.get("indent"): self.stream.write("\n") def end_object(self, obj): # self._current has the field data indent = self.options.get("indent") if not self.first: self.stream.write(",") if not indent: self.stream.write(" ") if indent: self.stream.write("\n") json.dump(self.get_dump_object(obj), self.stream, **self.json_kwargs) self._current = None def getvalue(self): # Grandparent super return super(PythonSerializer, self).getvalue() def Deserializer(stream_or_string, **options): """Deserialize a stream or string of JSON data.""" if not isinstance(stream_or_string, (bytes, str)): stream_or_string = stream_or_string.read() if isinstance(stream_or_string, bytes): stream_or_string = stream_or_string.decode() try: objects = json.loads(stream_or_string) yield from PythonDeserializer(objects, **options) except (GeneratorExit, DeserializationError): raise except Exception as exc: raise DeserializationError() from exc class DjangoJSONEncoder(json.JSONEncoder): """ JSONEncoder subclass that knows how to encode date/time, decimal types, and UUIDs. """ def default(self, o): # See "Date Time String Format" in the ECMA-262 specification. if isinstance(o, datetime.datetime): r = o.isoformat() if o.microsecond: r = r[:23] + r[26:] if r.endswith('+00:00'): r = r[:-6] + 'Z' return r elif isinstance(o, datetime.date): return o.isoformat() elif isinstance(o, datetime.time): if is_aware(o): raise ValueError("JSON can't represent timezone-aware times.") r = o.isoformat() if o.microsecond: r = r[:12] return r elif isinstance(o, datetime.timedelta): return duration_iso_string(o) elif isinstance(o, (decimal.Decimal, uuid.UUID, Promise)): return str(o) else: return super().default(o)
cc39e0f598ce1d467f2c5f33c60215eddb46ec382d3ffdedba3b1b7a8ad12fe7
""" Module for abstract serializer/unserializer base classes. """ from io import StringIO from django.db import models class SerializerDoesNotExist(KeyError): """The requested serializer was not found.""" pass class SerializationError(Exception): """Something bad happened during serialization.""" pass class DeserializationError(Exception): """Something bad happened during deserialization.""" @classmethod def WithData(cls, original_exc, model, fk, field_value): """ Factory method for creating a deserialization error which has a more explanatory message. """ return cls("%s: (%s:pk=%s) field_value was '%s'" % (original_exc, model, fk, field_value)) class M2MDeserializationError(Exception): """Something bad happened during deserialization of a ManyToManyField.""" def __init__(self, original_exc, pk): self.original_exc = original_exc self.pk = pk class ProgressBar: progress_width = 75 def __init__(self, output, total_count): self.output = output self.total_count = total_count self.prev_done = 0 def update(self, count): if not self.output: return perc = count * 100 // self.total_count done = perc * self.progress_width // 100 if self.prev_done >= done: return self.prev_done = done cr = '' if self.total_count == 1 else '\r' self.output.write(cr + '[' + '.' * done + ' ' * (self.progress_width - done) + ']') if done == self.progress_width: self.output.write('\n') self.output.flush() class Serializer: """ Abstract serializer base class. """ # Indicates if the implemented serializer is only available for # internal Django use. internal_use_only = False progress_class = ProgressBar stream_class = StringIO def serialize(self, queryset, *, stream=None, fields=None, use_natural_foreign_keys=False, use_natural_primary_keys=False, progress_output=None, object_count=0, **options): """ Serialize a queryset. """ self.options = options self.stream = stream if stream is not None else self.stream_class() self.selected_fields = fields self.use_natural_foreign_keys = use_natural_foreign_keys self.use_natural_primary_keys = use_natural_primary_keys progress_bar = self.progress_class(progress_output, object_count) self.start_serialization() self.first = True for count, obj in enumerate(queryset, start=1): self.start_object(obj) # Use the concrete parent class' _meta instead of the object's _meta # This is to avoid local_fields problems for proxy models. Refs #17717. concrete_model = obj._meta.concrete_model # When using natural primary keys, retrieve the pk field of the # parent for multi-table inheritance child models. That field must # be serialized, otherwise deserialization isn't possible. if self.use_natural_primary_keys: pk = concrete_model._meta.pk pk_parent = pk if pk.remote_field and pk.remote_field.parent_link else None else: pk_parent = None for field in concrete_model._meta.local_fields: if field.serialize or field is pk_parent: if field.remote_field is None: if self.selected_fields is None or field.attname in self.selected_fields: self.handle_field(obj, field) else: if self.selected_fields is None or field.attname[:-3] in self.selected_fields: self.handle_fk_field(obj, field) for field in concrete_model._meta.many_to_many: if field.serialize: if self.selected_fields is None or field.attname in self.selected_fields: self.handle_m2m_field(obj, field) self.end_object(obj) progress_bar.update(count) self.first = self.first and False self.end_serialization() return self.getvalue() def start_serialization(self): """ Called when serializing of the queryset starts. 
""" raise NotImplementedError('subclasses of Serializer must provide a start_serialization() method') def end_serialization(self): """ Called when serializing of the queryset ends. """ pass def start_object(self, obj): """ Called when serializing of an object starts. """ raise NotImplementedError('subclasses of Serializer must provide a start_object() method') def end_object(self, obj): """ Called when serializing of an object ends. """ pass def handle_field(self, obj, field): """ Called to handle each individual (non-relational) field on an object. """ raise NotImplementedError('subclasses of Serializer must provide an handle_field() method') def handle_fk_field(self, obj, field): """ Called to handle a ForeignKey field. """ raise NotImplementedError('subclasses of Serializer must provide an handle_fk_field() method') def handle_m2m_field(self, obj, field): """ Called to handle a ManyToManyField. """ raise NotImplementedError('subclasses of Serializer must provide an handle_m2m_field() method') def getvalue(self): """ Return the fully serialized queryset (or None if the output stream is not seekable). """ if callable(getattr(self.stream, 'getvalue', None)): return self.stream.getvalue() class Deserializer: """ Abstract base deserializer class. """ def __init__(self, stream_or_string, **options): """ Init this serializer given a stream or a string """ self.options = options if isinstance(stream_or_string, str): self.stream = StringIO(stream_or_string) else: self.stream = stream_or_string def __iter__(self): return self def __next__(self): """Iteration iterface -- return the next item in the stream""" raise NotImplementedError('subclasses of Deserializer must provide a __next__() method') class DeserializedObject: """ A deserialized model. Basically a container for holding the pre-saved deserialized data along with the many-to-many data saved with the object. Call ``save()`` to save the object (with the many-to-many data) to the database; call ``save(save_m2m=False)`` to save just the object fields (and not touch the many-to-many stuff.) """ def __init__(self, obj, m2m_data=None): self.object = obj self.m2m_data = m2m_data def __repr__(self): return "<%s: %s(pk=%s)>" % ( self.__class__.__name__, self.object._meta.label, self.object.pk, ) def save(self, save_m2m=True, using=None, **kwargs): # Call save on the Model baseclass directly. This bypasses any # model-defined save. The save is also forced to be raw. # raw=True is passed to any pre/post_save signals. models.Model.save_base(self.object, using=using, raw=True, **kwargs) if self.m2m_data and save_m2m: for accessor_name, object_list in self.m2m_data.items(): getattr(self.object, accessor_name).set(object_list) # prevent a second (possibly accidental) call to save() from saving # the m2m data twice. self.m2m_data = None def build_instance(Model, data, db): """ Build a model instance. If the model instance doesn't have a primary key and the model supports natural keys, try to retrieve it from the database. 
""" obj = Model(**data) if (obj.pk is None and hasattr(Model, 'natural_key') and hasattr(Model._default_manager, 'get_by_natural_key')): natural_key = obj.natural_key() try: obj.pk = Model._default_manager.db_manager(db).get_by_natural_key(*natural_key).pk except Model.DoesNotExist: pass return obj def deserialize_m2m_values(field, field_value, using): model = field.remote_field.model if hasattr(model._default_manager, 'get_by_natural_key'): def m2m_convert(value): if hasattr(value, '__iter__') and not isinstance(value, str): return model._default_manager.db_manager(using).get_by_natural_key(*value).pk else: return model._meta.pk.to_python(value) else: def m2m_convert(v): return model._meta.pk.to_python(v) try: values = [] for pk in field_value: values.append(m2m_convert(pk)) return values except Exception as e: raise M2MDeserializationError(e, pk) def deserialize_fk_value(field, field_value, using): if field_value is None: return None model = field.remote_field.model default_manager = model._default_manager field_name = field.remote_field.field_name if (hasattr(default_manager, 'get_by_natural_key') and hasattr(field_value, '__iter__') and not isinstance(field_value, str)): obj = default_manager.db_manager(using).get_by_natural_key(*field_value) value = getattr(obj, field_name) # If this is a natural foreign key to an object that has a FK/O2O as # the foreign key, use the FK value. if model._meta.pk.remote_field: value = value.pk return value return model._meta.get_field(field_name).to_python(field_value)
497c171e3b34ff6f6dc9c3ad5d7b3e956192fbe48105578794d6feae385e09a9
""" YAML serializer. Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__. """ import collections import decimal from io import StringIO import yaml from django.core.serializers.base import DeserializationError from django.core.serializers.python import ( Deserializer as PythonDeserializer, Serializer as PythonSerializer, ) from django.db import models # Use the C (faster) implementation if possible try: from yaml import CSafeLoader as SafeLoader from yaml import CSafeDumper as SafeDumper except ImportError: from yaml import SafeLoader, SafeDumper class DjangoSafeDumper(SafeDumper): def represent_decimal(self, data): return self.represent_scalar('tag:yaml.org,2002:str', str(data)) def represent_ordered_dict(self, data): return self.represent_mapping('tag:yaml.org,2002:map', data.items()) DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal) DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict) class Serializer(PythonSerializer): """Convert a queryset to YAML.""" internal_use_only = False def handle_field(self, obj, field): # A nasty special case: base YAML doesn't support serialization of time # types (as opposed to dates or datetimes, which it does support). Since # we want to use the "safe" serializer for better interoperability, we # need to do something with those pesky times. Converting 'em to strings # isn't perfect, but it's better than a "!!python/time" type which would # halt deserialization under any other language. if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None: self._current[field.name] = str(getattr(obj, field.name)) else: super().handle_field(obj, field) def end_serialization(self): yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options) def getvalue(self): # Grandparent super return super(PythonSerializer, self).getvalue() def Deserializer(stream_or_string, **options): """Deserialize a stream or string of YAML data.""" if isinstance(stream_or_string, bytes): stream_or_string = stream_or_string.decode() if isinstance(stream_or_string, str): stream = StringIO(stream_or_string) else: stream = stream_or_string try: yield from PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options) except (GeneratorExit, DeserializationError): raise except Exception as exc: raise DeserializationError() from exc
c178a1e1eb8c3c66c6c2500d03b981ad0a15f4b9598e5b41008f22e3e792a04b
""" A Python "serializer". Doesn't do much serializing per se -- just converts to and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for other serializers. """ from collections import OrderedDict from django.apps import apps from django.core.serializers import base from django.db import DEFAULT_DB_ALIAS, models from django.utils.encoding import is_protected_type class Serializer(base.Serializer): """ Serialize a QuerySet to basic Python objects. """ internal_use_only = True def start_serialization(self): self._current = None self.objects = [] def end_serialization(self): pass def start_object(self, obj): self._current = OrderedDict() def end_object(self, obj): self.objects.append(self.get_dump_object(obj)) self._current = None def get_dump_object(self, obj): data = OrderedDict([('model', str(obj._meta))]) if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'): data["pk"] = self._value_from_field(obj, obj._meta.pk) data['fields'] = self._current return data def _value_from_field(self, obj, field): value = field.value_from_object(obj) # Protected types (i.e., primitives like None, numbers, dates, # and Decimals) are passed through as is. All other values are # converted to string first. return value if is_protected_type(value) else field.value_to_string(obj) def handle_field(self, obj, field): self._current[field.name] = self._value_from_field(obj, field) def handle_fk_field(self, obj, field): if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): related = getattr(obj, field.name) if related: value = related.natural_key() else: value = None else: value = self._value_from_field(obj, field) self._current[field.name] = value def handle_m2m_field(self, obj, field): if field.remote_field.through._meta.auto_created: if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): def m2m_value(value): return value.natural_key() else: def m2m_value(value): return self._value_from_field(value, value._meta.pk) self._current[field.name] = [ m2m_value(related) for related in getattr(obj, field.name).iterator() ] def getvalue(self): return self.objects def Deserializer(object_list, *, using=DEFAULT_DB_ALIAS, ignorenonexistent=False, **options): """ Deserialize simple Python objects back into Django ORM instances. It's expected that you pass the Python objects themselves (instead of a stream or a string) to the constructor """ field_names_cache = {} # Model: <list of field_names> for d in object_list: # Look up the model and starting build a dict of data for it. 
try: Model = _get_model(d["model"]) except base.DeserializationError: if ignorenonexistent: continue else: raise data = {} if 'pk' in d: try: data[Model._meta.pk.attname] = Model._meta.pk.to_python(d.get('pk')) except Exception as e: raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), None) m2m_data = {} if Model not in field_names_cache: field_names_cache[Model] = {f.name for f in Model._meta.get_fields()} field_names = field_names_cache[Model] # Handle each field for (field_name, field_value) in d["fields"].items(): if ignorenonexistent and field_name not in field_names: # skip fields no longer on model continue field = Model._meta.get_field(field_name) # Handle M2M relations if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel): try: values = base.deserialize_m2m_values(field, field_value, using) except base.M2MDeserializationError as e: raise base.DeserializationError.WithData(e.original_exc, d['model'], d.get('pk'), e.pk) m2m_data[field.name] = values # Handle FK fields elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel): try: value = base.deserialize_fk_value(field, field_value, using) except Exception as e: raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value) data[field.attname] = value # Handle all other fields else: try: data[field.name] = field.to_python(field_value) except Exception as e: raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value) obj = base.build_instance(Model, data, using) yield base.DeserializedObject(obj, m2m_data) def _get_model(model_identifier): """Look up a model from an "app_label.model_name" string.""" try: return apps.get_model(model_identifier) except (LookupError, TypeError): raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
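The "python" format is useful as an intermediate representation; a sketch, with ``Author`` again a hypothetical installed model:

from django.core import serializers

objects = serializers.serialize('python', Author.objects.all())
# objects is a list of dicts shaped roughly like:
#   [{'model': 'app.author', 'pk': 1, 'fields': {'name': '...'}}]
for deserialized in serializers.deserialize('python', objects):
    deserialized.save()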
cdee5e97a3a949a2a776155d5a62300424e5ec3f7b5f8a143fe896507f4ab8d2
""" XML serializer. """ from xml.dom import pulldom from xml.sax import handler from xml.sax.expatreader import ExpatParser as _ExpatParser from django.apps import apps from django.conf import settings from django.core.serializers import base from django.db import DEFAULT_DB_ALIAS, models from django.utils.xmlutils import ( SimplerXMLGenerator, UnserializableContentError, ) class Serializer(base.Serializer): """Serialize a QuerySet to XML.""" def indent(self, level): if self.options.get('indent') is not None: self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level) def start_serialization(self): """ Start serialization -- open the XML document and the root element. """ self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET)) self.xml.startDocument() self.xml.startElement("django-objects", {"version": "1.0"}) def end_serialization(self): """ End serialization -- end the document. """ self.indent(0) self.xml.endElement("django-objects") self.xml.endDocument() def start_object(self, obj): """ Called as each object is handled. """ if not hasattr(obj, "_meta"): raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj)) self.indent(1) attrs = {'model': str(obj._meta)} if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'): obj_pk = obj.pk if obj_pk is not None: attrs['pk'] = str(obj_pk) self.xml.startElement("object", attrs) def end_object(self, obj): """ Called after handling all fields for an object. """ self.indent(1) self.xml.endElement("object") def handle_field(self, obj, field): """ Handle each field on an object (except for ForeignKeys and ManyToManyFields). """ self.indent(2) self.xml.startElement('field', { 'name': field.name, 'type': field.get_internal_type(), }) # Get a "string version" of the object's data. if getattr(obj, field.name) is not None: try: self.xml.characters(field.value_to_string(obj)) except UnserializableContentError: raise ValueError("%s.%s (pk:%s) contains unserializable characters" % ( obj.__class__.__name__, field.name, obj.pk)) else: self.xml.addQuickElement("None") self.xml.endElement("field") def handle_fk_field(self, obj, field): """ Handle a ForeignKey (they need to be treated slightly differently from regular fields). """ self._start_relational_field(field) related_att = getattr(obj, field.get_attname()) if related_att is not None: if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): related = getattr(obj, field.name) # If related object has a natural key, use it related = related.natural_key() # Iterable natural keys are rolled out as subelements for key_value in related: self.xml.startElement("natural", {}) self.xml.characters(str(key_value)) self.xml.endElement("natural") else: self.xml.characters(str(related_att)) else: self.xml.addQuickElement("None") self.xml.endElement("field") def handle_m2m_field(self, obj, field): """ Handle a ManyToManyField. Related objects are only serialized as references to the object's PK (i.e. the related *data* is not dumped, just the relation). 
""" if field.remote_field.through._meta.auto_created: self._start_relational_field(field) if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): # If the objects in the m2m have a natural key, use it def handle_m2m(value): natural = value.natural_key() # Iterable natural keys are rolled out as subelements self.xml.startElement("object", {}) for key_value in natural: self.xml.startElement("natural", {}) self.xml.characters(str(key_value)) self.xml.endElement("natural") self.xml.endElement("object") else: def handle_m2m(value): self.xml.addQuickElement("object", attrs={ 'pk': str(value.pk) }) for relobj in getattr(obj, field.name).iterator(): handle_m2m(relobj) self.xml.endElement("field") def _start_relational_field(self, field): """Output the <field> element for relational fields.""" self.indent(2) self.xml.startElement('field', { 'name': field.name, 'rel': field.remote_field.__class__.__name__, 'to': str(field.remote_field.model._meta), }) class Deserializer(base.Deserializer): """Deserialize XML.""" def __init__(self, stream_or_string, *, using=DEFAULT_DB_ALIAS, ignorenonexistent=False, **options): super().__init__(stream_or_string, **options) self.event_stream = pulldom.parse(self.stream, self._make_parser()) self.db = using self.ignore = ignorenonexistent def _make_parser(self): """Create a hardened XML parser (no custom/external entities).""" return DefusedExpatParser() def __next__(self): for event, node in self.event_stream: if event == "START_ELEMENT" and node.nodeName == "object": self.event_stream.expandNode(node) return self._handle_object(node) raise StopIteration def _handle_object(self, node): """Convert an <object> node to a DeserializedObject.""" # Look up the model using the model loading mechanism. If this fails, # bail. Model = self._get_model_from_node(node, "model") # Start building a data dictionary from the object. data = {} if node.hasAttribute('pk'): data[Model._meta.pk.attname] = Model._meta.pk.to_python( node.getAttribute('pk')) # Also start building a dict of m2m data (this is saved as # {m2m_accessor_attribute : [list_of_related_objects]}) m2m_data = {} field_names = {f.name for f in Model._meta.get_fields()} # Deserialize each field. for field_node in node.getElementsByTagName("field"): # If the field is missing the name attribute, bail (are you # sensing a pattern here?) field_name = field_node.getAttribute("name") if not field_name: raise base.DeserializationError("<field> node is missing the 'name' attribute") # Get the field from the Model. This will raise a # FieldDoesNotExist if, well, the field doesn't exist, which will # be propagated correctly unless ignorenonexistent=True is used. if self.ignore and field_name not in field_names: continue field = Model._meta.get_field(field_name) # As is usually the case, relation fields get the special treatment. if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel): m2m_data[field.name] = self._handle_m2m_field_node(field_node, field) elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel): data[field.attname] = self._handle_fk_field_node(field_node, field) else: if field_node.getElementsByTagName('None'): value = None else: value = field.to_python(getInnerText(field_node).strip()) data[field.name] = value obj = base.build_instance(Model, data, self.db) # Return a DeserializedObject so that the m2m data has a place to live. 
return base.DeserializedObject(obj, m2m_data) def _handle_fk_field_node(self, node, field): """ Handle a <field> node for a ForeignKey """ # Check if there is a child node named 'None', returning None if so. if node.getElementsByTagName('None'): return None else: model = field.remote_field.model if hasattr(model._default_manager, 'get_by_natural_key'): keys = node.getElementsByTagName('natural') if keys: # If there are 'natural' subelements, it must be a natural key field_value = [getInnerText(k).strip() for k in keys] obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value) obj_pk = getattr(obj, field.remote_field.field_name) # If this is a natural foreign key to an object that # has a FK/O2O as the foreign key, use the FK value if field.remote_field.model._meta.pk.remote_field: obj_pk = obj_pk.pk else: # Otherwise, treat like a normal PK field_value = getInnerText(node).strip() obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value) return obj_pk else: field_value = getInnerText(node).strip() return model._meta.get_field(field.remote_field.field_name).to_python(field_value) def _handle_m2m_field_node(self, node, field): """ Handle a <field> node for a ManyToManyField. """ model = field.remote_field.model default_manager = model._default_manager if hasattr(default_manager, 'get_by_natural_key'): def m2m_convert(n): keys = n.getElementsByTagName('natural') if keys: # If there are 'natural' subelements, it must be a natural key field_value = [getInnerText(k).strip() for k in keys] obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk else: # Otherwise, treat like a normal PK value. obj_pk = model._meta.pk.to_python(n.getAttribute('pk')) return obj_pk else: def m2m_convert(n): return model._meta.pk.to_python(n.getAttribute('pk')) return [m2m_convert(c) for c in node.getElementsByTagName("object")] def _get_model_from_node(self, node, attr): """ Look up a model from a <object model=...> or a <field rel=... to=...> node. """ model_identifier = node.getAttribute(attr) if not model_identifier: raise base.DeserializationError( "<%s> node is missing the required '%s' attribute" % (node.nodeName, attr)) try: return apps.get_model(model_identifier) except (LookupError, TypeError): raise base.DeserializationError( "<%s> node has invalid model identifier: '%s'" % (node.nodeName, model_identifier)) def getInnerText(node): """Get all the inner text of a DOM node (recursively).""" # inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html inner_text = [] for child in node.childNodes: if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE: inner_text.append(child.data) elif child.nodeType == child.ELEMENT_NODE: inner_text.extend(getInnerText(child)) else: pass return "".join(inner_text) # Below code based on Christian Heimes' defusedxml class DefusedExpatParser(_ExpatParser): """ An expat parser hardened against XML bomb attacks. 
    Forbid DTDs and external entity references.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setFeature(handler.feature_external_ges, False)
        self.setFeature(handler.feature_external_pes, False)

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        raise DTDForbidden(name, sysid, pubid)

    def entity_decl(self, name, is_parameter_entity, value, base, sysid, pubid, notation_name):
        raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)

    def external_entity_ref_handler(self, context, base, sysid, pubid):
        raise ExternalReferenceForbidden(context, base, sysid, pubid)

    def reset(self):
        _ExpatParser.reset(self)
        parser = self._parser
        parser.StartDoctypeDeclHandler = self.start_doctype_decl
        parser.EntityDeclHandler = self.entity_decl
        parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler


class DefusedXmlException(ValueError):
    """Base exception."""

    def __repr__(self):
        return str(self)


class DTDForbidden(DefusedXmlException):
    """Document type definition is forbidden."""

    def __init__(self, name, sysid, pubid):
        super().__init__()
        self.name = name
        self.sysid = sysid
        self.pubid = pubid

    def __str__(self):
        tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
        return tpl.format(self.name, self.sysid, self.pubid)


class EntitiesForbidden(DefusedXmlException):
    """Entity definition is forbidden."""

    def __init__(self, name, value, base, sysid, pubid, notation_name):
        super().__init__()
        self.name = name
        self.value = value
        self.base = base
        self.sysid = sysid
        self.pubid = pubid
        self.notation_name = notation_name

    def __str__(self):
        tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
        return tpl.format(self.name, self.sysid, self.pubid)


class ExternalReferenceForbidden(DefusedXmlException):
    """Resolving an external reference is forbidden."""

    def __init__(self, context, base, sysid, pubid):
        super().__init__()
        self.context = context
        self.base = base
        self.sysid = sysid
        self.pubid = pubid

    def __str__(self):
        tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
        return tpl.format(self.sysid, self.pubid)
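A sketch of the hardening in action: a document carrying a DTD makes the deserializer fail fast instead of parsing it (assumes a configured Django project; the document is illustrative):

from django.core import serializers
from django.core.serializers.xml_serializer import DefusedXmlException

xml = '<!DOCTYPE foo [<!ENTITY bomb "x">]><django-objects version="1.0"></django-objects>'
try:
    list(serializers.deserialize('xml', xml))
except DefusedXmlException as exc:
    print(exc)  # DTDForbidden(name='foo', system_id=None, public_id=None)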
2895ce9b67cbab86496afa83c248587482916d2fed70fb6c3e62e0b3294d5fa1
import cgi import codecs import re from io import BytesIO from django.conf import settings from django.core import signals from django.core.handlers import base from django.http import HttpRequest, QueryDict, parse_cookie from django.urls import set_script_prefix from django.utils.encoding import repercent_broken_unicode from django.utils.functional import cached_property _slashes_re = re.compile(br'/+') class LimitedStream: """Wrap another stream to disallow reading it past a number of bytes.""" def __init__(self, stream, limit, buf_size=64 * 1024 * 1024): self.stream = stream self.remaining = limit self.buffer = b'' self.buf_size = buf_size def _read_limited(self, size=None): if size is None or size > self.remaining: size = self.remaining if size == 0: return b'' result = self.stream.read(size) self.remaining -= len(result) return result def read(self, size=None): if size is None: result = self.buffer + self._read_limited() self.buffer = b'' elif size < len(self.buffer): result = self.buffer[:size] self.buffer = self.buffer[size:] else: # size >= len(self.buffer) result = self.buffer + self._read_limited(size - len(self.buffer)) self.buffer = b'' return result def readline(self, size=None): while b'\n' not in self.buffer and \ (size is None or len(self.buffer) < size): if size: # since size is not None here, len(self.buffer) < size chunk = self._read_limited(size - len(self.buffer)) else: chunk = self._read_limited() if not chunk: break self.buffer += chunk sio = BytesIO(self.buffer) if size: line = sio.readline(size) else: line = sio.readline() self.buffer = sio.read() return line class WSGIRequest(HttpRequest): def __init__(self, environ): script_name = get_script_name(environ) # If PATH_INFO is empty (e.g. accessing the SCRIPT_NAME URL without a # trailing slash), operate as if '/' was requested. path_info = get_path_info(environ) or '/' self.environ = environ self.path_info = path_info # be careful to only replace the first slash in the path because of # http://test/something and http://test//something being different as # stated in http://www.ietf.org/rfc/rfc2396.txt self.path = '%s/%s' % (script_name.rstrip('/'), path_info.replace('/', '', 1)) self.META = environ self.META['PATH_INFO'] = path_info self.META['SCRIPT_NAME'] = script_name self.method = environ['REQUEST_METHOD'].upper() self.content_type, self.content_params = cgi.parse_header(environ.get('CONTENT_TYPE', '')) if 'charset' in self.content_params: try: codecs.lookup(self.content_params['charset']) except LookupError: pass else: self.encoding = self.content_params['charset'] try: content_length = int(environ.get('CONTENT_LENGTH')) except (ValueError, TypeError): content_length = 0 self._stream = LimitedStream(self.environ['wsgi.input'], content_length) self._read_started = False self.resolver_match = None def _get_scheme(self): return self.environ.get('wsgi.url_scheme') @cached_property def GET(self): # The WSGI spec says 'QUERY_STRING' may be absent. 
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '') return QueryDict(raw_query_string, encoding=self._encoding) def _get_post(self): if not hasattr(self, '_post'): self._load_post_and_files() return self._post def _set_post(self, post): self._post = post @cached_property def COOKIES(self): raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '') return parse_cookie(raw_cookie) @property def FILES(self): if not hasattr(self, '_files'): self._load_post_and_files() return self._files POST = property(_get_post, _set_post) class WSGIHandler(base.BaseHandler): request_class = WSGIRequest def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.load_middleware() def __call__(self, environ, start_response): set_script_prefix(get_script_name(environ)) signals.request_started.send(sender=self.__class__, environ=environ) request = self.request_class(environ) response = self.get_response(request) response._handler_class = self.__class__ status = '%d %s' % (response.status_code, response.reason_phrase) response_headers = list(response.items()) for c in response.cookies.values(): response_headers.append(('Set-Cookie', c.output(header=''))) start_response(status, response_headers) if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'): response = environ['wsgi.file_wrapper'](response.file_to_stream) return response def get_path_info(environ): """Return the HTTP request's PATH_INFO as a string.""" path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/') return repercent_broken_unicode(path_info).decode() def get_script_name(environ): """ Return the equivalent of the HTTP request's SCRIPT_NAME environment variable. If Apache mod_rewrite is used, return what would have been the script name prior to any rewriting (so it's the script name as seen from the client's perspective), unless the FORCE_SCRIPT_NAME setting is set (to anything). """ if settings.FORCE_SCRIPT_NAME is not None: return settings.FORCE_SCRIPT_NAME # If Apache's mod_rewrite had a whack at the URL, Apache set either # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any # rewrites. Unfortunately not every Web server (lighttpd!) passes this # information through all the time, so FORCE_SCRIPT_NAME, above, is still # needed. script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '') or get_bytes_from_wsgi(environ, 'REDIRECT_URL', '') if script_url: if b'//' in script_url: # mod_wsgi squashes multiple successive slashes in PATH_INFO, # do the same with script_url before manipulating paths (#17133). script_url = _slashes_re.sub(b'/', script_url) path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '') script_name = script_url[:-len(path_info)] if path_info else script_url else: script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '') return script_name.decode() def get_bytes_from_wsgi(environ, key, default): """ Get a value from the WSGI environ dictionary as bytes. key and default should be strings. """ value = environ.get(key, default) # Non-ASCII values in the WSGI environ are arbitrarily decoded with # ISO-8859-1. This is wrong for Django websites where UTF-8 is the default. # Re-encode to recover the original bytestring. return value.encode('iso-8859-1') def get_str_from_wsgi(environ, key, default): """ Get a value from the WSGI environ dictionary as str. key and default should be str objects. """ value = get_bytes_from_wsgi(environ, key, default) return value.decode(errors='replace')
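A small illustration of LimitedStream's contract: it never reads past the declared limit, even when the underlying stream holds more data:

from io import BytesIO

from django.core.handlers.wsgi import LimitedStream

stream = LimitedStream(BytesIO(b'abcdef'), 4)
stream.read(2)  # b'ab'
stream.read()   # b'cd' -- the rest of the 4-byte limit, not b'cdef'
stream.read()   # b''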
63c0d4b66ff549e24c8e049ef366a5b23172b02e24584639ea4f4c650bef7f22
import logging import sys from functools import wraps from django.conf import settings from django.core import signals from django.core.exceptions import ( PermissionDenied, RequestDataTooBig, SuspiciousOperation, TooManyFieldsSent, ) from django.http import Http404 from django.http.multipartparser import MultiPartParserError from django.urls import get_resolver, get_urlconf from django.utils.log import log_response from django.views import debug def convert_exception_to_response(get_response): """ Wrap the given get_response callable in exception-to-response conversion. All exceptions will be converted. All known 4xx exceptions (Http404, PermissionDenied, MultiPartParserError, SuspiciousOperation) will be converted to the appropriate response, and all other exceptions will be converted to 500 responses. This decorator is automatically applied to all middleware to ensure that no middleware leaks an exception and that the next middleware in the stack can rely on getting a response instead of an exception. """ @wraps(get_response) def inner(request): try: response = get_response(request) except Exception as exc: response = response_for_exception(request, exc) return response return inner def response_for_exception(request, exc): if isinstance(exc, Http404): if settings.DEBUG: response = debug.technical_404_response(request, exc) else: response = get_exception_response(request, get_resolver(get_urlconf()), 404, exc) elif isinstance(exc, PermissionDenied): response = get_exception_response(request, get_resolver(get_urlconf()), 403, exc) log_response( 'Forbidden (Permission denied): %s', request.path, response=response, request=request, exc_info=sys.exc_info(), ) elif isinstance(exc, MultiPartParserError): response = get_exception_response(request, get_resolver(get_urlconf()), 400, exc) log_response( 'Bad request (Unable to parse request body): %s', request.path, response=response, request=request, exc_info=sys.exc_info(), ) elif isinstance(exc, SuspiciousOperation): if isinstance(exc, (RequestDataTooBig, TooManyFieldsSent)): # POST data can't be accessed again, otherwise the original # exception would be raised. request._mark_post_parse_error() # The request logger receives events for any problematic request # The security logger receives events for all SuspiciousOperations security_logger = logging.getLogger('django.security.%s' % exc.__class__.__name__) security_logger.error( str(exc), extra={'status_code': 400, 'request': request}, ) if settings.DEBUG: response = debug.technical_500_response(request, *sys.exc_info(), status_code=400) else: response = get_exception_response(request, get_resolver(get_urlconf()), 400, exc) elif isinstance(exc, SystemExit): # Allow sys.exit() to actually exit. See tickets #1023 and #4701 raise else: signals.got_request_exception.send(sender=None, request=request) response = handle_uncaught_exception(request, get_resolver(get_urlconf()), sys.exc_info()) log_response( '%s: %s', response.reason_phrase, request.path, response=response, request=request, exc_info=sys.exc_info(), ) # Force a TemplateResponse to be rendered. 
if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)): response = response.render() return response def get_exception_response(request, resolver, status_code, exception, sender=None): try: callback, param_dict = resolver.resolve_error_handler(status_code) response = callback(request, **{**param_dict, 'exception': exception}) except Exception: signals.got_request_exception.send(sender=sender, request=request) response = handle_uncaught_exception(request, resolver, sys.exc_info()) return response def handle_uncaught_exception(request, resolver, exc_info): """ Processing for any otherwise uncaught exceptions (those that will generate HTTP 500 responses). """ if settings.DEBUG_PROPAGATE_EXCEPTIONS: raise if settings.DEBUG: return debug.technical_500_response(request, *exc_info) # Return an HttpResponse that displays a friendly error message. callback, param_dict = resolver.resolve_error_handler(500) return callback(request, **param_dict)
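The error handlers resolved above are ordinary views; a sketch of wiring a custom 404 handler in a project's root URLconf (names are hypothetical):

# In the root urls.py:
from django.http import HttpResponseNotFound

def page_not_found(request, exception):
    return HttpResponseNotFound('<h1>Not found</h1>')

handler404 = page_not_found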
feec70f5fb6773b0e5ae39efb249815bb732ca3ee778589ab672422e12783bcb
import logging
import types

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from django.db import connections, transaction
from django.urls import get_resolver, set_urlconf
from django.utils.log import log_response
from django.utils.module_loading import import_string

from .exception import convert_exception_to_response, get_exception_response

logger = logging.getLogger('django.request')


class BaseHandler:
    _view_middleware = None
    _template_response_middleware = None
    _exception_middleware = None
    _middleware_chain = None

    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE.

        Must be called after the environment is fixed (see __call__ in subclasses).
        """
        self._view_middleware = []
        self._template_response_middleware = []
        self._exception_middleware = []

        handler = convert_exception_to_response(self._get_response)
        for middleware_path in reversed(settings.MIDDLEWARE):
            middleware = import_string(middleware_path)
            try:
                mw_instance = middleware(handler)
            except MiddlewareNotUsed as exc:
                if settings.DEBUG:
                    if str(exc):
                        logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
                    else:
                        logger.debug('MiddlewareNotUsed: %r', middleware_path)
                continue

            if mw_instance is None:
                raise ImproperlyConfigured(
                    'Middleware factory %s returned None.' % middleware_path
                )

            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.insert(0, mw_instance.process_view)
            if hasattr(mw_instance, 'process_template_response'):
                self._template_response_middleware.append(mw_instance.process_template_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.append(mw_instance.process_exception)

            handler = convert_exception_to_response(mw_instance)

        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._middleware_chain = handler

    def make_view_atomic(self, view):
        non_atomic_requests = getattr(view, '_non_atomic_requests', set())
        for db in connections.all():
            if db.settings_dict['ATOMIC_REQUESTS'] and db.alias not in non_atomic_requests:
                view = transaction.atomic(using=db.alias)(view)
        return view

    def get_exception_response(self, request, resolver, status_code, exception):
        return get_exception_response(request, resolver, status_code, exception, self.__class__)

    def get_response(self, request):
        """Return an HttpResponse object for the given HttpRequest."""
        # Set up the default URL resolver for this thread.
        set_urlconf(settings.ROOT_URLCONF)

        response = self._middleware_chain(request)

        response._closable_objects.append(request)

        # If the exception handler returns a TemplateResponse that has not
        # been rendered, force it to be rendered.
        if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)):
            response = response.render()

        if response.status_code >= 400:
            log_response(
                '%s: %s', response.reason_phrase, request.path,
                response=response,
                request=request,
            )

        return response

    def _get_response(self, request):
        """
        Resolve and call the view, then apply view, exception, and
        template_response middleware. This method is everything that happens
        inside the request/response middleware.
""" response = None if hasattr(request, 'urlconf'): urlconf = request.urlconf set_urlconf(urlconf) resolver = get_resolver(urlconf) else: resolver = get_resolver() resolver_match = resolver.resolve(request.path_info) callback, callback_args, callback_kwargs = resolver_match request.resolver_match = resolver_match # Apply view middleware for middleware_method in self._view_middleware: response = middleware_method(request, callback, callback_args, callback_kwargs) if response: break if response is None: wrapped_callback = self.make_view_atomic(callback) try: response = wrapped_callback(request, *callback_args, **callback_kwargs) except Exception as e: response = self.process_exception_by_middleware(e, request) # Complain if the view returned None (a common error). if response is None: if isinstance(callback, types.FunctionType): # FBV view_name = callback.__name__ else: # CBV view_name = callback.__class__.__name__ + '.__call__' raise ValueError( "The view %s.%s didn't return an HttpResponse object. It " "returned None instead." % (callback.__module__, view_name) ) # If the response supports deferred rendering, apply template # response middleware and then render the response elif hasattr(response, 'render') and callable(response.render): for middleware_method in self._template_response_middleware: response = middleware_method(request, response) # Complain if the template response middleware returned None (a common error). if response is None: raise ValueError( "%s.process_template_response didn't return an " "HttpResponse object. It returned None instead." % (middleware_method.__self__.__class__.__name__) ) try: response = response.render() except Exception as e: response = self.process_exception_by_middleware(e, request) return response def process_exception_by_middleware(self, exception, request): """ Pass the exception to the exception middleware. If no middleware return a response for this exception, raise it. """ for middleware_method in self._exception_middleware: response = middleware_method(request, exception) if response: return response raise
2c10bde2a0c62d42f16ac47ded701bd311acb2bdbdf846cd5130e96ed9d0ea21
""" Tools for sending email. """ from django.conf import settings # Imported for backwards compatibility and for the sake # of a cleaner namespace. These symbols used to be in # django/core/mail.py before the introduction of email # backends and the subsequent reorganization (See #10355) from django.core.mail.message import ( DEFAULT_ATTACHMENT_MIME_TYPE, BadHeaderError, EmailMessage, EmailMultiAlternatives, SafeMIMEMultipart, SafeMIMEText, forbid_multi_line_headers, make_msgid, ) from django.core.mail.utils import DNS_NAME, CachedDnsName from django.utils.module_loading import import_string __all__ = [ 'CachedDnsName', 'DNS_NAME', 'EmailMessage', 'EmailMultiAlternatives', 'SafeMIMEText', 'SafeMIMEMultipart', 'DEFAULT_ATTACHMENT_MIME_TYPE', 'make_msgid', 'BadHeaderError', 'forbid_multi_line_headers', 'get_connection', 'send_mail', 'send_mass_mail', 'mail_admins', 'mail_managers', ] def get_connection(backend=None, fail_silently=False, **kwds): """Load an email backend and return an instance of it. If backend is None (default), use settings.EMAIL_BACKEND. Both fail_silently and other keyword arguments are used in the constructor of the backend. """ klass = import_string(backend or settings.EMAIL_BACKEND) return klass(fail_silently=fail_silently, **kwds) def send_mail(subject, message, from_email, recipient_list, fail_silently=False, auth_user=None, auth_password=None, connection=None, html_message=None): """ Easy wrapper for sending a single message to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. If auth_user is None, use the EMAIL_HOST_USER setting. If auth_password is None, use the EMAIL_HOST_PASSWORD setting. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly. """ connection = connection or get_connection( username=auth_user, password=auth_password, fail_silently=fail_silently, ) mail = EmailMultiAlternatives(subject, message, from_email, recipient_list, connection=connection) if html_message: mail.attach_alternative(html_message, 'text/html') return mail.send() def send_mass_mail(datatuple, fail_silently=False, auth_user=None, auth_password=None, connection=None): """ Given a datatuple of (subject, message, from_email, recipient_list), send each message to each recipient list. Return the number of emails sent. If from_email is None, use the DEFAULT_FROM_EMAIL setting. If auth_user and auth_password are set, use them to log in. If auth_user is None, use the EMAIL_HOST_USER setting. If auth_password is None, use the EMAIL_HOST_PASSWORD setting. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly. 
""" connection = connection or get_connection( username=auth_user, password=auth_password, fail_silently=fail_silently, ) messages = [ EmailMessage(subject, message, sender, recipient, connection=connection) for subject, message, sender, recipient in datatuple ] return connection.send_messages(messages) def mail_admins(subject, message, fail_silently=False, connection=None, html_message=None): """Send a message to the admins, as defined by the ADMINS setting.""" if not settings.ADMINS: return mail = EmailMultiAlternatives( '%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject), message, settings.SERVER_EMAIL, [a[1] for a in settings.ADMINS], connection=connection, ) if html_message: mail.attach_alternative(html_message, 'text/html') mail.send(fail_silently=fail_silently) def mail_managers(subject, message, fail_silently=False, connection=None, html_message=None): """Send a message to the managers, as defined by the MANAGERS setting.""" if not settings.MANAGERS: return mail = EmailMultiAlternatives( '%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject), message, settings.SERVER_EMAIL, [a[1] for a in settings.MANAGERS], connection=connection, ) if html_message: mail.attach_alternative(html_message, 'text/html') mail.send(fail_silently=fail_silently)
9d4416ab33c065fa9663627b4ac61f5658a160c633ea2fc0f07e43d294a406fa
""" Email message and email sending related helper functions. """ import socket # Cache the hostname, but do it lazily: socket.getfqdn() can take a couple of # seconds, which slows down the restart of the server. class CachedDnsName: def __str__(self): return self.get_fqdn() def get_fqdn(self): if not hasattr(self, '_fqdn'): self._fqdn = socket.getfqdn() return self._fqdn DNS_NAME = CachedDnsName()