hash (stringlengths 64-64) | content (stringlengths 0-1.51M)
---|---|
d1430bad9cf5688dac7513e42746335fdf5ca36215ead95d3fba44bc610f74fd | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j ខែ F ឆ្នាំ Y"
TIME_FORMAT = "G:i"
DATETIME_FORMAT = "j ខែ F ឆ្នាំ Y, G:i"
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "j M Y"
SHORT_DATETIME_FORMAT = "j M Y, G:i"
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
# NUMBER_GROUPING =
|
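Each row above pairs a content hash with one Django locale formats.py. The *_FORMAT constants are consumed by django.utils.formats; as a minimal sketch (assuming a configured Django settings module, and assuming the file above belongs to the "km" locale, inferred from the Khmer words in DATE_FORMAT), a date can be rendered with the active locale's DATE_FORMAT:

# Minimal sketch, assuming Django is configured and the file above is the
# "km" locale (an assumption based on the Khmer month words in DATE_FORMAT).
import datetime

from django.utils import formats, translation

with translation.override("km"):
    d = datetime.date(2006, 10, 25)
    print(formats.date_format(d))  # uses the active locale's DATE_FORMAT
    print(formats.date_format(d, "SHORT_DATE_FORMAT"))  # 'j M Y' here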
7c1bf202a05c017011a6d484de1c70cc560dc7794413c42b84dac25aeb863650 | # This file is distributed under the same license as the Django package.
#
DATE_FORMAT = r"j \d\e F \d\e Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = r"j \d\e F \d\e Y \a \l\a\s H:i"
YEAR_MONTH_FORMAT = r"F \d\e Y"
MONTH_DAY_FORMAT = r"j \d\e F"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday: ISO 8601
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '25/10/2006'
"%d/%m/%y", # '25/10/06'
"%Y%m%d", # '20061025'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S",
"%d/%m/%Y %H:%M:%S.%f",
"%d/%m/%Y %H:%M",
"%d/%m/%y %H:%M:%S",
"%d/%m/%y %H:%M:%S.%f",
"%d/%m/%y %H:%M",
]
DECIMAL_SEPARATOR = "." # ',' is also official (less common): NOM-008-SCFI-2002
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
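In Django's date-format syntax a backslash escapes the next character so it is emitted literally, which is why the file above writes r"j \d\e F \d\e Y": without the escapes, "d" and "e" would themselves be parsed as format characters. A small sketch with django.utils.dateformat:

# Sketch of backslash-escaped literals in Django's date syntax (assumes a
# configured Django settings module; month names follow the active language).
import datetime

from django.utils import dateformat

d = datetime.date(2006, 10, 25)
print(dateformat.format(d, r"j \d\e F \d\e Y"))
# Renders like '25 de octubre de 2006' when a Spanish locale is active.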
33eb6ef8b99365dfe860f347e82644b1dc49379dba45439f9cb1e543144bd0dd | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j בF Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j בF Y H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j בF"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y H:i"
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
# NUMBER_GROUPING =
|
cb540b13e486d34787c237b4964013a07b73e2deb6d51cff1d04720560a02bc7 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j. F Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j. F Y H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "d.m.Y"
SHORT_DATETIME_FORMAT = "d.m.Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
"%Y-%m-%d", # '2006-10-25'
"%d.%m.%Y", # '25.10.2006'
"%d.%m.%y", # '25.10.06'
# "%d. %b %Y", # '25. okt 2006'
# "%d %b %Y", # '25 okt 2006'
# "%d. %b. %Y", # '25. okt. 2006'
# "%d %b. %Y", # '25 okt. 2006'
# "%d. %B %Y", # '25. oktober 2006'
# "%d %B %Y", # '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%d.%m.%Y %H:%M:%S", # '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # '25.10.2006 14:30'
"%d.%m.%y %H:%M:%S", # '25.10.06 14:30:59'
"%d.%m.%y %H:%M:%S.%f", # '25.10.06 14:30:59.000200'
"%d.%m.%y %H:%M", # '25.10.06 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # non-breaking space
NUMBER_GROUPING = 3
|
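The three number constants at the end of each file drive django.utils.numberformat.format. A sketch with the separators used in the file above (comma decimal, non-breaking-space thousands, groups of three; assumes a configured Django settings module):

# Sketch: the separator constants above applied via django.utils.numberformat.
from decimal import Decimal

from django.utils.numberformat import format as number_format

print(number_format(Decimal("1234567.89"), ",", decimal_pos=2,
                    grouping=3, thousand_sep="\xa0", force_grouping=True))
# -> '1\xa0234\xa0567,89' (non-breaking spaces between digit groups)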
2b39126f72970707ccde093615ecf247e5bcfd160aa67945785b888b9ce5f92d | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "d F Y" # 25 Ottobre 2006
TIME_FORMAT = "H:i" # 14:30
DATETIME_FORMAT = "l d F Y H:i" # Mercoledì 25 Ottobre 2006 14:30
YEAR_MONTH_FORMAT = "F Y" # Ottobre 2006
MONTH_DAY_FORMAT = "j F" # 25 Ottobre
SHORT_DATE_FORMAT = "d/m/Y" # 25/12/2009
SHORT_DATETIME_FORMAT = "d/m/Y H:i" # 25/10/2009 14:30
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '25/10/2006'
"%Y/%m/%d", # '2006/10/25'
"%d-%m-%Y", # '25-10-2006'
"%Y-%m-%d", # '2006-10-25'
"%d-%m-%y", # '25-10-06'
"%d/%m/%y", # '25/10/06'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S", # '25/10/2006 14:30:59'
"%d/%m/%Y %H:%M:%S.%f", # '25/10/2006 14:30:59.000200'
"%d/%m/%Y %H:%M", # '25/10/2006 14:30'
"%d/%m/%y %H:%M:%S", # '25/10/06 14:30:59'
"%d/%m/%y %H:%M:%S.%f", # '25/10/06 14:30:59.000200'
"%d/%m/%y %H:%M", # '25/10/06 14:30'
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%d-%m-%Y %H:%M:%S", # '25-10-2006 14:30:59'
"%d-%m-%Y %H:%M:%S.%f", # '25-10-2006 14:30:59.000200'
"%d-%m-%Y %H:%M", # '25-10-2006 14:30'
"%d-%m-%y %H:%M:%S", # '25-10-06 14:30:59'
"%d-%m-%y %H:%M:%S.%f", # '25-10-06 14:30:59.000200'
"%d-%m-%y %H:%M", # '25-10-06 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
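Django form fields consume these *_INPUT_FORMATS lists by trying each strptime pattern in order and keeping the first successful parse, so list order matters. A standalone sketch of that loop (pure stdlib; the format list here is a shortened illustration):

# Standalone sketch of how *_INPUT_FORMATS are consumed: try each strptime
# pattern in order; the first successful parse wins.
from datetime import datetime

DATE_INPUT_FORMATS = ["%d/%m/%Y", "%Y/%m/%d", "%d-%m-%Y", "%Y-%m-%d"]

def parse_date(value):
    for fmt in DATE_INPUT_FORMATS:
        try:
            return datetime.strptime(value, fmt).date()
        except ValueError:
            continue
    raise ValueError("%r matches no DATE_INPUT_FORMATS entry" % value)

print(parse_date("25/10/2006"))  # -> 2006-10-25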
4fce2ade830a9e0f8beffc7298fa98c29cecefc2efbdf1f2d9dadf4918fc5e31 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j F Y H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "j F Y"
SHORT_DATETIME_FORMAT = "j F Y H:i"
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%Y/%m/%d", # '2006/10/25'
]
TIME_INPUT_FORMATS = [
"%H:%M", # '14:30
"%H:%M:%S", # '14:30:59'
]
DATETIME_INPUT_FORMATS = [
"%Y/%m/%d %H:%M", # '2006/10/25 14:30'
"%Y/%m/%d %H:%M:%S", # '2006/10/25 14:30:59'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
480fe309237023cfa9efdb241e82f1acf2d99e4caad4f55a7704fa80cb7b9ff3 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "d F Y"
TIME_FORMAT = "g.i.A"
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "Y-m-d"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
# NUMBER_GROUPING =
|
3f722cc61179acd159e67096512c897c52dbd15d50757fd79feb5876b7259794 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y"
TIME_FORMAT = "P"
DATETIME_FORMAT = "j F Y P"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "d.m.Y"
SHORT_DATETIME_FORMAT = "d.m.Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d.%m.%Y", # '25.10.2006'
"%d.%m.%y", # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
"%d.%m.%Y %H:%M:%S", # '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # '25.10.2006 14:30'
"%d.%m.%Y", # '25.10.2006'
"%d.%m.%y %H:%M:%S", # '25.10.06 14:30:59'
"%d.%m.%y %H:%M:%S.%f", # '25.10.06 14:30:59.000200'
"%d.%m.%y %H:%M", # '25.10.06 14:30'
"%d.%m.%y", # '25.10.06'
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
f68f19b5a4aad5439ae72e83bb7ad0b0b00097964e11d558d4e55531b8f4d910 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j F Y H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "Y-m-d"
SHORT_DATETIME_FORMAT = "Y-m-d H:i"
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
"%Y-%m-%d", # '2006-10-25'
"%m/%d/%Y", # '10/25/2006'
"%m/%d/%y", # '10/25/06'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%m/%d/%Y %H:%M:%S", # '10/25/2006 14:30:59'
"%m/%d/%Y %H:%M:%S.%f", # '10/25/2006 14:30:59.000200'
"%m/%d/%Y %H:%M", # '10/25/2006 14:30'
"%m/%d/%y %H:%M:%S", # '10/25/06 14:30:59'
"%m/%d/%y %H:%M:%S.%f", # '10/25/06 14:30:59.000200'
"%m/%d/%y %H:%M", # '10/25/06 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # non-breaking space
NUMBER_GROUPING = 3
|
b1cb0d7cfe2f55a731588a0dd37a9cd856b747c521e48cebd4ca8189cac09770 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j. F Y"
TIME_FORMAT = "H:i"
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "j.n.Y"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
8f29e167d5cd36ec535de17b7f616b258619b85c252d8e7c7c67d0ea056ceecf | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F, Y"
TIME_FORMAT = "g:i A"
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "j M, Y"
# SHORT_DATETIME_FORMAT =
FIRST_DAY_OF_WEEK = 6 # Saturday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # 25/10/2016
"%d/%m/%y", # 25/10/16
"%d-%m-%Y", # 25-10-2016
"%d-%m-%y", # 25-10-16
]
TIME_INPUT_FORMATS = [
"%H:%M:%S", # 14:30:59
"%H:%M", # 14:30
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S", # 25/10/2006 14:30:59
"%d/%m/%Y %H:%M", # 25/10/2006 14:30
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
# NUMBER_GROUPING =
|
e9ccf15c8786b3b058a406d954b485b475c246579fb683d145a6d18515ee28f3 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r"j \d\e F \d\e Y"
TIME_FORMAT = "G:i"
DATETIME_FORMAT = r"j \d\e F \d\e Y \a \l\e\s G:i"
YEAR_MONTH_FORMAT = r"F \d\e\l Y"
MONTH_DAY_FORMAT = r"j \d\e F"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y G:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '31/12/2009'
"%d/%m/%y", # '31/12/09'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S",
"%d/%m/%Y %H:%M:%S.%f",
"%d/%m/%Y %H:%M",
"%d/%m/%y %H:%M:%S",
"%d/%m/%y %H:%M:%S.%f",
"%d/%m/%y %H:%M",
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
358d6960f7e9bbb5e32cc082b899398207692dcb9f575875d35a23cb17cf52b6 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y" # '25 Hydref 2006'
TIME_FORMAT = "P" # '2:30 y.b.'
DATETIME_FORMAT = "j F Y, P" # '25 Hydref 2006, 2:30 y.b.'
YEAR_MONTH_FORMAT = "F Y" # 'Hydref 2006'
MONTH_DAY_FORMAT = "j F" # '25 Hydref'
SHORT_DATE_FORMAT = "d/m/Y" # '25/10/2006'
SHORT_DATETIME_FORMAT = "d/m/Y P" # '25/10/2006 2:30 y.b.'
FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun'
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '25/10/2006'
"%d/%m/%y", # '25/10/06'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%d/%m/%Y %H:%M:%S", # '25/10/2006 14:30:59'
"%d/%m/%Y %H:%M:%S.%f", # '25/10/2006 14:30:59.000200'
"%d/%m/%Y %H:%M", # '25/10/2006 14:30'
"%d/%m/%y %H:%M:%S", # '25/10/06 14:30:59'
"%d/%m/%y %H:%M:%S.%f", # '25/10/06 14:30:59.000200'
"%d/%m/%y %H:%M", # '25/10/06 14:30'
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
310d4a03a975aa65b4eeb5cb629951b3e575851d4071bc77d24d91a579cc8508 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "Y年n月j日"
TIME_FORMAT = "G:i"
DATETIME_FORMAT = "Y年n月j日G:i"
YEAR_MONTH_FORMAT = "Y年n月"
MONTH_DAY_FORMAT = "n月j日"
SHORT_DATE_FORMAT = "Y/m/d"
SHORT_DATETIME_FORMAT = "Y/m/d G:i"
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
# NUMBER_GROUPING =
|
918c8ec561cddc9ca27f7ac5c4b1720548d3cc55309ae68bae4c3926f19b133f | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j N Y"
DATETIME_FORMAT = "j N Y, G.i"
TIME_FORMAT = "G.i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "d-m-Y"
SHORT_DATETIME_FORMAT = "d-m-Y G.i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d-%m-%Y", # '25-10-2009'
"%d/%m/%Y", # '25/10/2009'
"%d-%m-%y", # '25-10-09'
"%d/%m/%y", # '25/10/09'
"%d %b %Y", # '25 Oct 2006',
"%d %B %Y", # '25 October 2006'
"%m/%d/%y", # '10/25/06'
"%m/%d/%Y", # '10/25/2009'
]
TIME_INPUT_FORMATS = [
"%H.%M.%S", # '14.30.59'
"%H.%M", # '14.30'
]
DATETIME_INPUT_FORMATS = [
"%d-%m-%Y %H.%M.%S", # '25-10-2009 14.30.59'
"%d-%m-%Y %H.%M.%S.%f", # '25-10-2009 14.30.59.000200'
"%d-%m-%Y %H.%M", # '25-10-2009 14.30'
"%d-%m-%y %H.%M.%S", # '25-10-09' 14.30.59'
"%d-%m-%y %H.%M.%S.%f", # '25-10-09' 14.30.59.000200'
"%d-%m-%y %H.%M", # '25-10-09' 14.30'
"%m/%d/%y %H.%M.%S", # '10/25/06 14.30.59'
"%m/%d/%y %H.%M.%S.%f", # '10/25/06 14.30.59.000200'
"%m/%d/%y %H.%M", # '10/25/06 14.30'
"%m/%d/%Y %H.%M.%S", # '25/10/2009 14.30.59'
"%m/%d/%Y %H.%M.%S.%f", # '25/10/2009 14.30.59.000200'
"%m/%d/%Y %H.%M", # '25/10/2009 14.30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
06823952f88a199e1371ca8b9b1a5c74c7f4624d58884858fe22d051d763be2d | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j M Y" # '25 Oct 2006'
TIME_FORMAT = "P" # '2:30 p.m.'
DATETIME_FORMAT = "j M Y, P" # '25 Oct 2006, 2:30 p.m.'
YEAR_MONTH_FORMAT = "F Y" # 'October 2006'
MONTH_DAY_FORMAT = "j F" # '25 October'
SHORT_DATE_FORMAT = "d/m/Y" # '25/10/2006'
SHORT_DATETIME_FORMAT = "d/m/Y P" # '25/10/2006 2:30 p.m.'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '25/10/2006'
"%d/%m/%y", # '25/10/06'
# "%b %d %Y", # 'Oct 25 2006'
# "%b %d, %Y", # 'Oct 25, 2006'
# "%d %b %Y", # '25 Oct 2006'
# "%d %b, %Y", # '25 Oct, 2006'
# "%B %d %Y", # 'October 25 2006'
# "%B %d, %Y", # 'October 25, 2006'
# "%d %B %Y", # '25 October 2006'
# "%d %B, %Y", # '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%d/%m/%Y %H:%M:%S", # '25/10/2006 14:30:59'
"%d/%m/%Y %H:%M:%S.%f", # '25/10/2006 14:30:59.000200'
"%d/%m/%Y %H:%M", # '25/10/2006 14:30'
"%d/%m/%y %H:%M:%S", # '25/10/06 14:30:59'
"%d/%m/%y %H:%M:%S.%f", # '25/10/06 14:30:59.000200'
"%d/%m/%y %H:%M", # '25/10/06 14:30'
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
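Any of these per-locale constants can be read by name through django.utils.formats.get_format. A sketch (assuming a configured settings module; "en-au" is an assumption about which English-variant locale the file above belongs to):

# Sketch: reading locale constants by name (assumes configured settings;
# "en-au" is an assumed locale code for the file above).
from django.utils import formats

print(formats.get_format("FIRST_DAY_OF_WEEK", lang="en-au"))      # 0 = Sunday
print(formats.get_format("DATE_INPUT_FORMATS", lang="en-au")[0])  # '%d/%m/%Y'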
44e3766aa41a40adc361517fc069414090c7ae100dc72a5c516fee758288faba | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "d/m/Y"
TIME_FORMAT = "P"
DATETIME_FORMAT = "d/m/Y P"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y P"
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '25/10/2006'
"%d/%m/%y", # '25/10/06'
"%Y-%m-%d", # '2006-10-25'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S", # '25/10/2006 14:30:59'
"%d/%m/%Y %H:%M:%S.%f", # '25/10/2006 14:30:59.000200'
"%d/%m/%Y %H:%M", # '25/10/2006 14:30'
"%d/%m/%y %H:%M:%S", # '25/10/06 14:30:59'
"%d/%m/%y %H:%M:%S.%f", # '25/10/06 14:30:59.000200'
"%d/%m/%y %H:%M", # '25/10/06 14:30'
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
0bd49c477818b304f57505c579d5149d87ba0d03d51804bf9cbc6cd21d84ddd1 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r"Y \m. E j \d."
TIME_FORMAT = "H:i"
DATETIME_FORMAT = r"Y \m. E j \d., H:i"
YEAR_MONTH_FORMAT = r"Y \m. F"
MONTH_DAY_FORMAT = r"E j \d."
SHORT_DATE_FORMAT = "Y-m-d"
SHORT_DATETIME_FORMAT = "Y-m-d H:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%Y-%m-%d", # '2006-10-25'
"%d.%m.%Y", # '25.10.2006'
"%d.%m.%y", # '25.10.06'
]
TIME_INPUT_FORMATS = [
"%H:%M:%S", # '14:30:59'
"%H:%M:%S.%f", # '14:30:59.000200'
"%H:%M", # '14:30'
"%H.%M.%S", # '14.30.59'
"%H.%M.%S.%f", # '14.30.59.000200'
"%H.%M", # '14.30'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%d.%m.%Y %H:%M:%S", # '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # '25.10.2006 14:30'
"%d.%m.%y %H:%M:%S", # '25.10.06 14:30:59'
"%d.%m.%y %H:%M:%S.%f", # '25.10.06 14:30:59.000200'
"%d.%m.%y %H:%M", # '25.10.06 14:30'
"%d.%m.%y %H.%M.%S", # '25.10.06 14.30.59'
"%d.%m.%y %H.%M.%S.%f", # '25.10.06 14.30.59.000200'
"%d.%m.%y %H.%M", # '25.10.06 14.30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
5554e5c327a45fcd3308a960d4fe2385a01d28da4de48eb8a56ff182b869c95b | # This file is distributed under the same license as the Django package.
#
DATE_FORMAT = r"j \d\e F \d\e Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = r"j \d\e F \d\e Y \a \l\a\s H:i"
YEAR_MONTH_FORMAT = r"F \d\e Y"
MONTH_DAY_FORMAT = r"j \d\e F"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y H:i"
FIRST_DAY_OF_WEEK = 0 # Sunday
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '31/12/2009'
"%d/%m/%y", # '31/12/09'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S",
"%d/%m/%Y %H:%M:%S.%f",
"%d/%m/%Y %H:%M",
"%d/%m/%y %H:%M:%S",
"%d/%m/%y %H:%M:%S.%f",
"%d/%m/%y %H:%M",
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
e2a80e2684762b9644fa9036f9a611c055bb01b2be33d17dbb7cd5c2079baf6c | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r"j N Y"
TIME_FORMAT = r"H:i"
DATETIME_FORMAT = r"j N Y H:i"
YEAR_MONTH_FORMAT = r"F Y"
MONTH_DAY_FORMAT = r"j \d\e F"
SHORT_DATE_FORMAT = r"d/m/Y"
SHORT_DATETIME_FORMAT = r"d/m/Y H:i"
FIRST_DAY_OF_WEEK = 0 # 0: Sunday, 1: Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '31/12/2009'
"%d/%m/%y", # '31/12/09'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S",
"%d/%m/%Y %H:%M:%S.%f",
"%d/%m/%Y %H:%M",
"%d/%m/%y %H:%M:%S",
"%d/%m/%y %H:%M:%S.%f",
"%d/%m/%y %H:%M",
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
8f69372387bfe1e3d816c25b91112d9a49ea961a833f14abbd6377948404d280 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r"j \d\e F \d\e Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = r"j \d\e F \d\e Y \a \l\a\s H:i"
YEAR_MONTH_FORMAT = r"F \d\e Y"
MONTH_DAY_FORMAT = r"j \d\e F"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '31/12/2009'
"%d/%m/%y", # '31/12/09'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S",
"%d/%m/%Y %H:%M:%S.%f",
"%d/%m/%Y %H:%M",
"%d/%m/%y %H:%M:%S",
"%d/%m/%y %H:%M:%S.%f",
"%d/%m/%y %H:%M",
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
177fe0628a4e5c835c96568f1734ea66b676a19d727b7c73474350b65a97629d | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j. F Y."
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j. F Y. H:i"
YEAR_MONTH_FORMAT = "F Y."
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "j.m.Y."
SHORT_DATETIME_FORMAT = "j.m.Y. H:i"
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d.%m.%Y.", # '25.10.2006.'
"%d.%m.%y.", # '25.10.06.'
"%d. %m. %Y.", # '25. 10. 2006.'
"%d. %m. %y.", # '25. 10. 06.'
# "%d. %b %y.", # '25. Oct 06.'
# "%d. %B %y.", # '25. October 06.'
# "%d. %b '%y.", # '25. Oct '06.'
# "%d. %B '%y.", # '25. October '06.'
# "%d. %b %Y.", # '25. Oct 2006.'
# "%d. %B %Y.", # '25. October 2006.'
]
DATETIME_INPUT_FORMATS = [
"%d.%m.%Y. %H:%M:%S", # '25.10.2006. 14:30:59'
"%d.%m.%Y. %H:%M:%S.%f", # '25.10.2006. 14:30:59.000200'
"%d.%m.%Y. %H:%M", # '25.10.2006. 14:30'
"%d.%m.%y. %H:%M:%S", # '25.10.06. 14:30:59'
"%d.%m.%y. %H:%M:%S.%f", # '25.10.06. 14:30:59.000200'
"%d.%m.%y. %H:%M", # '25.10.06. 14:30'
"%d. %m. %Y. %H:%M:%S", # '25. 10. 2006. 14:30:59'
"%d. %m. %Y. %H:%M:%S.%f", # '25. 10. 2006. 14:30:59.000200'
"%d. %m. %Y. %H:%M", # '25. 10. 2006. 14:30'
"%d. %m. %y. %H:%M:%S", # '25. 10. 06. 14:30:59'
"%d. %m. %y. %H:%M:%S.%f", # '25. 10. 06. 14:30:59.000200'
"%d. %m. %y. %H:%M", # '25. 10. 06. 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
cc810093e4a22d7d1cbd056645cdcba6657af63c11ade8cc31d0bbbed56c4a1d | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r"j\-\a \d\e F Y" # '26-a de julio 1887'
TIME_FORMAT = "H:i" # '18:59'
DATETIME_FORMAT = r"j\-\a \d\e F Y\, \j\e H:i" # '26-a de julio 1887, je 18:59'
YEAR_MONTH_FORMAT = r"F \d\e Y" # 'julio de 1887'
MONTH_DAY_FORMAT = r"j\-\a \d\e F" # '26-a de julio'
SHORT_DATE_FORMAT = "Y-m-d" # '1887-07-26'
SHORT_DATETIME_FORMAT = "Y-m-d H:i" # '1887-07-26 18:59'
FIRST_DAY_OF_WEEK = 1 # Monday (lundo)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%Y-%m-%d", # '1887-07-26'
"%y-%m-%d", # '87-07-26'
"%Y %m %d", # '1887 07 26'
"%Y.%m.%d", # '1887.07.26'
"%d-a de %b %Y", # '26-a de jul 1887'
"%d %b %Y", # '26 jul 1887'
"%d-a de %B %Y", # '26-a de julio 1887'
"%d %B %Y", # '26 julio 1887'
"%d %m %Y", # '26 07 1887'
"%d/%m/%Y", # '26/07/1887'
]
TIME_INPUT_FORMATS = [
"%H:%M:%S", # '18:59:00'
"%H:%M", # '18:59'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '1887-07-26 18:59:00'
"%Y-%m-%d %H:%M", # '1887-07-26 18:59'
"%Y.%m.%d %H:%M:%S", # '1887.07.26 18:59:00'
"%Y.%m.%d %H:%M", # '1887.07.26 18:59'
"%d/%m/%Y %H:%M:%S", # '26/07/1887 18:59:00'
"%d/%m/%Y %H:%M", # '26/07/1887 18:59'
"%y-%m-%d %H:%M:%S", # '87-07-26 18:59:00'
"%y-%m-%d %H:%M", # '87-07-26 18:59'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # non-breaking space
NUMBER_GROUPING = 3
|
dcc03bd025b4c1faf400862f62a134002997efdb4d3b1f9976547a01eb69f5ef | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j. E Y"
TIME_FORMAT = "G:i"
DATETIME_FORMAT = "j. E Y G:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "d.m.Y"
SHORT_DATETIME_FORMAT = "d.m.Y G:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d.%m.%Y", # '05.01.2006'
"%d.%m.%y", # '05.01.06'
"%d. %m. %Y", # '5. 1. 2006'
"%d. %m. %y", # '5. 1. 06'
# "%d. %B %Y", # '25. October 2006'
# "%d. %b. %Y", # '25. Oct. 2006'
]
# Kept ISO formats as one is in first position
TIME_INPUT_FORMATS = [
"%H:%M:%S", # '04:30:59'
"%H.%M", # '04.30'
"%H:%M", # '04:30'
]
DATETIME_INPUT_FORMATS = [
"%d.%m.%Y %H:%M:%S", # '05.01.2006 04:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '05.01.2006 04:30:59.000200'
"%d.%m.%Y %H.%M", # '05.01.2006 04.30'
"%d.%m.%Y %H:%M", # '05.01.2006 04:30'
"%d. %m. %Y %H:%M:%S", # '05. 01. 2006 04:30:59'
"%d. %m. %Y %H:%M:%S.%f", # '05. 01. 2006 04:30:59.000200'
"%d. %m. %Y %H.%M", # '05. 01. 2006 04.30'
"%d. %m. %Y %H:%M", # '05. 01. 2006 04:30'
"%Y-%m-%d %H.%M", # '2006-01-05 04.30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # non-breaking space
NUMBER_GROUPING = 3
|
178988743a1a39827f94f9ac27fe9b428e198fca4e495c2e95d9bddb3472e05a | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j. E Y."
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j. E Y. H:i"
YEAR_MONTH_FORMAT = "F Y."
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "j.m.Y."
SHORT_DATETIME_FORMAT = "j.m.Y. H:i"
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
"%Y-%m-%d", # '2006-10-25'
"%d.%m.%Y.", # '25.10.2006.'
"%d.%m.%y.", # '25.10.06.'
"%d. %m. %Y.", # '25. 10. 2006.'
"%d. %m. %y.", # '25. 10. 06.'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%d.%m.%Y. %H:%M:%S", # '25.10.2006. 14:30:59'
"%d.%m.%Y. %H:%M:%S.%f", # '25.10.2006. 14:30:59.000200'
"%d.%m.%Y. %H:%M", # '25.10.2006. 14:30'
"%d.%m.%y. %H:%M:%S", # '25.10.06. 14:30:59'
"%d.%m.%y. %H:%M:%S.%f", # '25.10.06. 14:30:59.000200'
"%d.%m.%y. %H:%M", # '25.10.06. 14:30'
"%d. %m. %Y. %H:%M:%S", # '25. 10. 2006. 14:30:59'
"%d. %m. %Y. %H:%M:%S.%f", # '25. 10. 2006. 14:30:59.000200'
"%d. %m. %Y. %H:%M", # '25. 10. 2006. 14:30'
"%d. %m. %y. %H:%M:%S", # '25. 10. 06. 14:30:59'
"%d. %m. %y. %H:%M:%S.%f", # '25. 10. 06. 14:30:59.000200'
"%d. %m. %y. %H:%M", # '25. 10. 06. 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
ff122e824a8b9e337d77322179f329b095da4cf95daf86e5292192f9cdecc20d | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r"\N\gà\y d \t\há\n\g n \nă\m Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = r"H:i \N\gà\y d \t\há\n\g n \nă\m Y"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "d-m-Y"
SHORT_DATETIME_FORMAT = "H:i d-m-Y"
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
# NUMBER_GROUPING =
|
52239a74fa0cacdb74893a7c8d956aeb9c51ff82e43b0a7e7e3bc56fc3323558 | # This file is distributed under the same license as the Django package.
#
DATE_FORMAT = r"j \d\e F \d\e Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = r"j \d\e F \d\e Y \a \l\a\s H:i"
YEAR_MONTH_FORMAT = r"F \d\e Y"
MONTH_DAY_FORMAT = r"j \d\e F"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday: ISO 8601
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '25/10/2006'
"%d/%m/%y", # '25/10/06'
"%Y%m%d", # '20061025'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S",
"%d/%m/%Y %H:%M:%S.%f",
"%d/%m/%Y %H:%M",
"%d/%m/%y %H:%M:%S",
"%d/%m/%y %H:%M:%S.%f",
"%d/%m/%y %H:%M",
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
efad26fa1e0e1e98a3ea9fc1003d9daf79ec59a4dbea847563972e957f46c6ac | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j. N Y."
TIME_FORMAT = "G:i"
DATETIME_FORMAT = "j. N. Y. G:i T"
YEAR_MONTH_FORMAT = "F Y."
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "Y M j"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
# NUMBER_GROUPING =
|
be68df88ce78a09271a9c76064950d35037ba0c4be5cb541609e255816f972d6 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F, Y"
TIME_FORMAT = "g:i A"
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "j M, Y"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
c709c9b172d71a880eaa93f5f2ee86a338de869580c302a65db10ba25615fe4e | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "d F Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j. F Y H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "j.m.Y"
SHORT_DATETIME_FORMAT = "j.m.Y H:i"
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d.%m.%Y", # '25.10.2006'
"%d.%m.%y", # '25.10.06'
"%d. %m. %Y", # '25. 10. 2006'
"%d. %m. %y", # '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
"%d.%m.%Y %H:%M:%S", # '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # '25.10.2006 14:30'
"%d.%m.%y %H:%M:%S", # '25.10.06 14:30:59'
"%d.%m.%y %H:%M:%S.%f", # '25.10.06 14:30:59.000200'
"%d.%m.%y %H:%M", # '25.10.06 14:30'
"%d. %m. %Y %H:%M:%S", # '25. 10. 2006 14:30:59'
"%d. %m. %Y %H:%M:%S.%f", # '25. 10. 2006 14:30:59.000200'
"%d. %m. %Y %H:%M", # '25. 10. 2006 14:30'
"%d. %m. %y %H:%M:%S", # '25. 10. 06 14:30:59'
"%d. %m. %y %H:%M:%S.%f", # '25. 10. 06 14:30:59.000200'
"%d. %m. %y %H:%M", # '25. 10. 06 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
421ed1dd431f5b3b7b40874c66ac58d28e0e32956caa57211caed9d109c359db | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y"
TIME_FORMAT = "H:i"
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "j M Y"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
# NUMBER_GROUPING =
|
71d9aa3940559cf4b28b693d024386976eecf3d3f298578f1b682d9d8cd86e2c | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r"j-E, Y-\y\i\l"
TIME_FORMAT = "G:i"
DATETIME_FORMAT = r"j-E, Y-\y\i\l G:i"
YEAR_MONTH_FORMAT = r"F Y-\y\i\l"
MONTH_DAY_FORMAT = "j-E"
SHORT_DATE_FORMAT = "d.m.Y"
SHORT_DATETIME_FORMAT = "d.m.Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d.%m.%Y", # '25.10.2006'
"%d-%B, %Y-yil", # '25-Oktabr, 2006-yil'
]
DATETIME_INPUT_FORMATS = [
"%d.%m.%Y %H:%M:%S", # '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # '25.10.2006 14:30'
"%d-%B, %Y-yil %H:%M:%S", # '25-Oktabr, 2006-yil 14:30:59'
"%d-%B, %Y-yil %H:%M:%S.%f", # '25-Oktabr, 2006-yil 14:30:59.000200'
"%d-%B, %Y-yil %H:%M", # '25-Oktabr, 2006-yil 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # non-breaking space
NUMBER_GROUPING = 3
|
2911213ed1eecca4bf66c02a5ece4ba983c6867e8efa32dde16650fb7f4103c2 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j E Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j E Y H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j E"
SHORT_DATE_FORMAT = "d-m-Y"
SHORT_DATETIME_FORMAT = "d-m-Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d.%m.%Y", # '25.10.2006'
"%d.%m.%y", # '25.10.06'
"%y-%m-%d", # '06-10-25'
# "%d. %B %Y", # '25. października 2006'
# "%d. %b. %Y", # '25. paź. 2006'
]
DATETIME_INPUT_FORMATS = [
"%d.%m.%Y %H:%M:%S", # '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # '25.10.2006 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
|
0f2152655b864981a8226ad123616875e339d4eb6f21c0a42b3908d0abd84d0c | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j. F Y"
TIME_FORMAT = "G:i"
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "d.m.Y"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # Non-breaking space
# NUMBER_GROUPING =
|
2c2ecfff9ca374682cc4b43f183b42f07d9bcfd353c54cdefc202dce59b7ed30 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "d F Y"
TIME_FORMAT = "H:i"
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "d.m.Y"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # Non-breaking space
# NUMBER_GROUPING =
|
651eed31d245d14e8ad47f79713aab147e20a29e99b9243bbc3da1d322aaea6a | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "N j, Y"
TIME_FORMAT = "P"
DATETIME_FORMAT = "N j, Y, P"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "F j"
SHORT_DATE_FORMAT = "m/d/Y"
SHORT_DATETIME_FORMAT = "m/d/Y P"
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
"%Y-%m-%d", # '2006-10-25'
"%m/%d/%Y", # '10/25/2006'
"%m/%d/%y", # '10/25/06'
# "%b %d %Y", # 'Oct 25 2006'
# "%b %d, %Y", # 'Oct 25, 2006'
# "%d %b %Y", # '25 Oct 2006'
# "%d %b, %Y", # '25 Oct, 2006'
# "%B %d %Y", # 'October 25 2006'
# "%B %d, %Y", # 'October 25, 2006'
# "%d %B %Y", # '25 October 2006'
# "%d %B, %Y", # '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%m/%d/%Y %H:%M:%S", # '10/25/2006 14:30:59'
"%m/%d/%Y %H:%M:%S.%f", # '10/25/2006 14:30:59.000200'
"%m/%d/%Y %H:%M", # '10/25/2006 14:30'
"%m/%d/%y %H:%M:%S", # '10/25/06 14:30:59'
"%m/%d/%y %H:%M:%S.%f", # '10/25/06 14:30:59.000200'
"%m/%d/%y %H:%M", # '10/25/06 14:30'
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
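In templates, the date filter accepts one of these predefined format names and resolves it against the active locale. A sketch using the template engine directly with the en definitions above (assumes a configured Django template engine):

# Sketch: the "date" template filter resolves predefined format names
# (assumes a configured Django environment with the en locale active).
import datetime

from django.template import Context, Template

tpl = Template('{{ value|date:"SHORT_DATE_FORMAT" }}')
print(tpl.render(Context({"value": datetime.date(2006, 10, 25)})))
# -> '10/25/2006' with the en definitions above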
f873a86609a7304e3efb80ae5f3727857a8d99ad1687b3629e87f74420866645 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y"
TIME_FORMAT = "g:i A"
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "j M Y"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
d2e01b06f3a474964a8ef86baddd21b5271d3bbb1381ba1f3a4902f00df9fdaf | # This file is distributed under the same license as the Django package.
#
DATE_FORMAT = r"j \d\e F \d\e Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = r"j \d\e F \d\e Y \a \l\a\s H:i"
YEAR_MONTH_FORMAT = r"F \d\e Y"
MONTH_DAY_FORMAT = r"j \d\e F"
SHORT_DATE_FORMAT = "d/m/Y"
SHORT_DATETIME_FORMAT = "d/m/Y H:i"
FIRST_DAY_OF_WEEK = 1
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '25/10/2006'
"%d/%m/%y", # '25/10/06'
"%Y%m%d", # '20061025'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S",
"%d/%m/%Y %H:%M:%S.%f",
"%d/%m/%Y %H:%M",
"%d/%m/%y %H:%M:%S",
"%d/%m/%y %H:%M:%S.%f",
"%d/%m/%y %H:%M",
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
aa7dfa12388ee01bb5d83ffaaa2b633039017f2e0cd0b80513e1612bc6cf3ada | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "Y년 n월 j일"
TIME_FORMAT = "A g:i"
DATETIME_FORMAT = "Y년 n월 j일 g:i A"
YEAR_MONTH_FORMAT = "Y년 n월"
MONTH_DAY_FORMAT = "n월 j일"
SHORT_DATE_FORMAT = "Y-n-j."
SHORT_DATETIME_FORMAT = "Y-n-j H:i"
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
"%Y-%m-%d", # '2006-10-25'
"%m/%d/%Y", # '10/25/2006'
"%m/%d/%y", # '10/25/06'
# "%b %d %Y", # 'Oct 25 2006'
# "%b %d, %Y", # 'Oct 25, 2006'
# "%d %b %Y", # '25 Oct 2006'
# "%d %b, %Y", #'25 Oct, 2006'
# "%B %d %Y", # 'October 25 2006'
# "%B %d, %Y", #'October 25, 2006'
# "%d %B %Y", # '25 October 2006'
# "%d %B, %Y", # '25 October, 2006'
"%Y년 %m월 %d일", # '2006년 10월 25일', with localized suffix.
]
TIME_INPUT_FORMATS = [
"%H:%M:%S", # '14:30:59'
"%H:%M:%S.%f", # '14:30:59.000200'
"%H:%M", # '14:30'
"%H시 %M분 %S초", # '14시 30분 59초'
"%H시 %M분", # '14시 30분'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%m/%d/%Y %H:%M:%S", # '10/25/2006 14:30:59'
"%m/%d/%Y %H:%M:%S.%f", # '10/25/2006 14:30:59.000200'
"%m/%d/%Y %H:%M", # '10/25/2006 14:30'
"%m/%d/%y %H:%M:%S", # '10/25/06 14:30:59'
"%m/%d/%y %H:%M:%S.%f", # '10/25/06 14:30:59.000200'
"%m/%d/%y %H:%M", # '10/25/06 14:30'
"%Y년 %m월 %d일 %H시 %M분 %S초", # '2006년 10월 25일 14시 30분 59초'
"%Y년 %m월 %d일 %H시 %M분", # '2006년 10월 25일 14시 30분'
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
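strptime patterns may contain literal non-ASCII text around the directives, which is how the Korean-suffixed formats above parse directly:

# Sketch: strptime matches literal non-ASCII text around the directives,
# which is how the localized Korean patterns above work.
from datetime import datetime

print(datetime.strptime("2006년 10월 25일", "%Y년 %m월 %d일").date())
# -> 2006-10-25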
21d77f7d55ca2472523ae0778d16e8fc58304363fa54ad808c96da76fe54c4af | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j F Y H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "j N Y"
SHORT_DATETIME_FORMAT = "j N Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # '25/10/2006'
"%d/%m/%y", # '25/10/06'
"%d.%m.%Y", # Swiss [fr_CH] '25.10.2006'
"%d.%m.%y", # Swiss [fr_CH] '25.10.06'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S", # '25/10/2006 14:30:59'
"%d/%m/%Y %H:%M:%S.%f", # '25/10/2006 14:30:59.000200'
"%d/%m/%Y %H:%M", # '25/10/2006 14:30'
"%d.%m.%Y %H:%M:%S", # Swiss [fr_CH), '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # Swiss (fr_CH), '25.10.2006 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # non-breaking space
NUMBER_GROUPING = 3
|
4a60940fecd5808d501361f0aa416d00ef3bac9f85a028f0d6cfb6f9c7e5d61d | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y"
TIME_FORMAT = "G:i"
DATETIME_FORMAT = "j F Y, G:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "j M Y"
SHORT_DATETIME_FORMAT = "j M Y, G:i"
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d/%m/%Y", # 25/10/2006
"%d %b %Y", # 25 ต.ค. 2006
"%d %B %Y", # 25 ตุลาคม 2006
]
TIME_INPUT_FORMATS = [
"%H:%M:%S", # 14:30:59
"%H:%M:%S.%f", # 14:30:59.000200
"%H:%M", # 14:30
]
DATETIME_INPUT_FORMATS = [
"%d/%m/%Y %H:%M:%S", # 25/10/2006 14:30:59
"%d/%m/%Y %H:%M:%S.%f", # 25/10/2006 14:30:59.000200
"%d/%m/%Y %H:%M", # 25/10/2006 14:30
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
93ca3076ad14efec7aca5f259755b956346829da7c6b51b67a71f4e06e7f9ef5 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r"Y. \g\a\d\a j. F"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = r"Y. \g\a\d\a j. F, H:i"
YEAR_MONTH_FORMAT = r"Y. \g. F"
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = r"j.m.Y"
SHORT_DATETIME_FORMAT = "j.m.Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
"%Y-%m-%d", # '2006-10-25'
"%d.%m.%Y", # '25.10.2006'
"%d.%m.%y", # '25.10.06'
]
TIME_INPUT_FORMATS = [
"%H:%M:%S", # '14:30:59'
"%H:%M:%S.%f", # '14:30:59.000200'
"%H:%M", # '14:30'
"%H.%M.%S", # '14.30.59'
"%H.%M.%S.%f", # '14.30.59.000200'
"%H.%M", # '14.30'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%d.%m.%Y %H:%M:%S", # '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # '25.10.2006 14:30'
"%d.%m.%y %H:%M:%S", # '25.10.06 14:30:59'
"%d.%m.%y %H:%M:%S.%f", # '25.10.06 14:30:59.000200'
"%d.%m.%y %H:%M", # '25.10.06 14:30'
"%d.%m.%y %H.%M.%S", # '25.10.06 14.30.59'
"%d.%m.%y %H.%M.%S.%f", # '25.10.06 14.30.59.000200'
"%d.%m.%y %H.%M", # '25.10.06 14.30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # Non-breaking space
NUMBER_GROUPING = 3
|
fb2df4df7168ec23b263435bc5e2556061726eb9cb6e05ed45fd72046968bb2b | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j. F Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j. F Y H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "d.m.Y"
SHORT_DATETIME_FORMAT = "d.m.Y H:i"
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d.%m.%Y", # '25.10.2006'
]
DATETIME_INPUT_FORMATS = [
"%d.%m.%Y %H:%M:%S", # '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # '25.10.2006 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "."
NUMBER_GROUPING = 3
|
240ad533d74c96e48ff9cc296727525571c1e55b3d40ac91f5cf9b7ed23d85db | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y"
TIME_FORMAT = "g:i A"
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "d-m-Y"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
# NUMBER_GROUPING =
|
62d381b3ab388f848e99f077729a767a4731545a151a050df264e1a12b9e0add | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j M Y" # '25 Oct 2006'
TIME_FORMAT = "P" # '2:30 p.m.'
DATETIME_FORMAT = "j M Y, P" # '25 Oct 2006, 2:30 p.m.'
YEAR_MONTH_FORMAT = "F Y" # 'October 2006'
MONTH_DAY_FORMAT = "j F" # '25 October'
SHORT_DATE_FORMAT = "d/m/Y" # '25/10/2006'
SHORT_DATETIME_FORMAT = "d/m/Y P" # '25/10/2006 2:30 p.m.'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%Y-%m-%d", # '2006-10-25'
"%d/%m/%Y", # '25/10/2006'
"%d/%m/%y", # '25/10/06'
"%d %b %Y", # '25 Oct 2006'
"%d %b, %Y", # '25 Oct, 2006'
"%d %B %Y", # '25 October 2006'
"%d %B, %Y", # '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
"%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59'
"%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200'
"%Y-%m-%d %H:%M", # '2006-10-25 14:30'
"%d/%m/%Y %H:%M:%S", # '25/10/2006 14:30:59'
"%d/%m/%Y %H:%M:%S.%f", # '25/10/2006 14:30:59.000200'
"%d/%m/%Y %H:%M", # '25/10/2006 14:30'
"%d/%m/%y %H:%M:%S", # '25/10/06 14:30:59'
"%d/%m/%y %H:%M:%S.%f", # '25/10/06 14:30:59.000200'
"%d/%m/%y %H:%M", # '25/10/06 14:30'
]
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
NUMBER_GROUPING = 3
|
9537d86de71d4a61c2c5e6e883fd9b774377da20f79c4abf7f96ee87d8a5fa72 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j E Y г."
TIME_FORMAT = "G:i"
DATETIME_FORMAT = "j E Y г. G:i"
YEAR_MONTH_FORMAT = "F Y г."
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "d.m.Y"
SHORT_DATETIME_FORMAT = "d.m.Y H:i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
"%d.%m.%Y", # '25.10.2006'
"%d.%m.%y", # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
"%d.%m.%Y %H:%M:%S", # '25.10.2006 14:30:59'
"%d.%m.%Y %H:%M:%S.%f", # '25.10.2006 14:30:59.000200'
"%d.%m.%Y %H:%M", # '25.10.2006 14:30'
"%d.%m.%y %H:%M:%S", # '25.10.06 14:30:59'
"%d.%m.%y %H:%M:%S.%f", # '25.10.06 14:30:59.000200'
"%d.%m.%y %H:%M", # '25.10.06 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # non-breaking space
NUMBER_GROUPING = 3
|
69404d7624353db748b74a064389bdb94274688433e0de06f6965f354b08bcd1 | """Translation helper functions."""
import functools
import gettext as gettext_module
import os
import re
import sys
import warnings
from asgiref.local import Local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import SafeData, mark_safe
from . import to_language, to_locale
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = Local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = _lazy_re_compile(
r"""
# "en", "en-au", "x-y-z", "es-419", "*"
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*)
# Optional "q=1.00", "q=0.8"
(?:\s*;\s*q=(0(?:\.[0-9]{,3})?|1(?:\.0{,3})?))?
# Multiple accepts per header.
(?:\s*,\s*|$)
""",
re.VERBOSE,
)
language_code_re = _lazy_re_compile(
r"^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$", re.IGNORECASE
)
language_code_prefix_re = _lazy_re_compile(r"^/(\w+([@-]\w+){0,2})(/|$)")
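# Illustration (not part of the original module): for the header value
# "en-AU,en;q=0.8,*;q=0.1", accept_language_re.findall() yields pairs such as
# [('en-AU', ''), ('en', '0.8'), ('*', '0.1')] -- a language tag plus an
# optional quality value, which callers can then sort by descending q.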
@receiver(setting_changed)
def reset_cache(*, setting, **kwargs):
"""
Reset global state when LANGUAGES setting has been changed, as some
languages should no longer be accepted.
"""
if setting in ("LANGUAGES", "LANGUAGE_CODE"):
check_for_language.cache_clear()
get_languages.cache_clear()
get_supported_language_variant.cache_clear()
class TranslationCatalog:
"""
Simulate a dict for DjangoTranslation._catalog so that multiple catalogs
with different plural equations are kept separate.
"""
def __init__(self, trans=None):
self._catalogs = [trans._catalog.copy()] if trans else [{}]
self._plurals = [trans.plural] if trans else [lambda n: int(n != 1)]
def __getitem__(self, key):
for cat in self._catalogs:
try:
return cat[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self._catalogs[0][key] = value
def __contains__(self, key):
return any(key in cat for cat in self._catalogs)
def items(self):
for cat in self._catalogs:
yield from cat.items()
def keys(self):
for cat in self._catalogs:
yield from cat.keys()
def update(self, trans):
# Merge if plural function is the same, else prepend.
for cat, plural in zip(self._catalogs, self._plurals):
if trans.plural.__code__ == plural.__code__:
cat.update(trans._catalog)
break
else:
self._catalogs.insert(0, trans._catalog.copy())
self._plurals.insert(0, trans.plural)
def get(self, key, default=None):
missing = object()
for cat in self._catalogs:
result = cat.get(key, missing)
if result is not missing:
return result
return default
def plural(self, msgid, num):
for cat, plural in zip(self._catalogs, self._plurals):
tmsg = cat.get((msgid, plural(num)))
if tmsg is not None:
return tmsg
raise KeyError
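# Usage sketch (illustrative, not part of the original module):
#     catalog = TranslationCatalog(django_trans)  # seed from one translation
#     catalog.update(app_trans)     # merged, or prepended if plural rule differs
#     catalog.plural("%d item", 3)  # tries ("%d item", plural(3)) per sub-catalog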
class DjangoTranslation(gettext_module.GNUTranslations):
"""
Set up the GNUTranslations context with regard to output charset.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
domain = "django"
def __init__(self, language, domain=None, localedirs=None):
"""Create a GNUTranslations() using many locale directories"""
gettext_module.GNUTranslations.__init__(self)
if domain is not None:
self.domain = domain
self.__language = language
self.__to_language = to_language(language)
self.__locale = to_locale(language)
self._catalog = None
# If a language doesn't have a catalog, use the Germanic default for
# pluralization: anything except one is pluralized.
self.plural = lambda n: int(n != 1)
if self.domain == "django":
if localedirs is not None:
# A module-level cache is used for caching 'django' translations
warnings.warn(
"localedirs is ignored when domain is 'django'.", RuntimeWarning
)
localedirs = None
self._init_translation_catalog()
if localedirs:
for localedir in localedirs:
translation = self._new_gnu_trans(localedir)
self.merge(translation)
else:
self._add_installed_apps_translations()
self._add_local_translations()
if (
self.__language == settings.LANGUAGE_CODE
and self.domain == "django"
and self._catalog is None
):
            # The default language should have at least one translation file
            # available.
raise OSError(
"No translation files found for default language %s."
% settings.LANGUAGE_CODE
)
self._add_fallback(localedirs)
if self._catalog is None:
# No catalogs found for this language, set an empty catalog.
self._catalog = TranslationCatalog()
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def _new_gnu_trans(self, localedir, use_null_fallback=True):
"""
Return a mergeable gettext.GNUTranslations instance.
        A convenience wrapper. By default gettext uses 'fallback=False'. The
        `use_null_fallback` parameter avoids confusion with any other
        references to 'fallback'.
"""
return gettext_module.translation(
domain=self.domain,
localedir=localedir,
languages=[self.__locale],
fallback=use_null_fallback,
)
def _init_translation_catalog(self):
"""Create a base catalog using global django translations."""
settingsfile = sys.modules[settings.__module__].__file__
localedir = os.path.join(os.path.dirname(settingsfile), "locale")
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_installed_apps_translations(self):
"""Merge translations from each installed app."""
try:
app_configs = reversed(apps.get_app_configs())
except AppRegistryNotReady:
raise AppRegistryNotReady(
"The translation infrastructure cannot be initialized before the "
"apps registry is ready. Check that you don't make non-lazy "
"gettext calls at import time."
)
for app_config in app_configs:
localedir = os.path.join(app_config.path, "locale")
if os.path.exists(localedir):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_local_translations(self):
"""Merge translations defined in LOCALE_PATHS."""
for localedir in reversed(settings.LOCALE_PATHS):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_fallback(self, localedirs=None):
"""Set the GNUTranslations() fallback with the default language."""
# Don't set a fallback for the default language or any English variant
# (as it's empty, so it'll ALWAYS fall back to the default language)
if self.__language == settings.LANGUAGE_CODE or self.__language.startswith(
"en"
):
return
if self.domain == "django":
# Get from cache
default_translation = translation(settings.LANGUAGE_CODE)
else:
default_translation = DjangoTranslation(
settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs
)
self.add_fallback(default_translation)
def merge(self, other):
"""Merge another translation into this catalog."""
if not getattr(other, "_catalog", None):
return # NullTranslations() has no _catalog
if self._catalog is None:
# Take plural and _info from first catalog found (generally Django's).
self.plural = other.plural
self._info = other._info.copy()
self._catalog = TranslationCatalog(other)
else:
self._catalog.update(other)
if other._fallback:
self.add_fallback(other._fallback)
def language(self):
"""Return the translation language."""
return self.__language
def to_language(self):
"""Return the translation language name."""
return self.__to_language
def ngettext(self, msgid1, msgid2, n):
try:
tmsg = self._catalog.plural(msgid1, n)
except KeyError:
if self._fallback:
return self._fallback.ngettext(msgid1, msgid2, n)
if n == 1:
tmsg = msgid1
else:
tmsg = msgid2
return tmsg
def translation(language):
"""
Return a translation object in the default 'django' domain.
"""
global _translations
if language not in _translations:
_translations[language] = DjangoTranslation(language)
return _translations[language]
def activate(language):
"""
Fetch the translation object for a given language and install it as the
current translation object for the current thread.
"""
if not language:
return
_active.value = translation(language)
def deactivate():
"""
Uninstall the active translation object so that further _() calls resolve
to the default translation object.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Make the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
_active.value.to_language = lambda *args: None
def get_language():
"""Return the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Return selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
    lang = get_language()
    if lang is None:
        return False
    base_lang = lang.split("-")[0]
    return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Return the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return _default
def gettext(message):
"""
Translate the 'message' string. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
eol_message = message.replace("\r\n", "\n").replace("\r", "\n")
if eol_message:
_default = _default or translation(settings.LANGUAGE_CODE)
translation_object = getattr(_active, "value", _default)
result = translation_object.gettext(eol_message)
else:
# Return an empty value of the corresponding type if an empty message
# is given, instead of metadata, which is the default gettext behavior.
result = type(message)("")
if isinstance(message, SafeData):
return mark_safe(result)
return result
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = gettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
elif isinstance(message, SafeData):
result = mark_safe(result)
return result
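# Hedged usage sketch (the translation values are illustrative): pgettext()
# disambiguates identical source strings by context and falls back to the
# bare message when no contextual translation is found.
#
#   pgettext("month name", "May")   # -> "mai" with a matching French entry,
#                                   #    "May" if the lookup fails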
def gettext_noop(message):
"""
Mark strings for translation but don't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Return a string of the translation of either the singular or plural,
based on the number.
"""
return do_ntranslate(singular, plural, number, "ngettext")
def npgettext(context, singular, plural, number):
msgs_with_ctxt = (
"%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number,
)
result = ngettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ngettext(singular, plural, number)
return result
def all_locale_paths():
"""
    Return a list of paths to user-provided language files.
"""
globalpath = os.path.join(
os.path.dirname(sys.modules[settings.__module__].__file__), "locale"
)
app_paths = []
for app_config in apps.get_app_configs():
locale_path = os.path.join(app_config.path, "locale")
if os.path.exists(locale_path):
app_paths.append(locale_path)
return [globalpath, *settings.LOCALE_PATHS, *app_paths]
@functools.lru_cache(maxsize=1000)
def check_for_language(lang_code):
"""
Check whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available.
    lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
# First, a quick check to make sure lang_code is well-formed (#21458)
if lang_code is None or not language_code_re.search(lang_code):
return False
return any(
gettext_module.find("django", path, [to_locale(lang_code)]) is not None
for path in all_locale_paths()
)
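# Hedged examples (the first assumes Django's bundled locale files are on the
# search path):
#
#   check_for_language("fr")          # -> True if a django.mo exists for it
#   check_for_language("bogus code")  # -> False, rejected by language_code_re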
@functools.lru_cache
def get_languages():
"""
Cache of settings.LANGUAGES in a dictionary for easy lookups by key.
"""
return dict(settings.LANGUAGES)
@functools.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
"""
Return the language code that's listed in supported languages, possibly
selecting a more generic variant. Raise LookupError if nothing is found.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
    lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
        # If 'zh-hant-tw' is not supported, try the special fallback or
        # progressively more generic language codes, e.g. 'zh-hant' and 'zh'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]["fallback"])
except KeyError:
pass
i = None
while (i := lang_code.rfind("-", 0, i)) > -1:
possible_lang_codes.append(lang_code[:i])
generic_lang_code = possible_lang_codes[-1]
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + "-"):
return supported_code
raise LookupError(lang_code)
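# Hedged example (assumes LANGUAGES lists "zh-hant" but not "zh-hant-tw"):
#
#   get_supported_language_variant("zh-hant-tw")  # -> "zh-hant"
#   get_supported_language_variant("xx")          # raises LookupError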
def get_language_from_path(path, strict=False):
"""
Return the language code if there's a valid language code found in `path`.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
"""
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match[1]
try:
return get_supported_language_variant(lang_code, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyze the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
if check_path:
lang_code = get_language_from_path(request.path_info)
if lang_code is not None:
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if (
lang_code is not None
and lang_code in get_languages()
and check_for_language(lang_code)
):
return lang_code
try:
return get_supported_language_variant(lang_code)
except LookupError:
pass
accept = request.META.get("HTTP_ACCEPT_LANGUAGE", "")
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == "*":
break
if not language_code_re.search(accept_lang):
continue
try:
return get_supported_language_variant(accept_lang)
except LookupError:
continue
try:
return get_supported_language_variant(settings.LANGUAGE_CODE)
except LookupError:
return settings.LANGUAGE_CODE
@functools.lru_cache(maxsize=1000)
def parse_accept_lang_header(lang_string):
"""
Parse the lang_string, which is the body of an HTTP Accept-Language
header, and return a tuple of (lang, q-value), ordered by 'q' values.
Return an empty tuple if there are any format errors in lang_string.
"""
result = []
pieces = accept_language_re.split(lang_string.lower())
if pieces[-1]:
return ()
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return ()
if priority:
priority = float(priority)
else:
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return tuple(result)
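# Hedged example: the header is lowercased before parsing and the result is
# ordered by descending q-value.
#
#   parse_accept_lang_header("da, en-GB;q=0.8, en;q=0.7")
#   # -> (("da", 1.0), ("en-gb", 0.8), ("en", 0.7))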
|
0562df1b05b9ee45f058e1a962ca7aa08cd4bce94f0f1afcce4ee1a36655967a | """
Internationalization support.
"""
from contextlib import ContextDecorator
from decimal import ROUND_UP, Decimal
from django.utils.autoreload import autoreload_started, file_changed
from django.utils.functional import lazy
from django.utils.regex_helper import _lazy_re_compile
__all__ = [
"activate",
"deactivate",
"override",
"deactivate_all",
"get_language",
"get_language_from_request",
"get_language_info",
"get_language_bidi",
"check_for_language",
"to_language",
"to_locale",
"templatize",
"gettext",
"gettext_lazy",
"gettext_noop",
"ngettext",
"ngettext_lazy",
"pgettext",
"pgettext_lazy",
"npgettext",
"npgettext_lazy",
]
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans:
"""
The purpose of this class is to store the actual translation function upon
    receiving the first call to that function. After this is done, changes to
    USE_I18N will have no effect on which function is served upon request. If
    your tests rely on changing USE_I18N, you can delete all the functions
    from _trans.__dict__.
    Note that storing the function with setattr will have a noticeable
    performance benefit, as subsequent accesses go through the normal
    attribute path instead of __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
from django.utils.translation.reloader import (
translation_file_changed,
watch_for_translation_changes,
)
autoreload_started.connect(
watch_for_translation_changes, dispatch_uid="translation_file_changed"
)
file_changed.connect(
translation_file_changed, dispatch_uid="translation_file_changed"
)
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no longer needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
return _trans.gettext_noop(message)
def gettext(message):
return _trans.gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
pgettext_lazy = lazy(pgettext, str)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, int):
kwargs["number"] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
original_kwargs = kwargs.copy()
class NumberAwareString(resultclass):
def __bool__(self):
return bool(kwargs["singular"])
def _get_number_value(self, values):
try:
return values[number]
except KeyError:
raise KeyError(
"Your dictionary lacks key '%s'. Please provide "
"it, because it is required to determine whether "
"string is singular or plural." % number
)
def _translate(self, number_value):
kwargs["number"] = number_value
return func(**kwargs)
def format(self, *args, **kwargs):
number_value = (
self._get_number_value(kwargs) if kwargs and number else args[0]
)
return self._translate(number_value).format(*args, **kwargs)
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
number_value = self._get_number_value(rhs)
else:
number_value = rhs
translated = self._translate(number_value)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number.
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
proxy.__reduce__ = lambda: (
_lazy_number_unpickle,
(func, resultclass, number, original_kwargs),
)
return proxy
def _lazy_number_unpickle(func, resultclass, number, kwargs):
return lazy_number(func, resultclass, number=number, **kwargs)
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
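# Hedged usage sketch: when `number` is the name of a dict key, the lazy
# proxy picks the plural form at interpolation time.
#
#   msg = ngettext_lazy("%(num)d apple", "%(num)d apples", "num")
#   msg % {"num": 1}   # -> "1 apple" (untranslated catalogs assumed)
#   msg % {"num": 3}   # -> "3 apples"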
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(
npgettext, str, context=context, singular=singular, plural=plural, number=number
)
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.old_language is None:
deactivate_all()
elif self.deactivate:
deactivate()
else:
activate(self.old_language)
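# Hedged usage sketch: override() works as a context manager or a decorator
# and restores the previously active language on exit.
#
#   with override("fr"):
#       greeting = gettext("Hello")  # resolved against the French catalog
#   # outside the block, the prior language is active again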
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_language(locale):
"""Turn a locale name (en_US) into a language name (en-us)."""
p = locale.find("_")
if p >= 0:
return locale[:p].lower() + "-" + locale[p + 1 :].lower()
else:
return locale.lower()
def to_locale(language):
"""Turn a language name (en-us) into a locale name (en_US)."""
lang, _, country = language.lower().partition("-")
if not country:
return language[:3].lower() + language[3:]
# A language with > 2 characters after the dash only has its first
# character after the dash capitalized; e.g. sr-latn becomes sr_Latn.
# A language with 2 characters after the dash has both characters
# capitalized; e.g. en-us becomes en_US.
country, _, tail = country.partition("-")
country = country.title() if len(country) > 2 else country.upper()
if tail:
country += "-" + tail
return lang + "_" + country
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def get_supported_language_variant(lang_code, *, strict=False):
return _trans.get_supported_language_variant(lang_code, strict)
def templatize(src, **kwargs):
from .template import templatize
return templatize(src, **kwargs)
def deactivate_all():
return _trans.deactivate_all()
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if "fallback" in lang_info and "name" not in lang_info:
info = get_language_info(lang_info["fallback"][0])
else:
info = lang_info
except KeyError:
if "-" not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split("-")[0]
try:
info = LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError(
"Unknown language code %s and %s." % (lang_code, generic_lang_code)
)
if info:
info["name_translated"] = gettext_lazy(info["name"])
return info
trim_whitespace_re = _lazy_re_compile(r"\s*\n\s*")
def trim_whitespace(s):
return trim_whitespace_re.sub(" ", s.strip())
def round_away_from_one(value):
return int(Decimal(value - 1).quantize(Decimal("0"), rounding=ROUND_UP)) + 1
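# Hedged examples: shifting by one and rounding away from zero makes the
# result round away from one in both directions.
#
#   round_away_from_one(1.2)  # -> 2
#   round_away_from_one(0.8)  # -> 0
#   round_away_from_one(1.0)  # -> 1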
|
4ce7cf353eb646751b506eb86bfe9dfd543bb6c0c2d7f1754c2a29c3f1f095c0 | import warnings
from io import StringIO
from django.template.base import Lexer, TokenType
from django.utils.regex_helper import _lazy_re_compile
from . import TranslatorCommentWarning, trim_whitespace
TRANSLATOR_COMMENT_MARK = "Translators"
dot_re = _lazy_re_compile(r"\S")
def blankout(src, char):
"""
Change every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
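# Hedged example: only non-whitespace characters are replaced, so offsets and
# line breaks survive for xgettext.
#
#   blankout("title|upper", "B")  # -> "BBBBBBBBBBB"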
context_re = _lazy_re_compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = _lazy_re_compile(
# Match the trans/translate 'some text' part.
r"""^\s*trans(?:late)?\s+((?:"[^"]*?")|(?:'[^']*?'))"""
# Match and ignore optional filters
r"""(?:\s*\|\s*[^\s:]+(?::(?:[^\s'":]+|(?:"[^"]*?")|(?:'[^']*?')))?)*"""
# Match the optional context part
r"""(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*"""
)
block_re = _lazy_re_compile(
r"""^\s*blocktrans(?:late)?(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)"""
)
endblock_re = _lazy_re_compile(r"""^\s*endblocktrans(?:late)?$""")
plural_re = _lazy_re_compile(r"""^\s*plural$""")
constant_re = _lazy_re_compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None):
"""
Turn a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
out = StringIO("")
message_context = None
intrans = False
inplural = False
trimmed = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
# Adding the u prefix allows gettext to recognize the string (#26093).
raw_prefix = "u"
def join_tokens(tokens, trim=False):
message = "".join(tokens)
if trim:
message = trim_whitespace(message)
return message
for t in Lexer(src).tokenize():
if incomment:
if t.token_type == TokenType.BLOCK and t.contents == "endcomment":
content = "".join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if (
translators_comment_start is not None
and lineno >= translators_comment_start
):
out.write(" # %s" % line)
else:
out.write(" #\n")
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TokenType.BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(
" npgettext({p}{!r}, {p}{!r}, {p}{!r},count) ".format(
message_context,
join_tokens(singular, trimmed),
join_tokens(plural, trimmed),
p=raw_prefix,
)
)
else:
out.write(
" ngettext({p}{!r}, {p}{!r}, count) ".format(
join_tokens(singular, trimmed),
join_tokens(plural, trimmed),
p=raw_prefix,
)
)
for part in singular:
out.write(blankout(part, "S"))
for part in plural:
out.write(blankout(part, "P"))
else:
if message_context:
out.write(
" pgettext({p}{!r}, {p}{!r}) ".format(
message_context,
join_tokens(singular, trimmed),
p=raw_prefix,
)
)
else:
out.write(
" gettext({p}{!r}) ".format(
join_tokens(singular, trimmed),
p=raw_prefix,
)
)
for part in singular:
out.write(blankout(part, "S"))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ""
if origin:
filemsg = "file %s, " % origin
raise SyntaxError(
"Translation blocks must not include other block tags: "
"%s (%sline %d)" % (t.contents, filemsg, t.lineno)
)
elif t.token_type == TokenType.VAR:
if inplural:
plural.append("%%(%s)s" % t.contents)
else:
singular.append("%%(%s)s" % t.contents)
elif t.token_type == TokenType.TEXT:
contents = t.contents.replace("%", "%%")
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count("\n")
if comment_lineno_cache == cur_lineno:
if t.token_type != TokenType.COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ""
if origin:
filemsg = "file %s, " % origin
warn_msg = (
"The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't "
"the last item on the line."
) % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write(
"# %s" % " | ".join(lineno_comment_map[comment_lineno_cache])
)
comment_lineno_cache = None
if t.token_type == TokenType.BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch[1]
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = g.replace("%", "%%")
if imatch[2]:
# A context is provided
context_match = context_re.match(imatch[2])
message_context = context_match[1]
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(
" pgettext({p}{!r}, {p}{!r}) ".format(
message_context, g, p=raw_prefix
)
)
message_context = None
else:
out.write(" gettext({p}{!r}) ".format(g, p=raw_prefix))
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(" _(%s) " % fmatch)
if bmatch[1]:
# A context is provided
context_match = context_re.match(bmatch[1])
message_context = context_match[1]
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
trimmed = "trimmed" in t.split_contents()
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(" _(%s) " % cmatch)
elif t.contents == "comment":
incomment = True
else:
out.write(blankout(t.contents, "B"))
elif t.token_type == TokenType.VAR:
parts = t.contents.split("|")
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(" _(%s) " % cmatch[1])
for p in parts[1:]:
if p.find(":_(") >= 0:
out.write(" %s " % p.split(":", 1)[1])
else:
out.write(blankout(p, "F"))
elif t.token_type == TokenType.COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, "X"))
return out.getvalue()
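# Hedged end-to-end sketch (spacing in the output is approximate): a template
# fragment such as {% trans "Hello" %} is rewritten into a gettext invocation
# that xgettext understands, roughly gettext(u'Hello').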
|
a15334c5e9e7ddfada52898c105b9cbf095baf95066148a35a7527e852f9e597 | from pathlib import Path
from asgiref.local import Local
from django.apps import apps
from django.utils.autoreload import is_django_module
def watch_for_translation_changes(sender, **kwargs):
"""Register file watchers for .mo files in potential locale paths."""
from django.conf import settings
if settings.USE_I18N:
directories = [Path("locale")]
directories.extend(
Path(config.path) / "locale"
for config in apps.get_app_configs()
if not is_django_module(config.module)
)
directories.extend(Path(p) for p in settings.LOCALE_PATHS)
for path in directories:
sender.watch_dir(path, "**/*.mo")
def translation_file_changed(sender, file_path, **kwargs):
"""Clear the internal translations cache if a .mo file is modified."""
if file_path.suffix == ".mo":
import gettext
from django.utils.translation import trans_real
gettext._translations = {}
trans_real._translations = {}
trans_real._default = None
trans_real._active = Local()
return True
|
b10a555ca6299caafd465d584ad35c6be6c62101dc3a44a79a6d65daa4461551 | """
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
from django.template.utils import get_app_template_dirs
from .filesystem import Loader as FilesystemLoader
class Loader(FilesystemLoader):
def get_dirs(self):
return get_app_template_dirs("templates")
|
639578834972f46b8d7bb050c5a2523043499efc735c99bb5e1493c737c5334b | from django.template import Template, TemplateDoesNotExist
class Loader:
def __init__(self, engine):
self.engine = engine
def get_template(self, template_name, skip=None):
"""
Call self.get_template_sources() and return a Template object for
the first template matching template_name. If skip is provided, ignore
template origins in skip. This is used to avoid recursion during
template extending.
"""
tried = []
for origin in self.get_template_sources(template_name):
if skip is not None and origin in skip:
tried.append((origin, "Skipped to avoid recursion"))
continue
try:
contents = self.get_contents(origin)
except TemplateDoesNotExist:
tried.append((origin, "Source does not exist"))
continue
else:
return Template(
contents,
origin,
origin.template_name,
self.engine,
)
raise TemplateDoesNotExist(template_name, tried=tried)
def get_template_sources(self, template_name):
"""
An iterator that yields possible matching template paths for a
template name.
"""
raise NotImplementedError(
"subclasses of Loader must provide a get_template_sources() method"
)
def reset(self):
"""
Reset any state maintained by the loader instance (e.g. cached
templates or cached loader modules).
"""
pass
|
7f8b22943ed65a1bf72bd43248c816edd94618dc3060070708c493169c228976 | """
Wrapper for loading templates from the filesystem.
"""
from django.core.exceptions import SuspiciousFileOperation
from django.template import Origin, TemplateDoesNotExist
from django.utils._os import safe_join
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, dirs=None):
super().__init__(engine)
self.dirs = dirs
def get_dirs(self):
return self.dirs if self.dirs is not None else self.engine.dirs
def get_contents(self, origin):
try:
with open(origin.name, encoding=self.engine.file_charset) as fp:
return fp.read()
except FileNotFoundError:
raise TemplateDoesNotExist(origin)
def get_template_sources(self, template_name):
"""
Return an Origin object pointing to an absolute path in each directory
in template_dirs. For security reasons, if a path doesn't lie inside
one of the template_dirs it is excluded from the result set.
"""
for template_dir in self.get_dirs():
try:
name = safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
continue
yield Origin(
name=name,
template_name=template_name,
loader=self,
)
|
6c3c245983e06efa6b53fbbd7fdc3da0d629496fe3f5beeca3f9a52b3a7df8de | """
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
from django.template import TemplateDoesNotExist
from django.template.backends.django import copy_exception
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, loaders):
self.get_template_cache = {}
self.loaders = engine.get_template_loaders(loaders)
super().__init__(engine)
def get_dirs(self):
for loader in self.loaders:
if hasattr(loader, "get_dirs"):
yield from loader.get_dirs()
def get_contents(self, origin):
return origin.loader.get_contents(origin)
def get_template(self, template_name, skip=None):
"""
Perform the caching that gives this loader its name. Often many of the
templates attempted will be missing, so memory use is of concern here.
To keep it in check, caching behavior is a little complicated when a
template is not found. See ticket #26306 for more details.
With template debugging disabled, cache the TemplateDoesNotExist class
for every missing template and raise a new instance of it after
fetching it from the cache.
With template debugging enabled, a unique TemplateDoesNotExist object
is cached for each missing template to preserve debug data. When
raising an exception, Python sets __traceback__, __context__, and
__cause__ attributes on it. Those attributes can contain references to
all sorts of objects up the call chain and caching them creates a
memory leak. Thus, unraised copies of the exceptions are cached and
copies of those copies are raised after they're fetched from the cache.
"""
key = self.cache_key(template_name, skip)
cached = self.get_template_cache.get(key)
if cached:
if isinstance(cached, type) and issubclass(cached, TemplateDoesNotExist):
raise cached(template_name)
elif isinstance(cached, TemplateDoesNotExist):
raise copy_exception(cached)
return cached
try:
template = super().get_template(template_name, skip)
except TemplateDoesNotExist as e:
self.get_template_cache[key] = (
copy_exception(e) if self.engine.debug else TemplateDoesNotExist
)
raise
else:
self.get_template_cache[key] = template
return template
def get_template_sources(self, template_name):
for loader in self.loaders:
yield from loader.get_template_sources(template_name)
def cache_key(self, template_name, skip=None):
"""
Generate a cache key for the template name and skip.
If skip is provided, only origins that match template_name are included
in the cache key. This ensures each template is only parsed and cached
once if contained in different extend chains like:
x -> a -> a
y -> a -> a
z -> a -> a
"""
skip_prefix = ""
if skip:
matching = [
origin.name for origin in skip if origin.template_name == template_name
]
if matching:
skip_prefix = self.generate_hash(matching)
return "-".join(s for s in (str(template_name), skip_prefix) if s)
def generate_hash(self, values):
return hashlib.sha1("|".join(values).encode()).hexdigest()
def reset(self):
"Empty the template cache."
self.get_template_cache.clear()
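# Hedged example of the resulting keys (hash shortened for readability):
# without a skip list the key is just the template name; with skipped origins
# matching the name, a SHA-1 suffix is appended.
#
#   loader.cache_key("index.html")              # -> "index.html"
#   loader.cache_key("index.html", skip=[...])  # -> "index.html-<sha1>"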
|
b7da741981765477f85c6e86820a7428198791d22e48ab8b7625573156f6887b | """
Wrapper for loading templates from a plain Python dict.
"""
from django.template import Origin, TemplateDoesNotExist
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, templates_dict):
self.templates_dict = templates_dict
super().__init__(engine)
def get_contents(self, origin):
try:
return self.templates_dict[origin.name]
except KeyError:
raise TemplateDoesNotExist(origin)
def get_template_sources(self, template_name):
yield Origin(
name=template_name,
template_name=template_name,
loader=self,
)
|
95e5e4e9edd74de0fc6186788f31983fd6ea2acd87759ba9cf70b56fe5dc2261 | from django.core.exceptions import ImproperlyConfigured, SuspiciousFileOperation
from django.template.utils import get_app_template_dirs
from django.utils._os import safe_join
from django.utils.functional import cached_property
class BaseEngine:
# Core methods: engines have to provide their own implementation
# (except for from_string which is optional).
def __init__(self, params):
"""
Initialize the template engine.
`params` is a dict of configuration settings.
"""
params = params.copy()
self.name = params.pop("NAME")
self.dirs = list(params.pop("DIRS"))
self.app_dirs = params.pop("APP_DIRS")
if params:
raise ImproperlyConfigured(
"Unknown parameters: {}".format(", ".join(params))
)
@property
def app_dirname(self):
raise ImproperlyConfigured(
"{} doesn't support loading templates from installed "
"applications.".format(self.__class__.__name__)
)
def from_string(self, template_code):
"""
Create and return a template for the given source code.
This method is optional.
"""
raise NotImplementedError(
"subclasses of BaseEngine should provide a from_string() method"
)
def get_template(self, template_name):
"""
Load and return a template for the given name.
Raise TemplateDoesNotExist if no such template exists.
"""
raise NotImplementedError(
"subclasses of BaseEngine must provide a get_template() method"
)
# Utility methods: they are provided to minimize code duplication and
# security issues in third-party backends.
@cached_property
def template_dirs(self):
"""
Return a list of directories to search for templates.
"""
# Immutable return value because it's cached and shared by callers.
template_dirs = tuple(self.dirs)
if self.app_dirs:
template_dirs += get_app_template_dirs(self.app_dirname)
return template_dirs
def iter_template_filenames(self, template_name):
"""
Iterate over candidate files for template_name.
Ignore files that don't lie inside configured template dirs to avoid
directory traversal attacks.
"""
for template_dir in self.template_dirs:
try:
yield safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
pass
|
cf95ff97129af6a2f82850d579a8be166b1ec14dca2e05473bccbe8072e256db | from django.middleware.csrf import get_token
from django.utils.functional import lazy
from django.utils.html import format_html
from django.utils.safestring import SafeString
def csrf_input(request):
return format_html(
'<input type="hidden" name="csrfmiddlewaretoken" value="{}">',
get_token(request),
)
csrf_input_lazy = lazy(csrf_input, SafeString, str)
csrf_token_lazy = lazy(get_token, str)
|
5c0b48d709959062437d53b3f8436be2fa0f5ab7d8de1e2c5b40a332aa2208ae | import string
from django.core.exceptions import ImproperlyConfigured
from django.template import Origin, TemplateDoesNotExist
from django.utils.html import conditional_escape
from .base import BaseEngine
from .utils import csrf_input_lazy, csrf_token_lazy
class TemplateStrings(BaseEngine):
app_dirname = "template_strings"
def __init__(self, params):
params = params.copy()
options = params.pop("OPTIONS").copy()
if options:
raise ImproperlyConfigured("Unknown options: {}".format(", ".join(options)))
super().__init__(params)
def from_string(self, template_code):
return Template(template_code)
def get_template(self, template_name):
tried = []
for template_file in self.iter_template_filenames(template_name):
try:
with open(template_file, encoding="utf-8") as fp:
template_code = fp.read()
except FileNotFoundError:
tried.append(
(
Origin(template_file, template_name, self),
"Source does not exist",
)
)
else:
return Template(template_code)
raise TemplateDoesNotExist(template_name, tried=tried, backend=self)
class Template(string.Template):
def render(self, context=None, request=None):
if context is None:
context = {}
else:
context = {k: conditional_escape(v) for k, v in context.items()}
if request is not None:
context["csrf_input"] = csrf_input_lazy(request)
context["csrf_token"] = csrf_token_lazy(request)
return self.safe_substitute(context)
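# Hedged usage sketch: templates use string.Template "$" placeholders, and
# context values are conditionally escaped before substitution.
#
#   Template("Hello $name").render({"name": "<b>Ana</b>"})
#   # -> "Hello &lt;b&gt;Ana&lt;/b&gt;"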
|
1ad2ca2bc3add6f05cca50f92c50c2d8f04823f65c5ccfc59e3d099072ace53a | from pathlib import Path
import jinja2
from django.conf import settings
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .base import BaseEngine
class Jinja2(BaseEngine):
app_dirname = "jinja2"
def __init__(self, params):
params = params.copy()
options = params.pop("OPTIONS").copy()
super().__init__(params)
self.context_processors = options.pop("context_processors", [])
environment = options.pop("environment", "jinja2.Environment")
environment_cls = import_string(environment)
if "loader" not in options:
options["loader"] = jinja2.FileSystemLoader(self.template_dirs)
options.setdefault("autoescape", True)
options.setdefault("auto_reload", settings.DEBUG)
options.setdefault(
"undefined", jinja2.DebugUndefined if settings.DEBUG else jinja2.Undefined
)
self.env = environment_cls(**options)
def from_string(self, template_code):
return Template(self.env.from_string(template_code), self)
def get_template(self, template_name):
try:
return Template(self.env.get_template(template_name), self)
except jinja2.TemplateNotFound as exc:
raise TemplateDoesNotExist(exc.name, backend=self) from exc
except jinja2.TemplateSyntaxError as exc:
new = TemplateSyntaxError(exc.args)
new.template_debug = get_exception_info(exc)
raise new from exc
@cached_property
def template_context_processors(self):
return [import_string(path) for path in self.context_processors]
class Template:
def __init__(self, template, backend):
self.template = template
self.backend = backend
self.origin = Origin(
name=template.filename,
template_name=template.name,
)
def render(self, context=None, request=None):
from .utils import csrf_input_lazy, csrf_token_lazy
if context is None:
context = {}
if request is not None:
context["request"] = request
context["csrf_input"] = csrf_input_lazy(request)
context["csrf_token"] = csrf_token_lazy(request)
for context_processor in self.backend.template_context_processors:
context.update(context_processor(request))
try:
return self.template.render(context)
except jinja2.TemplateSyntaxError as exc:
new = TemplateSyntaxError(exc.args)
new.template_debug = get_exception_info(exc)
raise new from exc
class Origin:
"""
A container to hold debug information as described in the template API
documentation.
"""
def __init__(self, name, template_name):
self.name = name
self.template_name = template_name
def get_exception_info(exception):
"""
Format exception information for display on the debug page using the
structure described in the template API documentation.
"""
context_lines = 10
lineno = exception.lineno
source = exception.source
if source is None:
exception_file = Path(exception.filename)
if exception_file.exists():
source = exception_file.read_text()
if source is not None:
lines = list(enumerate(source.strip().split("\n"), start=1))
during = lines[lineno - 1][1]
total = len(lines)
top = max(0, lineno - context_lines - 1)
bottom = min(total, lineno + context_lines)
else:
during = ""
lines = []
total = top = bottom = 0
return {
"name": exception.filename,
"message": exception.message,
"source_lines": lines[top:bottom],
"line": lineno,
"before": "",
"during": during,
"after": "",
"total": total,
"top": top,
"bottom": bottom,
}
|
2fefe22cf0241d9002758607a1d75dfc67826b0f19fcd156174af58c52cd64f5 | from importlib import import_module
from pkgutil import walk_packages
from django.apps import apps
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.template.context import make_context
from django.template.engine import Engine
from django.template.library import InvalidTemplateLibrary
from .base import BaseEngine
class DjangoTemplates(BaseEngine):
app_dirname = "templates"
def __init__(self, params):
params = params.copy()
options = params.pop("OPTIONS").copy()
options.setdefault("autoescape", True)
options.setdefault("debug", settings.DEBUG)
options.setdefault("file_charset", "utf-8")
libraries = options.get("libraries", {})
options["libraries"] = self.get_templatetag_libraries(libraries)
super().__init__(params)
self.engine = Engine(self.dirs, self.app_dirs, **options)
def from_string(self, template_code):
return Template(self.engine.from_string(template_code), self)
def get_template(self, template_name):
try:
return Template(self.engine.get_template(template_name), self)
except TemplateDoesNotExist as exc:
reraise(exc, self)
def get_templatetag_libraries(self, custom_libraries):
"""
        Return a collection of template tag libraries from installed
applications and the supplied custom_libraries argument.
"""
libraries = get_installed_libraries()
libraries.update(custom_libraries)
return libraries
class Template:
def __init__(self, template, backend):
self.template = template
self.backend = backend
@property
def origin(self):
return self.template.origin
def render(self, context=None, request=None):
context = make_context(
context, request, autoescape=self.backend.engine.autoescape
)
try:
return self.template.render(context)
except TemplateDoesNotExist as exc:
reraise(exc, self.backend)
def copy_exception(exc, backend=None):
"""
Create a new TemplateDoesNotExist. Preserve its declared attributes and
template debug data but discard __traceback__, __context__, and __cause__
to make this object suitable for keeping around (in a cache, for example).
"""
backend = backend or exc.backend
new = exc.__class__(*exc.args, tried=exc.tried, backend=backend, chain=exc.chain)
if hasattr(exc, "template_debug"):
new.template_debug = exc.template_debug
return new
def reraise(exc, backend):
"""
Reraise TemplateDoesNotExist while maintaining template debug information.
"""
new = copy_exception(exc, backend)
raise new from exc
def get_template_tag_modules():
"""
Yield (module_name, module_path) pairs for all installed template tag
libraries.
"""
candidates = ["django.templatetags"]
candidates.extend(
f"{app_config.name}.templatetags" for app_config in apps.get_app_configs()
)
for candidate in candidates:
try:
pkg = import_module(candidate)
except ImportError:
# No templatetags package defined. This is safe to ignore.
continue
if hasattr(pkg, "__path__"):
for name in get_package_libraries(pkg):
yield name[len(candidate) + 1 :], name
def get_installed_libraries():
"""
Return the built-in template tag libraries and those from installed
applications. Libraries are stored in a dictionary where keys are the
individual module names, not the full module paths. Example:
django.templatetags.i18n is stored as i18n.
"""
return {
module_name: full_name for module_name, full_name in get_template_tag_modules()
}
def get_package_libraries(pkg):
"""
Recursively yield template tag libraries defined in submodules of a
package.
"""
for entry in walk_packages(pkg.__path__, pkg.__name__ + "."):
try:
module = import_module(entry[1])
except ImportError as e:
raise InvalidTemplateLibrary(
"Invalid template library specified. ImportError raised when "
"trying to load '%s': %s" % (entry[1], e)
) from e
if hasattr(module, "register"):
yield entry[1]
|
fd7c530921e2c00cba2aa16ca92fd5d9f5688c374c74a9c372109d7359d4d81a | from django.apps.registry import apps as global_apps
from django.db import migrations, router
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor:
"""
End-to-end migration execution - load migrations and run them up or down
to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, return a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
applied = {}
else:
applied = dict(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.pop(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# If the target is missing, it's likely a replaced migration.
# Reload the graph without replacements.
if (
self.loader.replace_migrations
and target not in self.loader.graph.node_map
):
self.loader.replace_migrations = False
self.loader.build_graph()
return self.migration_plan(targets, clean_start=clean_start)
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n
for n in self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.pop(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied[migration] = self.loader.graph.nodes[migration]
return plan
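    # Hedged example of the plan shape (names are illustrative): for targets
    # [("polls", "0002_auto")] on an empty database, migration_plan() returns
    #
    #   [(<Migration polls.0001_initial>, False),
    #    (<Migration polls.0002_auto>, False)]
    #
    # where the boolean marks a backwards (True) or forwards (False) step.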
def _create_project_state(self, with_applied_migrations=False):
"""
Create a project state including all the applications without
migrations and applied migrations if with_applied_migrations=True.
"""
state = ProjectState(real_apps=self.loader.unmigrated_apps)
if with_applied_migrations:
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(
self.loader.graph.leaf_nodes(), clean_start=True
)
applied_migrations = {
self.loader.graph.nodes[key]
for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
for migration, _ in full_plan:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
return state
def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False):
"""
Migrate the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
"""
# The django_migrations table must be present to record applied
# migrations, but don't create it if there are no migrations to apply.
if plan == []:
if not self.recorder.has_table():
return self._create_project_state(with_applied_migrations=False)
else:
self.recorder.ensure_schema()
if plan is None:
plan = self.migration_plan(targets)
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(
self.loader.graph.leaf_nodes(), clean_start=True
)
all_forwards = all(not backwards for mig, backwards in plan)
all_backwards = all(backwards for mig, backwards in plan)
if not plan:
if state is None:
# The resulting state should include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
elif all_forwards == all_backwards:
# This should only happen if there's a mixed plan
raise InvalidMigrationPlan(
"Migration plans with both forwards and backwards migrations "
"are not supported. Please split your migration process into "
"separate plans of only forwards OR backwards migrations.",
plan,
)
elif all_forwards:
if state is None:
# The resulting state should still include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
state = self._migrate_all_forwards(
state, plan, full_plan, fake=fake, fake_initial=fake_initial
)
else:
# No need to check for `elif all_backwards` here, as that condition
# would always evaluate to true.
state = self._migrate_all_backwards(plan, full_plan, fake=fake)
self.check_replacements()
return state
def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial):
"""
Take a list of 2-tuples of the form (migration instance, False) and
apply them in the order they occur in the full_plan.
"""
migrations_to_run = {m[0] for m in plan}
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from these sets so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if "apps" not in state.__dict__:
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
state = self.apply_migration(
state, migration, fake=fake, fake_initial=fake_initial
)
migrations_to_run.remove(migration)
return state
def _migrate_all_backwards(self, plan, full_plan, fake):
"""
Take a list of 2-tuples of the form (migration instance, True) and
unapply them in reverse order they occur in the full_plan.
Since unapplying a migration requires the project state prior to that
migration, Django will compute the migration states before each of them
in a first run over the plan and then unapply them in a second run over
the plan.
"""
migrations_to_run = {m[0] for m in plan}
# Holds all migration states prior to the migrations being unapplied
states = {}
state = self._create_project_state()
applied_migrations = {
self.loader.graph.nodes[key]
for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
if self.progress_callback:
self.progress_callback("render_start")
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if "apps" not in state.__dict__:
state.apps # Render all -- performance critical
# The state before this migration
states[migration] = state
                # The old state stays as-is; we continue with the new state.
state = migration.mutate_state(state, preserve=True)
migrations_to_run.remove(migration)
elif migration in applied_migrations:
# Only mutate the state if the migration is actually applied
# to make sure the resulting state doesn't include changes
# from unrelated migrations.
migration.mutate_state(state, preserve=False)
if self.progress_callback:
self.progress_callback("render_success")
for migration, _ in plan:
self.unapply_migration(states[migration], migration, fake=fake)
applied_migrations.remove(migration)
# Generate the post migration state by starting from the state before
# the last migration is unapplied and mutating it to include all the
# remaining applied migrations.
last_unapplied_migration = plan[-1][0]
state = states[last_unapplied_migration]
for index, (migration, _) in enumerate(full_plan):
if migration == last_unapplied_migration:
for migration, _ in full_plan[index:]:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
break
return state
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""Run a migration forwards."""
migration_recorded = False
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor(
atomic=migration.atomic
) as schema_editor:
state = migration.apply(state, schema_editor)
if not schema_editor.deferred_sql:
self.record_migration(migration)
migration_recorded = True
if not migration_recorded:
self.record_migration(migration)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def record_migration(self, migration):
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
def unapply_migration(self, state, migration, fake=False):
"""Run a migration backwards."""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor(
atomic=migration.atomic
) as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, also record individual statuses.
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
Do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, to correctly handle the case
when a new squash migration is pushed to a deployment that already had
all its replaced migrations applied. In this case no new migration will
be applied, but the applied state of the squashed migration must be
maintained.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Test whether a migration has been implicitly applied - that the
tables or columns it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel and AddField).
"""
def should_skip_detecting_model(migration, model):
"""
No need to detect tables for proxy models, unmanaged models, or
models that can't be migrated on the current database.
"""
return (
model._meta.proxy
or not model._meta.managed
or not router.allow_migrate(
self.connection.alias,
migration.app_label,
model_name=model._meta.model_name,
)
)
if migration.initial is None:
# Bail if the migration isn't the first one in its app
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state(
(migration.app_label, migration.name), at_end=True
)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
fold_identifier_case = self.connection.features.ignores_table_name_case
with self.connection.cursor() as cursor:
existing_table_names = set(
self.connection.introspection.table_names(cursor)
)
if fold_identifier_case:
existing_table_names = {
name.casefold() for name in existing_table_names
}
# Make sure all create model and add field operations are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
db_table = model._meta.db_table
if fold_identifier_case:
db_table = db_table.casefold()
if db_table not in existing_table_names:
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
table = model._meta.db_table
field = model._meta.get_field(operation.name)
# Handle implicit many-to-many tables created by AddField.
if field.many_to_many:
through_db_table = field.remote_field.through._meta.db_table
if fold_identifier_case:
through_db_table = through_db_table.casefold()
if through_db_table not in existing_table_names:
return False, project_state
else:
found_add_field_migration = True
continue
with self.connection.cursor() as cursor:
columns = self.connection.introspection.get_table_description(
cursor, table
)
for column in columns:
field_column = field.column
column_name = column.name
if fold_identifier_case:
column_name = column_name.casefold()
field_column = field_column.casefold()
if column_name == field_column:
found_add_field_migration = True
break
else:
return False, project_state
# If we get this far and we found at least one CreateModel or AddField
# migration, the migration is considered implicitly applied.
return (found_create_model_migration or found_add_field_migration), after_state
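    # Illustrative use of the soft-apply check (hypothetical names); this
    # mirrors what apply_migration() does for `migrate --fake-initial`:
    #
    #   applied, state = executor.detect_soft_applied(state, migration)
    #   if applied:
    #       fake = True  # record the migration without re-running its DDL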
|
e1026da2b4edf6761546ea654e448ef39c8bdd78c30b6d7d9abc5de22646ed93 | import copy
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
from django.apps import AppConfig
from django.apps.registry import Apps
from django.apps.registry import apps as global_apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.migrations.utils import field_is_referenced, get_references
from django.db.models import NOT_PROVIDED
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
from .exceptions import InvalidBasesError
from .utils import resolve_relation
def _get_app_label_and_model_name(model, app_label=""):
if isinstance(model, str):
split = model.split(".", 1)
return tuple(split) if len(split) == 2 else (app_label, split[0])
else:
return model._meta.app_label, model._meta.model_name
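# Example behavior (illustrative; "auth"/"user" stand in for any app and
# model):
#
#   _get_app_label_and_model_name("auth.user")     # -> ("auth", "user")
#   _get_app_label_and_model_name("user", "auth")  # -> ("auth", "user")
#   _get_app_label_and_model_name(User)            # -> ("auth", "user")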
def _get_related_models(m):
"""Return all models that have a direct relationship to the given model."""
related_models = [
subclass
for subclass in m.__subclasses__()
if issubclass(subclass, models.Model)
]
related_fields_models = set()
for f in m._meta.get_fields(include_parents=True, include_hidden=True):
if (
f.is_relation
and f.related_model is not None
and not isinstance(f.related_model, str)
):
related_fields_models.add(f.model)
related_models.append(f.related_model)
# Reverse accessors of foreign keys to proxy models are attached to their
# concrete proxied model.
opts = m._meta
if opts.proxy and m in related_fields_models:
related_models.append(opts.concrete_model)
return related_models
def get_related_models_tuples(model):
"""
    Return a set of (app_label, model_name) tuples for all models related
    to the given model.
"""
return {
(rel_mod._meta.app_label, rel_mod._meta.model_name)
for rel_mod in _get_related_models(model)
}
def get_related_models_recursive(model):
"""
Return all models that have a direct or indirect relationship
to the given model.
Relationships are either defined by explicit relational fields, like
ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
model (a superclass is related to its subclasses, but not vice versa). Note,
however, that a model inheriting from a concrete model is also related to
its superclass through the implicit *_ptr OneToOneField on the subclass.
"""
seen = set()
queue = _get_related_models(model)
for rel_mod in queue:
rel_app_label, rel_model_name = (
rel_mod._meta.app_label,
rel_mod._meta.model_name,
)
if (rel_app_label, rel_model_name) in seen:
continue
seen.add((rel_app_label, rel_model_name))
queue.extend(_get_related_models(rel_mod))
return seen - {(model._meta.app_label, model._meta.model_name)}
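# A minimal sketch of what the traversal yields (hypothetical models): with
# Book.author = ForeignKey(Author) in a "library" app,
#
#   get_related_models_recursive(Author)
#   # -> {("library", "book"), ...} plus anything reachable from Book,
#   # but never ("library", "author") itself, which is excluded above.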
class ProjectState:
"""
Represent the entire project's overall state. This is the item that is
passed around - do it here rather than at the app level so that cross-app
FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
# Apps to include from main registry, usually unmigrated ones
if real_apps is None:
real_apps = set()
else:
assert isinstance(real_apps, set)
self.real_apps = real_apps
self.is_delayed = False
# {remote_model_key: {model_key: {field_name: field}}}
self._relations = None
@property
def relations(self):
if self._relations is None:
self.resolve_fields_and_relations()
return self._relations
def add_model(self, model_state):
model_key = model_state.app_label, model_state.name_lower
self.models[model_key] = model_state
if self._relations is not None:
self.resolve_model_relations(model_key)
if "apps" in self.__dict__: # hasattr would cache the property
self.reload_model(*model_key)
def remove_model(self, app_label, model_name):
model_key = app_label, model_name
del self.models[model_key]
if self._relations is not None:
self._relations.pop(model_key, None)
# Call list() since _relations can change size during iteration.
for related_model_key, model_relations in list(self._relations.items()):
model_relations.pop(model_key, None)
if not model_relations:
del self._relations[related_model_key]
if "apps" in self.__dict__: # hasattr would cache the property
self.apps.unregister_model(*model_key)
# Need to do this explicitly since unregister_model() doesn't clear
# the cache automatically (#24513)
self.apps.clear_cache()
def rename_model(self, app_label, old_name, new_name):
# Add a new model.
old_name_lower = old_name.lower()
new_name_lower = new_name.lower()
renamed_model = self.models[app_label, old_name_lower].clone()
renamed_model.name = new_name
self.models[app_label, new_name_lower] = renamed_model
# Repoint all fields pointing to the old model to the new one.
old_model_tuple = (app_label, old_name_lower)
new_remote_model = f"{app_label}.{new_name}"
to_reload = set()
for model_state, name, field, reference in get_references(
self, old_model_tuple
):
changed_field = None
if reference.to:
changed_field = field.clone()
changed_field.remote_field.model = new_remote_model
if reference.through:
if changed_field is None:
changed_field = field.clone()
changed_field.remote_field.through = new_remote_model
if changed_field:
model_state.fields[name] = changed_field
to_reload.add((model_state.app_label, model_state.name_lower))
if self._relations is not None:
old_name_key = app_label, old_name_lower
new_name_key = app_label, new_name_lower
if old_name_key in self._relations:
self._relations[new_name_key] = self._relations.pop(old_name_key)
for model_relations in self._relations.values():
if old_name_key in model_relations:
model_relations[new_name_key] = model_relations.pop(old_name_key)
# Reload models related to old model before removing the old model.
self.reload_models(to_reload, delay=True)
# Remove the old model.
self.remove_model(app_label, old_name_lower)
self.reload_model(app_label, new_name_lower, delay=True)
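    # A minimal, self-contained sketch of mutating a ProjectState (the app,
    # model, and field names are hypothetical):
    #
    #   from django.db import models
    #   state = ProjectState()
    #   state.add_model(
    #       ModelState(
    #           "shop", "Item", [("id", models.AutoField(primary_key=True))]
    #       )
    #   )
    #   state.rename_model("shop", "Item", "Product")
    #   assert ("shop", "product") in state.models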
def alter_model_options(self, app_label, model_name, options, option_keys=None):
model_state = self.models[app_label, model_name]
model_state.options = {**model_state.options, **options}
if option_keys:
for key in option_keys:
if key not in options:
model_state.options.pop(key, False)
self.reload_model(app_label, model_name, delay=True)
def alter_model_managers(self, app_label, model_name, managers):
model_state = self.models[app_label, model_name]
model_state.managers = list(managers)
self.reload_model(app_label, model_name, delay=True)
def _append_option(self, app_label, model_name, option_name, obj):
model_state = self.models[app_label, model_name]
model_state.options[option_name] = [*model_state.options[option_name], obj]
self.reload_model(app_label, model_name, delay=True)
def _remove_option(self, app_label, model_name, option_name, obj_name):
model_state = self.models[app_label, model_name]
objs = model_state.options[option_name]
model_state.options[option_name] = [obj for obj in objs if obj.name != obj_name]
self.reload_model(app_label, model_name, delay=True)
def add_index(self, app_label, model_name, index):
self._append_option(app_label, model_name, "indexes", index)
def remove_index(self, app_label, model_name, index_name):
self._remove_option(app_label, model_name, "indexes", index_name)
def add_constraint(self, app_label, model_name, constraint):
self._append_option(app_label, model_name, "constraints", constraint)
def remove_constraint(self, app_label, model_name, constraint_name):
self._remove_option(app_label, model_name, "constraints", constraint_name)
def add_field(self, app_label, model_name, name, field, preserve_default):
        # If preserve_default is off, don't use the default for future state.
        if not preserve_default:
            field = field.clone()
            field.default = NOT_PROVIDED
model_key = app_label, model_name
self.models[model_key].fields[name] = field
if self._relations is not None:
self.resolve_model_field_relations(model_key, name, field)
# Delay rendering of relationships if it's not a relational field.
delay = not field.is_relation
self.reload_model(*model_key, delay=delay)
def remove_field(self, app_label, model_name, name):
model_key = app_label, model_name
model_state = self.models[model_key]
old_field = model_state.fields.pop(name)
if self._relations is not None:
self.resolve_model_field_relations(model_key, name, old_field)
# Delay rendering of relationships if it's not a relational field.
delay = not old_field.is_relation
self.reload_model(*model_key, delay=delay)
def alter_field(self, app_label, model_name, name, field, preserve_default):
        if not preserve_default:
            field = field.clone()
            field.default = NOT_PROVIDED
model_key = app_label, model_name
fields = self.models[model_key].fields
if self._relations is not None:
old_field = fields.pop(name)
if old_field.is_relation:
self.resolve_model_field_relations(model_key, name, old_field)
fields[name] = field
if field.is_relation:
self.resolve_model_field_relations(model_key, name, field)
else:
fields[name] = field
# TODO: investigate if old relational fields must be reloaded or if
# it's sufficient if the new field is (#27737).
# Delay rendering of relationships if it's not a relational field and
# not referenced by a foreign key.
delay = not field.is_relation and not field_is_referenced(
self, model_key, (name, field)
)
self.reload_model(*model_key, delay=delay)
def rename_field(self, app_label, model_name, old_name, new_name):
model_key = app_label, model_name
model_state = self.models[model_key]
# Rename the field.
fields = model_state.fields
try:
found = fields.pop(old_name)
except KeyError:
raise FieldDoesNotExist(
f"{app_label}.{model_name} has no field named '{old_name}'"
)
fields[new_name] = found
for field in fields.values():
# Fix from_fields to refer to the new field.
from_fields = getattr(field, "from_fields", None)
if from_fields:
field.from_fields = tuple(
[
new_name if from_field_name == old_name else from_field_name
for from_field_name in from_fields
]
)
# Fix index/unique_together to refer to the new field.
options = model_state.options
for option in ("index_together", "unique_together"):
if option in options:
options[option] = [
[new_name if n == old_name else n for n in together]
for together in options[option]
]
# Fix to_fields to refer to the new field.
delay = True
references = get_references(self, model_key, (old_name, found))
for *_, field, reference in references:
delay = False
if reference.to:
remote_field, to_fields = reference.to
if getattr(remote_field, "field_name", None) == old_name:
remote_field.field_name = new_name
if to_fields:
field.to_fields = tuple(
[
new_name if to_field_name == old_name else to_field_name
for to_field_name in to_fields
]
)
if self._relations is not None:
old_name_lower = old_name.lower()
new_name_lower = new_name.lower()
for to_model in self._relations.values():
if old_name_lower in to_model[model_key]:
field = to_model[model_key].pop(old_name_lower)
field.name = new_name_lower
to_model[model_key][new_name_lower] = field
self.reload_model(*model_key, delay=delay)
def _find_reload_model(self, app_label, model_name, delay=False):
if delay:
self.is_delayed = True
related_models = set()
try:
old_model = self.apps.get_model(app_label, model_name)
except LookupError:
pass
else:
# Get all relations to and from the old model before reloading,
# as _meta.apps may change
if delay:
related_models = get_related_models_tuples(old_model)
else:
related_models = get_related_models_recursive(old_model)
# Get all outgoing references from the model to be rendered
model_state = self.models[(app_label, model_name)]
# Directly related models are the models pointed to by ForeignKeys,
# OneToOneFields, and ManyToManyFields.
direct_related_models = set()
for field in model_state.fields.values():
if field.is_relation:
if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT:
continue
rel_app_label, rel_model_name = _get_app_label_and_model_name(
field.related_model, app_label
)
direct_related_models.add((rel_app_label, rel_model_name.lower()))
# For all direct related models recursively get all related models.
related_models.update(direct_related_models)
for rel_app_label, rel_model_name in direct_related_models:
try:
rel_model = self.apps.get_model(rel_app_label, rel_model_name)
except LookupError:
pass
else:
if delay:
related_models.update(get_related_models_tuples(rel_model))
else:
related_models.update(get_related_models_recursive(rel_model))
# Include the model itself
related_models.add((app_label, model_name))
return related_models
def reload_model(self, app_label, model_name, delay=False):
if "apps" in self.__dict__: # hasattr would cache the property
related_models = self._find_reload_model(app_label, model_name, delay)
self._reload(related_models)
def reload_models(self, models, delay=True):
if "apps" in self.__dict__: # hasattr would cache the property
related_models = set()
for app_label, model_name in models:
related_models.update(
self._find_reload_model(app_label, model_name, delay)
)
self._reload(related_models)
def _reload(self, related_models):
# Unregister all related models
with self.apps.bulk_update():
for rel_app_label, rel_model_name in related_models:
self.apps.unregister_model(rel_app_label, rel_model_name)
states_to_be_rendered = []
# Gather all models states of those models that will be rerendered.
# This includes:
# 1. All related models of unmigrated apps
for model_state in self.apps.real_models:
if (model_state.app_label, model_state.name_lower) in related_models:
states_to_be_rendered.append(model_state)
# 2. All related models of migrated apps
for rel_app_label, rel_model_name in related_models:
try:
model_state = self.models[rel_app_label, rel_model_name]
except KeyError:
pass
else:
states_to_be_rendered.append(model_state)
# Render all models
self.apps.render_multiple(states_to_be_rendered)
def update_model_field_relation(
self,
model,
model_key,
field_name,
field,
concretes,
):
remote_model_key = resolve_relation(model, *model_key)
if remote_model_key[0] not in self.real_apps and remote_model_key in concretes:
remote_model_key = concretes[remote_model_key]
relations_to_remote_model = self._relations[remote_model_key]
if field_name in self.models[model_key].fields:
# The assert holds because it's a new relation, or an altered
# relation, in which case references have been removed by
# alter_field().
assert field_name not in relations_to_remote_model[model_key]
relations_to_remote_model[model_key][field_name] = field
else:
del relations_to_remote_model[model_key][field_name]
if not relations_to_remote_model[model_key]:
del relations_to_remote_model[model_key]
def resolve_model_field_relations(
self,
model_key,
field_name,
field,
concretes=None,
):
remote_field = field.remote_field
if not remote_field:
return
if concretes is None:
concretes, _ = self._get_concrete_models_mapping_and_proxy_models()
self.update_model_field_relation(
remote_field.model,
model_key,
field_name,
field,
concretes,
)
through = getattr(remote_field, "through", None)
if not through:
return
self.update_model_field_relation(
through, model_key, field_name, field, concretes
)
def resolve_model_relations(self, model_key, concretes=None):
if concretes is None:
concretes, _ = self._get_concrete_models_mapping_and_proxy_models()
model_state = self.models[model_key]
for field_name, field in model_state.fields.items():
self.resolve_model_field_relations(model_key, field_name, field, concretes)
def resolve_fields_and_relations(self):
# Resolve fields.
for model_state in self.models.values():
for field_name, field in model_state.fields.items():
field.name = field_name
# Resolve relations.
# {remote_model_key: {model_key: {field_name: field}}}
self._relations = defaultdict(partial(defaultdict, dict))
concretes, proxies = self._get_concrete_models_mapping_and_proxy_models()
for model_key in concretes:
self.resolve_model_relations(model_key, concretes)
for model_key in proxies:
self._relations[model_key] = self._relations[concretes[model_key]]
def get_concrete_model_key(self, model):
(
concrete_models_mapping,
_,
) = self._get_concrete_models_mapping_and_proxy_models()
model_key = make_model_tuple(model)
return concrete_models_mapping[model_key]
def _get_concrete_models_mapping_and_proxy_models(self):
concrete_models_mapping = {}
proxy_models = {}
# Split models to proxy and concrete models.
for model_key, model_state in self.models.items():
if model_state.options.get("proxy"):
proxy_models[model_key] = model_state
# Find a concrete model for the proxy.
concrete_models_mapping[
model_key
] = self._find_concrete_model_from_proxy(
proxy_models,
model_state,
)
else:
concrete_models_mapping[model_key] = model_key
return concrete_models_mapping, proxy_models
def _find_concrete_model_from_proxy(self, proxy_models, model_state):
for base in model_state.bases:
if not (isinstance(base, str) or issubclass(base, models.Model)):
continue
base_key = make_model_tuple(base)
base_state = proxy_models.get(base_key)
if not base_state:
# Concrete model found, stop looking at bases.
return base_key
return self._find_concrete_model_from_proxy(proxy_models, base_state)
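    # Example of the proxy resolution above (hypothetical models): with
    # Concrete <- ProxyA (proxy of Concrete) <- ProxyB (proxy of ProxyA),
    # looking up ProxyB walks the proxy bases through ProxyA and stops at
    # the first non-proxy base:
    #
    #   state.get_concrete_model_key(ProxyB)  # -> ("app", "concrete")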
def clone(self):
"""Return an exact copy of this ProjectState."""
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
)
if "apps" in self.__dict__:
new_state.apps = self.apps.clone()
new_state.is_delayed = self.is_delayed
return new_state
def clear_delayed_apps_cache(self):
if self.is_delayed and "apps" in self.__dict__:
del self.__dict__["apps"]
@cached_property
def apps(self):
return StateApps(self.real_apps, self.models)
@classmethod
def from_apps(cls, apps):
"""Take an Apps and return a ProjectState matching it."""
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name_lower)] = model_state
return cls(app_models)
def __eq__(self, other):
return self.models == other.models and self.real_apps == other.real_apps
class AppConfigStub(AppConfig):
"""Stub of an AppConfig. Only provides a label and a dict of models."""
def __init__(self, label):
self.apps = None
self.models = {}
# App-label and app-name are not the same thing, so technically passing
# in the label here is wrong. In practice, migrations don't care about
# the app name, but we need something unique, and the label works fine.
self.label = label
self.name = label
def import_models(self):
self.models = self.apps.all_models[self.label]
class StateApps(Apps):
"""
Subclass of the global Apps registry class to better handle dynamic model
additions and removals.
"""
def __init__(self, real_apps, models, ignore_swappable=False):
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
# FKs/M2Ms from real apps are also not included as they just
# mess things up with partial states (due to lack of dependencies)
self.real_models = []
for app_label in real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
self.real_models.append(ModelState.from_model(model, exclude_rels=True))
# Populate the app registry with a stub for each application.
app_labels = {model_state.app_label for model_state in models.values()}
app_configs = [
AppConfigStub(label) for label in sorted([*real_apps, *app_labels])
]
super().__init__(app_configs)
# These locks get in the way of copying as implemented in clone(),
# which is called whenever Django duplicates a StateApps before
# updating it.
self._lock = None
self.ready_event = None
self.render_multiple([*models.values(), *self.real_models])
# There shouldn't be any operations pending at this point.
from django.core.checks.model_checks import _check_lazy_references
ignore = (
{make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set()
)
errors = _check_lazy_references(self, ignore=ignore)
if errors:
raise ValueError("\n".join(error.msg for error in errors))
@contextmanager
def bulk_update(self):
# Avoid clearing each model's cache for each change. Instead, clear
# all caches when we're finished updating the model instances.
ready = self.ready
self.ready = False
try:
yield
finally:
self.ready = ready
self.clear_cache()
def render_multiple(self, model_states):
        # Keep trying to render the models in a loop, ignoring invalid base
        # errors, until the set of unrendered models stops shrinking; at that
        # point there must be a base dependency loop or a missing base.
if not model_states:
return
        # Prevent all model caches from being expired on each render.
with self.bulk_update():
unrendered_models = model_states
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError(
"Cannot resolve bases for %r\nThis can happen if you are "
"inheriting models from an app with migrations (e.g. "
"contrib.auth)\n in an app with no migrations; see "
"https://docs.djangoproject.com/en/%s/topics/migrations/"
"#dependencies for more"
% (new_unrendered_models, get_docs_version())
)
unrendered_models = new_unrendered_models
def clone(self):
"""Return a clone of this registry."""
clone = StateApps([], {})
clone.all_models = copy.deepcopy(self.all_models)
clone.app_configs = copy.deepcopy(self.app_configs)
# Set the pointer to the correct app registry.
for app_config in clone.app_configs.values():
app_config.apps = clone
# No need to actually clone them, they'll never change
clone.real_models = self.real_models
return clone
def register_model(self, app_label, model):
self.all_models[app_label][model._meta.model_name] = model
if app_label not in self.app_configs:
self.app_configs[app_label] = AppConfigStub(app_label)
self.app_configs[app_label].apps = self
self.app_configs[app_label].models[model._meta.model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def unregister_model(self, app_label, model_name):
try:
del self.all_models[app_label][model_name]
del self.app_configs[app_label].models[model_name]
except KeyError:
pass
class ModelState:
"""
Represent a Django Model. Don't use the actual Model class as it's not
designed to have its options changed - instead, mutate this one and then
render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(
self, app_label, name, fields, options=None, bases=None, managers=None
):
self.app_label = app_label
self.name = name
self.fields = dict(fields)
self.options = options or {}
self.options.setdefault("indexes", [])
self.options.setdefault("constraints", [])
self.bases = bases or (models.Model,)
self.managers = managers or []
for name, field in self.fields.items():
# Sanity-check that fields are NOT already bound to a model.
if hasattr(field, "model"):
raise ValueError(
'ModelState.fields cannot be bound to a model - "%s" is.' % name
)
# Sanity-check that relation fields are NOT referring to a model class.
if field.is_relation and hasattr(field.related_model, "_meta"):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.to" does. '
"Use a string reference instead." % name
)
if field.many_to_many and hasattr(field.remote_field.through, "_meta"):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.through" '
"does. Use a string reference instead." % name
)
# Sanity-check that indexes have their name set.
for index in self.options["indexes"]:
if not index.name:
raise ValueError(
"Indexes passed to ModelState require a name attribute. "
"%r doesn't have one." % index
)
@cached_property
def name_lower(self):
return self.name.lower()
def get_field(self, field_name):
if field_name == "_order":
field_name = self.options.get("order_with_respect_to", field_name)
return self.fields[field_name]
@classmethod
def from_model(cls, model, exclude_rels=False):
"""Given a model, return a ModelState representing it."""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
if getattr(field, "remote_field", None) and exclude_rels:
continue
if isinstance(field, models.OrderWrt):
continue
name = field.name
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError(
"Couldn't reconstruct field %s on %s: %s"
% (
name,
model._meta.label,
e,
)
)
if not exclude_rels:
for field in model._meta.local_many_to_many:
name = field.name
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError(
"Couldn't reconstruct m2m field %s on %s: %s"
% (
name,
model._meta.object_name,
e,
)
)
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "index_together":
it = model._meta.original_attrs["index_together"]
options[name] = set(normalize_together(it))
elif name == "indexes":
indexes = [idx.clone() for idx in model._meta.indexes]
for index in indexes:
if not index.name:
index.set_name_with_model(model)
options["indexes"] = indexes
elif name == "constraints":
options["constraints"] = [
con.clone() for con in model._meta.constraints
]
else:
options[name] = model._meta.original_attrs[name]
# If we're ignoring relationships, remove all field-listing model
# options (that option basically just means "make a stub model")
if exclude_rels:
for key in ["unique_together", "index_together", "order_with_respect_to"]:
if key in options:
del options[key]
# Private fields are ignored, so remove options that refer to them.
elif options.get("order_with_respect_to") in {
field.name for field in model._meta.private_fields
}:
del options["order_with_respect_to"]
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
        # We can't rely on __mro__ directly because we only want to flatten
        # abstract models and not the whole tree. However, recursing on
        # __bases__ can produce duplicates and ordering issues, so discard
        # any duplicates and reorder the bases according to their index in
        # the MRO.
flattened_bases = sorted(
set(flatten_bases(model)), key=lambda x: model.__mro__.index(x)
)
# Make our record
bases = tuple(
(base._meta.label_lower if hasattr(base, "_meta") else base)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any(
(isinstance(base, str) or issubclass(base, models.Model)) for base in bases
):
bases = (models.Model,)
managers = []
manager_names = set()
default_manager_shim = None
for manager in model._meta.managers:
if manager.name in manager_names:
# Skip overridden managers.
continue
elif manager.use_in_migrations:
# Copy managers usable in migrations.
new_manager = copy.copy(manager)
new_manager._set_creation_counter()
elif manager is model._base_manager or manager is model._default_manager:
# Shim custom managers used as default and base managers.
new_manager = models.Manager()
new_manager.model = manager.model
new_manager.name = manager.name
if manager is model._default_manager:
default_manager_shim = new_manager
else:
continue
manager_names.add(manager.name)
managers.append((manager.name, new_manager))
# Ignore a shimmed default manager called objects if it's the only one.
if managers == [("objects", default_manager_shim)]:
managers = []
# Construct the new ModelState
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
managers,
)
def construct_managers(self):
"""Deep-clone the managers using deconstruction."""
# Sort all managers by their creation counter
sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
for mgr_name, manager in sorted_managers:
as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
yield mgr_name, qs_class.as_manager()
else:
manager_class = import_string(manager_path)
yield mgr_name, manager_class(*args, **kwargs)
def clone(self):
"""Return an exact copy of this ModelState."""
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=dict(self.fields),
# Since options are shallow-copied here, operations such as
# AddIndex must replace their option (e.g 'indexes') rather
# than mutating it.
options=dict(self.options),
bases=self.bases,
managers=list(self.managers),
)
def render(self, apps):
"""Create a Model object from our current state into the given apps."""
# First, make a Meta object
meta_contents = {"app_label": self.app_label, "apps": apps, **self.options}
meta = type("Meta", (), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, str) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError(
"Cannot resolve one or more bases from %r" % (self.bases,)
)
# Clone fields for the body, add other bits.
body = {name: field.clone() for name, field in self.fields.items()}
body["Meta"] = meta
body["__module__"] = "__fake__"
# Restore managers
body.update(self.construct_managers())
# Then, make a Model object (apps.register_model is called in __new__)
return type(self.name, bases, body)
def get_index_by_name(self, name):
for index in self.options["indexes"]:
if index.name == name:
return index
raise ValueError("No index named %s on model %s" % (name, self.name))
def get_constraint_by_name(self, name):
for constraint in self.options["constraints"]:
if constraint.name == name:
return constraint
raise ValueError("No constraint named %s on model %s" % (name, self.name))
def __repr__(self):
return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name)
def __eq__(self, other):
return (
(self.app_label == other.app_label)
and (self.name == other.name)
and (len(self.fields) == len(other.fields))
and all(
k1 == k2 and f1.deconstruct()[1:] == f2.deconstruct()[1:]
for (k1, f1), (k2, f2) in zip(
sorted(self.fields.items()),
sorted(other.fields.items()),
)
)
and (self.options == other.options)
and (self.bases == other.bases)
and (self.managers == other.managers)
)
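    # Illustrative round trip through ModelState (assumes django.contrib.auth
    # is installed; purely a sketch):
    #
    #   from django.contrib.auth.models import User
    #   ms = ModelState.from_model(User)
    #   ms.app_label, ms.name    # -> ("auth", "User")
    #   "username" in ms.fields  # -> True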
|
2911dd8eaec0d2c1ea392d091d536547c32d42f6d8f6c9a321deeb9e0c2bace5 | import pkgutil
import sys
from importlib import import_module, reload
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from .exceptions import (
AmbiguityError,
BadMigrationError,
InconsistentMigrationHistory,
NodeNotFoundError,
)
MIGRATIONS_MODULE_NAME = "migrations"
class MigrationLoader:
"""
Load migration files from disk and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the Python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(
self,
connection,
load=True,
ignore_no_migrations=False,
replace_migrations=True,
):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
self.ignore_no_migrations = ignore_no_migrations
self.replace_migrations = replace_migrations
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
"""
Return the path to the migrations module for the specified app_label
and a boolean indicating if the module is specified in
        settings.MIGRATION_MODULES.
"""
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label], True
else:
app_package_name = apps.get_app_config(app_label).name
return "%s.%s" % (app_package_name, MIGRATIONS_MODULE_NAME), False
def load_disk(self):
"""Load the migrations from all INSTALLED_APPS from disk."""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
# Get the migrations module directory
module_name, explicit = self.migrations_module(app_config.label)
if module_name is None:
self.unmigrated_apps.add(app_config.label)
continue
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ModuleNotFoundError as e:
if (explicit and self.ignore_no_migrations) or (
not explicit and MIGRATIONS_MODULE_NAME in e.name.split(".")
):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# Module is not a package (e.g. migrations.py).
if not hasattr(module, "__path__"):
self.unmigrated_apps.add(app_config.label)
continue
# Empty directories are namespaces. Namespace packages have no
# __file__ and don't use a list for __path__. See
# https://docs.python.org/3/reference/import.html#namespace-packages
if getattr(module, "__file__", None) is None and not isinstance(
module.__path__, list
):
self.unmigrated_apps.add(app_config.label)
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
reload(module)
self.migrated_apps.add(app_config.label)
migration_names = {
name
for _, name, is_pkg in pkgutil.iter_modules(module.__path__)
if not is_pkg and name[0] not in "_~"
}
# Load migrations
for migration_name in migration_names:
migration_path = "%s.%s" % (module_name, migration_name)
try:
migration_module = import_module(migration_path)
except ImportError as e:
if "bad magic number" in str(e):
raise ImportError(
"Couldn't import %r as it appears to be a stale "
".pyc file." % migration_path
) from e
else:
raise
if not hasattr(migration_module, "Migration"):
raise BadMigrationError(
"Migration %s in app %s has no Migration class"
% (migration_name, app_config.label)
)
self.disk_migrations[
app_config.label, migration_name
] = migration_module.Migration(
migration_name,
app_config.label,
)
def get_migration(self, app_label, name_prefix):
"""Return the named migration or raise NodeNotFoundError."""
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"""
        Return the single migration matching the given app label and name
        prefix, raising AmbiguityError or KeyError otherwise.
"""
# Do the search
results = []
for migration_app_label, migration_name in self.disk_migrations:
if migration_app_label == app_label and migration_name.startswith(
name_prefix
):
results.append((migration_app_label, migration_name))
if len(results) > 1:
raise AmbiguityError(
"There is more than one migration for '%s' with the prefix '%s'"
% (app_label, name_prefix)
)
elif not results:
raise KeyError(
f"There is no migration for '{app_label}' with the prefix "
f"'{name_prefix}'"
)
else:
return self.disk_migrations[results[0]]
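    # Example behavior (hypothetical migration names):
    #
    #   loader.get_migration_by_prefix("shop", "0002")
    #   # -> the single "shop" migration whose name starts with "0002";
    #   # raises AmbiguityError if both 0002_a and 0002_b exist, and
    #   # KeyError if nothing matches.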
def check_key(self, key, current_app):
if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
return key
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they even have
# migrations.
if key[0] == current_app:
# Ignore __first__ references to the same app (#22325)
return
if key[0] in self.unmigrated_apps:
# This app isn't migrated, but something depends on it.
# The models will get auto-added into the state, though
# so we're fine.
return
if key[0] in self.migrated_apps:
try:
if key[1] == "__first__":
return self.graph.root_nodes(key[0])[0]
else: # "__latest__"
return self.graph.leaf_nodes(key[0])[0]
except IndexError:
if self.ignore_no_migrations:
return None
else:
raise ValueError(
"Dependency on app with no migrations: %s" % key[0]
)
raise ValueError("Dependency on unknown app: %s" % key[0])
def add_internal_dependencies(self, key, migration):
"""
Internal dependencies need to be added first to ensure `__first__`
dependencies find the correct root node.
"""
for parent in migration.dependencies:
# Ignore __first__ references to the same app.
if parent[0] == key[0] and parent[1] != "__first__":
self.graph.add_dependency(migration, key, parent, skip_validation=True)
def add_external_dependencies(self, key, migration):
for parent in migration.dependencies:
# Skip internal dependencies
if key[0] == parent[0]:
continue
parent = self.check_key(parent, key[0])
if parent is not None:
self.graph.add_dependency(migration, key, parent, skip_validation=True)
for child in migration.run_before:
child = self.check_key(child, key[0])
if child is not None:
self.graph.add_dependency(migration, child, key, skip_validation=True)
def build_graph(self):
"""
Build a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem as generally migration stuff runs in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
if self.connection is None:
self.applied_migrations = {}
else:
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# To start, populate the migration graph with nodes for ALL migrations
# and their dependencies. Also make note of replacing migrations at this step.
self.graph = MigrationGraph()
self.replacements = {}
for key, migration in self.disk_migrations.items():
self.graph.add_node(key, migration)
# Replacing migrations.
if migration.replaces:
self.replacements[key] = migration
for key, migration in self.disk_migrations.items():
# Internal (same app) dependencies.
self.add_internal_dependencies(key, migration)
# Add external dependencies now that the internal ones have been resolved.
for key, migration in self.disk_migrations.items():
self.add_external_dependencies(key, migration)
# Carry out replacements where possible and if enabled.
if self.replace_migrations:
for key, migration in self.replacements.items():
# Get applied status of each of this migration's replacement
# targets.
applied_statuses = [
(target in self.applied_migrations) for target in migration.replaces
]
# The replacing migration is only marked as applied if all of
# its replacement targets are.
if all(applied_statuses):
self.applied_migrations[key] = migration
else:
self.applied_migrations.pop(key, None)
# A replacing migration can be used if either all or none of
# its replacement targets have been applied.
if all(applied_statuses) or (not any(applied_statuses)):
self.graph.remove_replaced_nodes(key, migration.replaces)
else:
# This replacing migration cannot be used because it is
# partially applied. Remove it from the graph and remap
# dependencies to it (#25945).
self.graph.remove_replacement_node(key, migration.replaces)
# Ensure the graph is consistent.
try:
self.graph.validate_consistency()
except NodeNotFoundError as exc:
# Check if the missing node could have been replaced by any squash
# migration but wasn't because the squash migration was partially
# applied before. In that case raise a more understandable exception
# (#23556).
# Get reverse replacements.
reverse_replacements = {}
for key, migration in self.replacements.items():
for replaced in migration.replaces:
reverse_replacements.setdefault(replaced, set()).add(key)
# Try to reraise exception with more detail.
if exc.node in reverse_replacements:
candidates = reverse_replacements.get(exc.node, set())
is_replaced = any(
candidate in self.graph.nodes for candidate in candidates
)
if not is_replaced:
tries = ", ".join("%s.%s" % c for c in candidates)
raise NodeNotFoundError(
"Migration {0} depends on nonexistent node ('{1}', '{2}'). "
"Django tried to replace migration {1}.{2} with any of [{3}] "
"but wasn't able to because some of the replaced migrations "
"are already applied.".format(
exc.origin, exc.node[0], exc.node[1], tries
),
exc.node,
) from exc
raise
self.graph.ensure_not_cyclic()
def check_consistent_history(self, connection):
"""
Raise InconsistentMigrationHistory if any applied migrations have
unapplied dependencies.
"""
recorder = MigrationRecorder(connection)
applied = recorder.applied_migrations()
for migration in applied:
# If the migration is unknown, skip it.
if migration not in self.graph.nodes:
continue
for parent in self.graph.node_map[migration].parents:
if parent not in applied:
# Skip unapplied squashed migrations that have all of their
# `replaces` applied.
if parent in self.replacements:
if all(
m in applied for m in self.replacements[parent].replaces
):
continue
raise InconsistentMigrationHistory(
"Migration {}.{} is applied before its dependency "
"{}.{} on database '{}'.".format(
migration[0],
migration[1],
parent[0],
parent[1],
connection.alias,
)
)
def detect_conflicts(self):
"""
Look through the loaded graph and detect any conflicts - apps
        with more than one leaf migration. Return a dict mapping each
        conflicting app label to its conflicting migration names.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return {
app_label: sorted(seen_apps[app_label]) for app_label in conflicting_apps
}
def project_state(self, nodes=None, at_end=True):
"""
Return a ProjectState object representing the most recent state
that the loaded migrations represent.
See graph.make_state() for the meaning of "nodes" and "at_end".
"""
return self.graph.make_state(
nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps
)
def collect_sql(self, plan):
"""
Take a migration plan and return a list of collected SQL statements
that represent the best-efforts version of that plan.
"""
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(
collect_sql=True, atomic=migration.atomic
) as schema_editor:
if state is None:
state = self.project_state(
(migration.app_label, migration.name), at_end=False
)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
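    # Sketch of previewing the SQL for one forwards migration (hypothetical
    # names; this is roughly what the sqlmigrate command does):
    #
    #   loader = MigrationLoader(connection)
    #   migration = loader.graph.nodes["shop", "0001_initial"]
    #   statements = loader.collect_sql([(migration, False)])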
|
4a8b5017b64a809a5df4a37e80a0cbf3008900a48481b653a0cfefb54027a87c | from django.db import DatabaseError
class AmbiguityError(Exception):
"""More than one migration matches a name prefix."""
pass
class BadMigrationError(Exception):
"""There's a bad migration (unreadable/bad format/etc.)."""
pass
class CircularDependencyError(Exception):
"""There's an impossible-to-resolve circular dependency."""
pass
class InconsistentMigrationHistory(Exception):
"""An applied migration has some of its dependencies not applied."""
pass
class InvalidBasesError(ValueError):
"""A model's base classes can't be resolved."""
pass
class IrreversibleError(RuntimeError):
"""An irreversible migration is about to be reversed."""
pass
class NodeNotFoundError(LookupError):
"""An attempt on a node is made that is not available in the graph."""
def __init__(self, message, node, origin=None):
self.message = message
self.origin = origin
self.node = node
def __str__(self):
return self.message
def __repr__(self):
return "NodeNotFoundError(%r)" % (self.node,)
class MigrationSchemaMissing(DatabaseError):
pass
class InvalidMigrationPlan(ValueError):
pass
|
2aab183776c34e31969eebd5be4023d3aaa4da584540b91a5acafd716fa85582 | import os
import re
from importlib import import_module
from django import get_version
from django.apps import apps
# SettingsReference imported for backwards compatibility in Django 2.2.
from django.conf import SettingsReference # NOQA
from django.db import migrations
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.serializer import Serializer, serializer_factory
from django.utils.inspect import get_func_args
from django.utils.module_loading import module_dir
from django.utils.timezone import now
class OperationWriter:
def __init__(self, operation, indentation=2):
self.operation = operation
self.buff = []
self.indentation = indentation
def serialize(self):
def _write(_arg_name, _arg_value):
if _arg_name in self.operation.serialization_expand_args and isinstance(
_arg_value, (list, tuple, dict)
):
if isinstance(_arg_value, dict):
self.feed("%s={" % _arg_name)
self.indent()
for key, value in _arg_value.items():
key_string, key_imports = MigrationWriter.serialize(key)
arg_string, arg_imports = MigrationWriter.serialize(value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed("%s: %s" % (key_string, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed("%s," % args[-1])
else:
self.feed("%s: %s," % (key_string, arg_string))
imports.update(key_imports)
imports.update(arg_imports)
self.unindent()
self.feed("},")
else:
self.feed("%s=[" % _arg_name)
self.indent()
for item in _arg_value:
arg_string, arg_imports = MigrationWriter.serialize(item)
args = arg_string.splitlines()
if len(args) > 1:
for arg in args[:-1]:
self.feed(arg)
self.feed("%s," % args[-1])
else:
self.feed("%s," % arg_string)
imports.update(arg_imports)
self.unindent()
self.feed("],")
else:
arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed("%s=%s" % (_arg_name, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed("%s," % args[-1])
else:
self.feed("%s=%s," % (_arg_name, arg_string))
imports.update(arg_imports)
imports = set()
name, args, kwargs = self.operation.deconstruct()
operation_args = get_func_args(self.operation.__init__)
# See if this operation is in django.db.migrations. If it is,
# We can just use the fact we already have that imported,
# otherwise, we need to add an import for the operation class.
if getattr(migrations, name, None) == self.operation.__class__:
self.feed("migrations.%s(" % name)
else:
imports.add("import %s" % (self.operation.__class__.__module__))
self.feed("%s.%s(" % (self.operation.__class__.__module__, name))
self.indent()
for i, arg in enumerate(args):
arg_value = arg
arg_name = operation_args[i]
_write(arg_name, arg_value)
i = len(args)
# Only iterate over remaining arguments
for arg_name in operation_args[i:]:
if arg_name in kwargs: # Don't sort to maintain signature order
arg_value = kwargs[arg_name]
_write(arg_name, arg_value)
self.unindent()
self.feed("),")
return self.render(), imports
def indent(self):
self.indentation += 1
def unindent(self):
self.indentation -= 1
def feed(self, line):
self.buff.append(" " * (self.indentation * 4) + line)
def render(self):
return "\n".join(self.buff)
class MigrationWriter:
"""
    Take a Migration instance and produce the contents of the migration
    file for it.
"""
def __init__(self, migration, include_header=True):
self.migration = migration
self.include_header = include_header
self.needs_manual_porting = False
def as_string(self):
"""Return a string of the file contents."""
items = {
"replaces_str": "",
"initial_str": "",
}
imports = set()
# Deconstruct operations
operations = []
for operation in self.migration.operations:
operation_string, operation_imports = OperationWriter(operation).serialize()
imports.update(operation_imports)
operations.append(operation_string)
items["operations"] = "\n".join(operations) + "\n" if operations else ""
        # Format dependencies, writing swappable dependencies out specially.
dependencies = []
for dependency in self.migration.dependencies:
if dependency[0] == "__setting__":
dependencies.append(
" migrations.swappable_dependency(settings.%s),"
% dependency[1]
)
imports.add("from django.conf import settings")
else:
dependencies.append(" %s," % self.serialize(dependency)[0])
items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
# Format imports nicely, swapping imports of functions from migration files
# for comments
migration_imports = set()
for line in list(imports):
if re.match(r"^import (.*)\.\d+[^\s]*$", line):
migration_imports.add(line.split("import")[1].strip())
imports.remove(line)
self.needs_manual_porting = True
# django.db.migrations is always used, but models import may not be.
# If models import exists, merge it with migrations import.
if "from django.db import models" in imports:
imports.discard("from django.db import models")
imports.add("from django.db import migrations, models")
else:
imports.add("from django.db import migrations")
# Sort imports by the package / module to be imported (the part after
# "from" in "from ... import ..." or after "import" in "import ...").
sorted_imports = sorted(imports, key=lambda i: i.split()[1])
items["imports"] = "\n".join(sorted_imports) + "\n" if imports else ""
if migration_imports:
items["imports"] += (
"\n\n# Functions from the following migrations need manual "
"copying.\n# Move them and any dependencies into this file, "
"then update the\n# RunPython operations to refer to the local "
"versions:\n# %s"
) % "\n# ".join(sorted(migration_imports))
# If there's a replaces, make a string for it
if self.migration.replaces:
items["replaces_str"] = (
"\n replaces = %s\n" % self.serialize(self.migration.replaces)[0]
)
# Hinting that goes into comment
if self.include_header:
items["migration_header"] = MIGRATION_HEADER_TEMPLATE % {
"version": get_version(),
"timestamp": now().strftime("%Y-%m-%d %H:%M"),
}
else:
items["migration_header"] = ""
if self.migration.initial:
items["initial_str"] = "\n initial = True\n"
return MIGRATION_TEMPLATE % items
@property
def basedir(self):
migrations_package_name, _ = MigrationLoader.migrations_module(
self.migration.app_label
)
if migrations_package_name is None:
raise ValueError(
"Django can't create migrations for app '%s' because "
"migrations have been disabled via the MIGRATION_MODULES "
"setting." % self.migration.app_label
)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
except ImportError:
pass
else:
try:
return module_dir(migrations_module)
except ValueError:
pass
# Alright, see if it's a direct submodule of the app
app_config = apps.get_app_config(self.migration.app_label)
(
maybe_app_name,
_,
migrations_package_basename,
) = migrations_package_name.rpartition(".")
if app_config.name == maybe_app_name:
return os.path.join(app_config.path, migrations_package_basename)
# In case of using MIGRATION_MODULES setting and the custom package
# doesn't exist, create one, starting from an existing package
existing_dirs, missing_dirs = migrations_package_name.split("."), []
while existing_dirs:
missing_dirs.insert(0, existing_dirs.pop(-1))
try:
base_module = import_module(".".join(existing_dirs))
except (ImportError, ValueError):
continue
else:
try:
base_dir = module_dir(base_module)
except ValueError:
continue
else:
break
else:
raise ValueError(
"Could not locate an appropriate location to create "
"migrations package %s. Make sure the toplevel "
"package exists and can be imported." % migrations_package_name
)
final_dir = os.path.join(base_dir, *missing_dirs)
os.makedirs(final_dir, exist_ok=True)
for missing_dir in missing_dirs:
base_dir = os.path.join(base_dir, missing_dir)
with open(os.path.join(base_dir, "__init__.py"), "w"):
pass
return final_dir
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
return os.path.join(self.basedir, self.filename)
@classmethod
def serialize(cls, value):
return serializer_factory(value).serialize()
@classmethod
def register_serializer(cls, type_, serializer):
Serializer.register(type_, serializer)
@classmethod
def unregister_serializer(cls, type_):
Serializer.unregister(type_)
MIGRATION_HEADER_TEMPLATE = """\
# Generated by Django %(version)s on %(timestamp)s
"""
MIGRATION_TEMPLATE = """\
%(migration_header)s%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s%(initial_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
|
1d5b5c1014714302fd26b40ee6bd5b5486486541417ecf4bfac8ae0d0111661d | import datetime
import importlib
import os
import sys
from django.apps import apps
from django.core.management.base import OutputWrapper
from django.db.models import NOT_PROVIDED
from django.utils import timezone
from django.utils.version import get_docs_version
from .loader import MigrationLoader
class MigrationQuestioner:
"""
Give the autodetector responses to questions it might have.
This base class has a built-in noninteractive mode, but the
interactive subclass is what the command-line arguments will use.
"""
def __init__(self, defaults=None, specified_apps=None, dry_run=None):
self.defaults = defaults or {}
self.specified_apps = specified_apps or set()
self.dry_run = dry_run
def ask_initial(self, app_label):
"""Should we create an initial migration for the app?"""
# If it was specified on the command line, definitely true
if app_label in self.specified_apps:
return True
# Otherwise, we look to see if it has a migrations module
# without any Python files in it, apart from __init__.py.
# Apps from the new app template will have these; the Python
# file check will ensure we skip South ones.
try:
app_config = apps.get_app_config(app_label)
except LookupError: # It's a fake app.
return self.defaults.get("ask_initial", False)
migrations_import_path, _ = MigrationLoader.migrations_module(app_config.label)
if migrations_import_path is None:
# It's an application with migrations disabled.
return self.defaults.get("ask_initial", False)
try:
migrations_module = importlib.import_module(migrations_import_path)
except ImportError:
return self.defaults.get("ask_initial", False)
else:
if getattr(migrations_module, "__file__", None):
filenames = os.listdir(os.path.dirname(migrations_module.__file__))
elif hasattr(migrations_module, "__path__"):
if len(migrations_module.__path__) > 1:
return False
filenames = os.listdir(list(migrations_module.__path__)[0])
return not any(x.endswith(".py") for x in filenames if x != "__init__.py")
def ask_not_null_addition(self, field_name, model_name):
"""Adding a NOT NULL field to a model."""
# None means quit
return None
def ask_not_null_alteration(self, field_name, model_name):
"""Changing a NULL field to NOT NULL."""
# None means quit
return None
def ask_rename(self, model_name, old_name, new_name, field_instance):
"""Was this field really renamed?"""
return self.defaults.get("ask_rename", False)
def ask_rename_model(self, old_model_state, new_model_state):
"""Was this model really renamed?"""
return self.defaults.get("ask_rename_model", False)
def ask_merge(self, app_label):
"""Should these migrations really be merged?"""
return self.defaults.get("ask_merge", False)
def ask_auto_now_add_addition(self, field_name, model_name):
"""Adding an auto_now_add field to a model."""
# None means quit
return None
def ask_unique_callable_default_addition(self, field_name, model_name):
"""Adding a unique field with a callable default."""
# None means continue.
return None
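# Illustrative sketch (not part of Django): the base questioner answers from
# its ``defaults`` mapping instead of prompting. The model and field names
# below are hypothetical.
def _example_defaults_questioner():
    questioner = MigrationQuestioner(defaults={"ask_rename": True})
    assert questioner.ask_rename("book", "title", "name", None) is True
    assert questioner.ask_merge("library") is False  # no default provided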
class InteractiveMigrationQuestioner(MigrationQuestioner):
def __init__(
self, defaults=None, specified_apps=None, dry_run=None, prompt_output=None
):
super().__init__(
defaults=defaults, specified_apps=specified_apps, dry_run=dry_run
)
self.prompt_output = prompt_output or OutputWrapper(sys.stdout)
def _boolean_input(self, question, default=None):
self.prompt_output.write(f"{question} ", ending="")
result = input()
if not result and default is not None:
return default
while not result or result[0].lower() not in "yn":
self.prompt_output.write("Please answer yes or no: ", ending="")
result = input()
return result[0].lower() == "y"
def _choice_input(self, question, choices):
self.prompt_output.write(f"{question}")
for i, choice in enumerate(choices):
self.prompt_output.write(" %s) %s" % (i + 1, choice))
self.prompt_output.write("Select an option: ", ending="")
result = input()
while True:
try:
value = int(result)
except ValueError:
pass
else:
if 0 < value <= len(choices):
return value
self.prompt_output.write("Please select a valid option: ", ending="")
result = input()
def _ask_default(self, default=""):
"""
Prompt for a default value.
The ``default`` argument allows providing a custom default value (as a
string) which will be shown to the user and used as the return value
if the user doesn't provide any other input.
"""
self.prompt_output.write("Please enter the default value as valid Python.")
if default:
self.prompt_output.write(
f"Accept the default '{default}' by pressing 'Enter' or "
f"provide another value."
)
self.prompt_output.write(
"The datetime and django.utils.timezone modules are available, so "
"it is possible to provide e.g. timezone.now as a value."
)
self.prompt_output.write("Type 'exit' to exit this prompt")
while True:
if default:
prompt = "[default: {}] >>> ".format(default)
else:
prompt = ">>> "
self.prompt_output.write(prompt, ending="")
code = input()
if not code and default:
code = default
if not code:
self.prompt_output.write(
"Please enter some code, or 'exit' (without quotes) to exit."
)
elif code == "exit":
sys.exit(1)
else:
try:
return eval(code, {}, {"datetime": datetime, "timezone": timezone})
except (SyntaxError, NameError) as e:
self.prompt_output.write("Invalid input: %s" % e)
def ask_not_null_addition(self, field_name, model_name):
"""Adding a NOT NULL field to a model."""
if not self.dry_run:
choice = self._choice_input(
f"It is impossible to add a non-nullable field '{field_name}' "
f"to {model_name} without specifying a default. This is "
f"because the database needs something to populate existing "
f"rows.\n"
f"Please select a fix:",
[
(
"Provide a one-off default now (will be set on all existing "
"rows with a null value for this column)"
),
"Quit and manually define a default value in models.py.",
],
)
if choice == 2:
sys.exit(3)
else:
return self._ask_default()
return None
def ask_not_null_alteration(self, field_name, model_name):
"""Changing a NULL field to NOT NULL."""
if not self.dry_run:
choice = self._choice_input(
f"It is impossible to change a nullable field '{field_name}' "
f"on {model_name} to non-nullable without providing a "
f"default. This is because the database needs something to "
f"populate existing rows.\n"
f"Please select a fix:",
[
(
"Provide a one-off default now (will be set on all existing "
"rows with a null value for this column)"
),
"Ignore for now. Existing rows that contain NULL values "
"will have to be handled manually, for example with a "
"RunPython or RunSQL operation.",
"Quit and manually define a default value in models.py.",
],
)
if choice == 2:
return NOT_PROVIDED
elif choice == 3:
sys.exit(3)
else:
return self._ask_default()
return None
def ask_rename(self, model_name, old_name, new_name, field_instance):
"""Was this field really renamed?"""
msg = "Was %s.%s renamed to %s.%s (a %s)? [y/N]"
return self._boolean_input(
msg
% (
model_name,
old_name,
model_name,
new_name,
field_instance.__class__.__name__,
),
False,
)
def ask_rename_model(self, old_model_state, new_model_state):
"""Was this model really renamed?"""
msg = "Was the model %s.%s renamed to %s? [y/N]"
return self._boolean_input(
msg
% (old_model_state.app_label, old_model_state.name, new_model_state.name),
False,
)
def ask_merge(self, app_label):
return self._boolean_input(
"\nMerging will only work if the operations printed above do not conflict\n"
+ "with each other (working on different fields or models)\n"
+ "Should these migration branches be merged? [y/N]",
False,
)
def ask_auto_now_add_addition(self, field_name, model_name):
"""Adding an auto_now_add field to a model."""
if not self.dry_run:
choice = self._choice_input(
f"It is impossible to add the field '{field_name}' with "
f"'auto_now_add=True' to {model_name} without providing a "
f"default. This is because the database needs something to "
f"populate existing rows.\n",
[
"Provide a one-off default now which will be set on all "
"existing rows",
"Quit and manually define a default value in models.py.",
],
)
if choice == 2:
sys.exit(3)
else:
return self._ask_default(default="timezone.now")
return None
def ask_unique_callable_default_addition(self, field_name, model_name):
"""Adding a unique field with a callable default."""
if not self.dry_run:
version = get_docs_version()
choice = self._choice_input(
f"Callable default on unique field {model_name}.{field_name} "
f"will not generate unique values upon migrating.\n"
f"Please choose how to proceed:\n",
[
f"Continue making this migration as the first step in "
f"writing a manual migration to generate unique values "
f"described here: "
f"https://docs.djangoproject.com/en/{version}/howto/"
f"writing-migrations/#migrations-that-add-unique-fields.",
"Quit and edit field options in models.py.",
],
)
if choice == 2:
sys.exit(3)
return None
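# Illustrative sketch (not part of Django): redirecting the interactive
# questioner's prompts into an in-memory buffer. input() still reads from
# stdin, so only the questions are captured here.
def _example_capture_prompts():
    import io
    buffer = io.StringIO()
    questioner = InteractiveMigrationQuestioner(prompt_output=OutputWrapper(buffer))
    questioner.prompt_output.write("Was the model renamed?")
    return buffer.getvalue()  # "Was the model renamed?\n"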
class NonInteractiveMigrationQuestioner(MigrationQuestioner):
def __init__(
self,
defaults=None,
specified_apps=None,
dry_run=None,
verbosity=1,
log=None,
):
self.verbosity = verbosity
self.log = log
super().__init__(
defaults=defaults,
specified_apps=specified_apps,
dry_run=dry_run,
)
def log_lack_of_migration(self, field_name, model_name, reason):
if self.verbosity > 0:
self.log(
f"Field '{field_name}' on model '{model_name}' not migrated: "
f"{reason}."
)
def ask_not_null_addition(self, field_name, model_name):
# We can't ask the user, so act like the user aborted.
self.log_lack_of_migration(
field_name,
model_name,
"it is impossible to add a non-nullable field without specifying "
"a default",
)
sys.exit(3)
def ask_not_null_alteration(self, field_name, model_name):
# We can't ask the user, so set as not provided.
self.log(
f"Field '{field_name}' on model '{model_name}' given a default of "
f"NOT PROVIDED and must be corrected."
)
return NOT_PROVIDED
def ask_auto_now_add_addition(self, field_name, model_name):
# We can't ask the user, so act like the user aborted.
self.log_lack_of_migration(
field_name,
model_name,
"it is impossible to add a field with 'auto_now_add=True' without "
"specifying a default",
)
sys.exit(3)
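# Illustrative sketch (not part of Django): in non-interactive mode a NULL to
# NOT NULL change cannot prompt, so it logs a warning and returns NOT_PROVIDED
# for the caller to handle. ``print`` stands in for the management command's
# log callback.
def _example_noninteractive_alteration():
    questioner = NonInteractiveMigrationQuestioner(verbosity=1, log=print)
    return questioner.ask_not_null_alteration("title", "book") is NOT_PROVIDED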
|
bedecf738e4bba25d1f1a442ad73f95269ba5430824ecd8b02be4d5e1fabc5c1 | from functools import total_ordering
from django.db.migrations.state import ProjectState
from .exceptions import CircularDependencyError, NodeNotFoundError
@total_ordering
class Node:
"""
A single node in the migration graph. Contains direct links to adjacent
nodes in either direction.
"""
def __init__(self, key):
self.key = key
self.children = set()
self.parents = set()
def __eq__(self, other):
return self.key == other
def __lt__(self, other):
return self.key < other
def __hash__(self):
return hash(self.key)
def __getitem__(self, item):
return self.key[item]
def __str__(self):
return str(self.key)
def __repr__(self):
return "<%s: (%r, %r)>" % (self.__class__.__name__, self.key[0], self.key[1])
def add_child(self, child):
self.children.add(child)
def add_parent(self, parent):
self.parents.add(parent)
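# Illustrative sketch (not part of Django): a Node compares and hashes like
# its raw key, so node_map lookups and sorting can mix Node instances with
# plain (app_label, migration_name) tuples.
def _example_node_key_equivalence():
    node = Node(("app", "0001_initial"))
    assert node == ("app", "0001_initial")
    assert node[1] == "0001_initial"
    assert min(Node(("app", "0002_change")), node) is node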
class DummyNode(Node):
"""
A node that doesn't correspond to a migration file on disk.
(A squashed migration that was removed, for example.)
After the migration graph is processed, all dummy nodes should be removed.
If there are any left, a nonexistent dependency error is raised.
"""
def __init__(self, key, origin, error_message):
super().__init__(key)
self.origin = origin
self.error_message = error_message
def raise_error(self):
raise NodeNotFoundError(self.error_message, self.key, origin=self.origin)
class MigrationGraph:
"""
Represent the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
    Migration files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't responsible
for these; instead, the code to load them in here should examine the
migration files and if the replaced migrations are all either unapplied
or not present, it should ignore the replaced ones, load in just the
replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The tree special-cases
things within an app - namely, root nodes and leaf nodes ignore dependencies
to other apps.
"""
def __init__(self):
self.node_map = {}
self.nodes = {}
def add_node(self, key, migration):
assert key not in self.node_map
node = Node(key)
self.node_map[key] = node
self.nodes[key] = migration
def add_dummy_node(self, key, origin, error_message):
node = DummyNode(key, origin, error_message)
self.node_map[key] = node
self.nodes[key] = None
def add_dependency(self, migration, child, parent, skip_validation=False):
"""
This may create dummy nodes if they don't yet exist. If
`skip_validation=True`, validate_consistency() should be called
afterward.
"""
if child not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" child node %r" % (migration, child)
)
self.add_dummy_node(child, migration, error_message)
if parent not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" parent node %r" % (migration, parent)
)
self.add_dummy_node(parent, migration, error_message)
self.node_map[child].add_parent(self.node_map[parent])
self.node_map[parent].add_child(self.node_map[child])
if not skip_validation:
self.validate_consistency()
def remove_replaced_nodes(self, replacement, replaced):
"""
Remove each of the `replaced` nodes (when they exist). Any
dependencies that were referencing them are changed to reference the
`replacement` node instead.
"""
# Cast list of replaced keys to set to speed up lookup later.
replaced = set(replaced)
try:
replacement_node = self.node_map[replacement]
except KeyError as err:
raise NodeNotFoundError(
"Unable to find replacement node %r. It was either never added"
" to the migration graph, or has been removed." % (replacement,),
replacement,
) from err
for replaced_key in replaced:
self.nodes.pop(replaced_key, None)
replaced_node = self.node_map.pop(replaced_key, None)
if replaced_node:
for child in replaced_node.children:
child.parents.remove(replaced_node)
# We don't want to create dependencies between the replaced
# node and the replacement node as this would lead to
# self-referencing on the replacement node at a later iteration.
if child.key not in replaced:
replacement_node.add_child(child)
child.add_parent(replacement_node)
for parent in replaced_node.parents:
parent.children.remove(replaced_node)
# Again, to avoid self-referencing.
if parent.key not in replaced:
replacement_node.add_parent(parent)
parent.add_child(replacement_node)
def remove_replacement_node(self, replacement, replaced):
"""
The inverse operation to `remove_replaced_nodes`. Almost. Remove the
replacement node `replacement` and remap its child nodes to `replaced`
- the list of nodes it would have replaced. Don't remap its parent
nodes as they are expected to be correct already.
"""
self.nodes.pop(replacement, None)
try:
replacement_node = self.node_map.pop(replacement)
except KeyError as err:
raise NodeNotFoundError(
"Unable to remove replacement node %r. It was either never added"
" to the migration graph, or has been removed already."
% (replacement,),
replacement,
) from err
replaced_nodes = set()
replaced_nodes_parents = set()
for key in replaced:
replaced_node = self.node_map.get(key)
if replaced_node:
replaced_nodes.add(replaced_node)
replaced_nodes_parents |= replaced_node.parents
# We're only interested in the latest replaced node, so filter out
# replaced nodes that are parents of other replaced nodes.
replaced_nodes -= replaced_nodes_parents
for child in replacement_node.children:
child.parents.remove(replacement_node)
for replaced_node in replaced_nodes:
replaced_node.add_child(child)
child.add_parent(replaced_node)
for parent in replacement_node.parents:
parent.children.remove(replacement_node)
# NOTE: There is no need to remap parent dependencies as we can
# assume the replaced nodes already have the correct ancestry.
def validate_consistency(self):
"""Ensure there are no dummy nodes remaining in the graph."""
        for node in self.node_map.values():
            if isinstance(node, DummyNode):
                node.raise_error()
def forwards_plan(self, target):
"""
Given a node, return a list of which previous nodes (dependencies) must
be applied, ending with the node itself. This is the list you would
follow if applying the migrations to a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
return self.iterative_dfs(self.node_map[target])
def backwards_plan(self, target):
"""
        Given a node, return a list of which dependent nodes (descendants)
must be unapplied, ending with the node itself. This is the list you
would follow if removing the migrations from a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
return self.iterative_dfs(self.node_map[target], forwards=False)
def iterative_dfs(self, start, forwards=True):
"""Iterative depth-first search for finding dependencies."""
visited = []
visited_set = set()
stack = [(start, False)]
while stack:
node, processed = stack.pop()
if node in visited_set:
pass
elif processed:
visited_set.add(node)
visited.append(node.key)
else:
stack.append((node, True))
stack += [
(n, False)
for n in sorted(node.parents if forwards else node.children)
]
return visited
def root_nodes(self, app=None):
"""
Return all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if all(key[0] != node[0] for key in self.node_map[node].parents) and (
not app or app == node[0]
):
roots.add(node)
return sorted(roots)
def leaf_nodes(self, app=None):
"""
Return all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if all(key[0] != node[0] for key in self.node_map[node].children) and (
not app or app == node[0]
):
leaves.add(node)
return sorted(leaves)
def ensure_not_cyclic(self):
# Algo from GvR:
# https://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
todo = set(self.nodes)
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for child in self.node_map[top].children:
# Use child.key instead of child to speed up the frequent
# hashing.
node = child.key
if node in stack:
cycle = stack[stack.index(node) :]
raise CircularDependencyError(
", ".join("%s.%s" % n for n in cycle)
)
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
def __str__(self):
return "Graph: %s nodes, %s edges" % self._nodes_and_edges()
def __repr__(self):
nodes, edges = self._nodes_and_edges()
return "<%s: nodes=%s, edges=%s>" % (self.__class__.__name__, nodes, edges)
def _nodes_and_edges(self):
return len(self.nodes), sum(
len(node.parents) for node in self.node_map.values()
)
def _generate_plan(self, nodes, at_end):
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan and (at_end or migration not in nodes):
plan.append(migration)
return plan
def make_state(self, nodes=None, at_end=True, real_apps=None):
"""
Given a migration node or nodes, return a complete ProjectState for it.
If at_end is False, return the state before the migration has run.
If nodes is not provided, return the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if not nodes:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = self._generate_plan(nodes, at_end)
project_state = ProjectState(real_apps=real_apps)
for node in plan:
project_state = self.nodes[node].mutate_state(project_state, preserve=False)
return project_state
def __contains__(self, node):
return node in self.nodes
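# Illustrative sketch (not part of Django): a two-migration graph in which
# 0002 depends on 0001, so the forwards plan applies 0001 first and the
# backwards plan unapplies 0002 first. The Migration objects are stubbed
# with None because plan generation only needs the node keys.
def _example_two_node_plan():
    graph = MigrationGraph()
    graph.add_node(("app", "0001_initial"), None)
    graph.add_node(("app", "0002_change"), None)
    graph.add_dependency(
        "app.0002_change", ("app", "0002_change"), ("app", "0001_initial")
    )
    assert graph.forwards_plan(("app", "0002_change")) == [
        ("app", "0001_initial"),
        ("app", "0002_change"),
    ]
    assert graph.backwards_plan(("app", "0001_initial")) == [
        ("app", "0002_change"),
        ("app", "0001_initial"),
    ]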
|
42b4050bfac95c47542d7997948d715ce654d79b4bdff98a879655a39bb335d8 | import functools
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import (
COMPILED_REGEX_TYPE,
RegexObject,
resolve_relation,
)
from django.utils.topological_sort import stable_topological_sort
class MigrationAutodetector:
"""
Take a pair of ProjectStates and compare them to see what the first would
need doing to make it match the second (the second usually being the
project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Take a graph to base names on and an optional set of apps
        to try and restrict to (restriction is not guaranteed).
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {key: self.deep_deconstruct(value) for key, value in obj.items()}
elif isinstance(obj, functools.partial):
return (
obj.func,
self.deep_deconstruct(obj.args),
self.deep_deconstruct(obj.keywords),
)
elif isinstance(obj, COMPILED_REGEX_TYPE):
return RegexObject(obj)
elif isinstance(obj, type):
# If this is a type that implements 'deconstruct' as an instance method,
# avoid treating this as being deconstructible itself - see #22951
return obj
elif hasattr(obj, "deconstruct"):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{key: self.deep_deconstruct(value) for key, value in kwargs.items()},
)
else:
return obj
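    # Illustrative sketch (not part of Django): deep_deconstruct() reduces a
    # field to a comparable (path, args, kwargs) triple, which is what the
    # rename/alter detection compares. The empty ProjectStates are
    # hypothetical stand-ins.
    @staticmethod
    def _example_deep_deconstruct():
        from django.db.migrations.state import ProjectState
        detector = MigrationAutodetector(ProjectState(), ProjectState())
        path, args, kwargs = detector.deep_deconstruct(
            models.CharField(max_length=100)
        )
        return path == "django.db.models.CharField" and kwargs == {"max_length": 100}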
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to. Used for detecting renames (as
the related fields change during renames).
"""
fields_def = []
for name, field in sorted(fields.items()):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
deconstruction[2].pop("to", None)
fields_def.append(deconstruction)
return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
"""
Return a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# Then go through that list, order it, and split into migrations to
# resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
self.altered_indexes = {}
self.altered_constraints = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_model_keys = set()
self.old_proxy_keys = set()
self.old_unmanaged_keys = set()
self.new_model_keys = set()
self.new_proxy_keys = set()
self.new_unmanaged_keys = set()
for (app_label, model_name), model_state in self.from_state.models.items():
if not model_state.options.get("managed", True):
self.old_unmanaged_keys.add((app_label, model_name))
elif app_label not in self.from_state.real_apps:
if model_state.options.get("proxy"):
self.old_proxy_keys.add((app_label, model_name))
else:
self.old_model_keys.add((app_label, model_name))
for (app_label, model_name), model_state in self.to_state.models.items():
if not model_state.options.get("managed", True):
self.new_unmanaged_keys.add((app_label, model_name))
elif app_label not in self.from_state.real_apps or (
convert_apps and app_label in convert_apps
):
if model_state.options.get("proxy"):
self.new_proxy_keys.add((app_label, model_name))
else:
self.new_model_keys.add((app_label, model_name))
self.from_state.resolve_fields_and_relations()
self.to_state.resolve_fields_and_relations()
# Renames have to come first
self.generate_renamed_models()
# Prepare lists of fields and generate through model map
self._prepare_field_lists()
self._generate_through_model_map()
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
# Create the altered indexes and store them in self.altered_indexes.
# This avoids the same computation in generate_removed_indexes()
# and generate_added_indexes().
self.create_altered_indexes()
self.create_altered_constraints()
# Generate index removal operations before field is removed
self.generate_removed_constraints()
self.generate_removed_indexes()
# Generate field renaming operations.
self.generate_renamed_fields()
# Generate removal of foo together.
self.generate_removed_altered_unique_together()
self.generate_removed_altered_index_together()
# Generate field operations.
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_order_with_respect_to()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_added_indexes()
self.generate_added_constraints()
self.generate_altered_db_table()
self._sort_migrations()
self._build_migration_list(graph)
self._optimize_migrations()
return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists and a list of the fields that used through models
in the old state so dependencies can be made from the through model
deletion to the field that uses it.
"""
self.kept_model_keys = self.old_model_keys & self.new_model_keys
self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys
self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys
self.through_users = {}
self.old_field_keys = {
(app_label, model_name, field_name)
for app_label, model_name in self.kept_model_keys
for field_name in self.from_state.models[
app_label, self.renamed_models.get((app_label, model_name), model_name)
].fields
}
self.new_field_keys = {
(app_label, model_name, field_name)
for app_label, model_name in self.kept_model_keys
for field_name in self.to_state.models[app_label, model_name].fields
}
def _generate_through_model_map(self):
"""Through model map generation."""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields.items():
if hasattr(field, "remote_field") and getattr(
field.remote_field, "through", None
):
through_key = resolve_relation(
field.remote_field.through, app_label, model_name
)
self.through_users[through_key] = (
app_label,
old_model_name,
field_name,
)
@staticmethod
def _resolve_dependency(dependency):
"""
Return the resolved dependency and a boolean denoting whether or not
it was swappable.
"""
if dependency[0] != "__setting__":
return dependency, False
resolved_app_label, resolved_object_name = getattr(
settings, dependency[1]
).split(".")
return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True
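    # Illustrative sketch (not part of Django): a swappable dependency such
    # as ("__setting__", "AUTH_USER_MODEL", None, True) resolves to the
    # concrete model behind the setting. Assumes settings are configured
    # with the default AUTH_USER_MODEL = "auth.User".
    @staticmethod
    def _example_resolve_swappable_dependency():
        dep = ("__setting__", "AUTH_USER_MODEL", None, True)
        resolved, is_swappable = MigrationAutodetector._resolve_dependency(dep)
        return resolved == ("auth", "user", None, True) and is_swappable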
def _build_migration_list(self, graph=None):
"""
Chop the lists of operations up into migrations with dependencies on
each other. Do this by going through an app's list of operations until
one is found that has an outgoing dependency that isn't in another
app's migration yet (hasn't been chopped off its list). Then chop off
the operations before it into a migration and move onto the next app.
        If the loop completes without doing anything, there's a circular
dependency (which _should_ be impossible as the operations are
all split at this point so they can't depend and be depended on).
"""
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
# Temporarily resolve the swappable dependency to
# prevent circular references. While keeping the
# dependency checks on the resolved model, add the
# swappable dependencies.
original_dep = dep
dep, is_swappable_dep = self._resolve_dependency(dep)
if dep[0] != app_label:
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(
dep[0], []
):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add(
(original_dep[0], original_dep[1])
)
elif dep[0] in self.migrations:
operation_dependencies.add(
(dep[0], self.migrations[dep[0]][-1].name)
)
else:
# If we can't find the other app, we add a
# first/last dependency, but only if we've
# already been through once and checked
# everything.
if chop_mode:
# If the app already exists, we add a
# dependency on the last migration, as
# we don't know which migration
# contains the target field. If it's
# not yet migrated or has no
# migrations, we use __first__.
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(
graph.leaf_nodes(dep[0])[0]
)
else:
operation_dependencies.add(
(dep[0], "__first__")
)
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
del self.generated_operations[app_label][0]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type(
"Migration",
(Migration,),
{"operations": [], "dependencies": []},
)
instance = subclass(
"auto_%i" % (len(self.migrations.get(app_label, [])) + 1),
app_label,
)
instance.dependencies = list(dependencies)
instance.operations = chopped
instance.initial = app_label not in self.existing_apps
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = (
chopped + self.generated_operations[app_label]
)
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError(
"Cannot resolve operation dependencies: %r"
% self.generated_operations
)
num_ops = new_num_ops
def _sort_migrations(self):
"""
        Reorder operations so that dependencies are satisfiable. Reordering
        may be needed so FKs work nicely inside the same app.
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
# Resolve intra-app dependencies to handle circular
# references involving a swappable model.
dep = self._resolve_dependency(dep)[0]
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(
ops, dependency_graph
)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for migrations in self.migrations.values():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(
migration.operations, app_label
)
def check_dependency(self, operation, dependency):
"""
Return True if the given operation depends on the given dependency,
False otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel)
and operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel)
and operation.name_lower == dependency[1].lower()
and any(dependency[2] == x for x, y in operation.fields)
) or (
isinstance(operation, operations.AddField)
and operation.model_name_lower == dependency[1].lower()
and operation.name_lower == dependency[2].lower()
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField)
and operation.model_name_lower == dependency[1].lower()
and operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel)
and operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField)
and operation.model_name_lower == dependency[1].lower()
and operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo)
and operation.name_lower == dependency[1].lower()
and (operation.order_with_respect_to or "").lower()
!= dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(
operation,
(operations.AlterUniqueTogether, operations.AlterIndexTogether),
)
and operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency,))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are
# (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
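    # Illustrative sketch (not part of Django): the shape of the _auto_deps
    # tuples stored above. The app, model, and field names are hypothetical.
    @staticmethod
    def _example_dependency_tuples():
        return [
            ("library", "author", None, True),  # the author model is created
            ("library", "book", "author", False),  # book.author is removed
            ("library", "book", "author", "alter"),  # book.author is altered
        ]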
def swappable_first_key(self, item):
"""
Place potential swappable models first in lists of created models (only
real way to solve #22783).
"""
try:
model_state = self.to_state.models[item]
base_names = {
base if isinstance(base, str) else base.__name__
for base in model_state.bases
}
string_version = "%s.%s" % (item[0], item[1])
if (
model_state.options.get("swappable")
or "AbstractUser" in base_names
or "AbstractBaseUser" in base_names
or settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
Find any renamed models, generate the operations for them, and remove
the old entry from the model lists. Must be run before other
model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = self.new_model_keys - self.old_model_keys
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = self.old_model_keys - self.new_model_keys
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[
rem_app_label, rem_model_name
]
rem_model_fields_def = self.only_relation_agnostic_fields(
rem_model_state.fields
)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(
rem_model_state, model_state
):
dependencies = []
fields = list(model_state.fields.values()) + [
field.remote_field
for relations in self.to_state.relations[
app_label, model_name
].values()
for field in relations.values()
]
for field in fields:
if field.is_relation:
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
)
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
),
dependencies=dependencies,
)
self.renamed_models[app_label, model_name] = rem_model_name
renamed_models_rel_key = "%s.%s" % (
rem_model_state.app_label,
rem_model_state.name_lower,
)
self.renamed_models_rel[
renamed_models_rel_key
] = "%s.%s" % (
model_state.app_label,
model_state.name_lower,
)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.add((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (these are optimized later, if
possible).
Defer any model options that refer to collections of fields that might
be deferred (e.g. unique_together, index_together).
"""
old_keys = self.old_model_keys | self.old_unmanaged_keys
added_models = self.new_model_keys - old_keys
added_unmanaged_models = self.new_unmanaged_keys - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True),
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
# Gather related fields
related_fields = {}
primary_key_rel = None
for field_name, field in model_state.fields.items():
if field.remote_field:
if field.remote_field.model:
if field.primary_key:
primary_key_rel = field.remote_field.model
elif not field.remote_field.parent_link:
related_fields[field_name] = field
if getattr(field.remote_field, "through", None):
related_fields[field_name] = field
# Are there indexes/unique|index_together to defer?
indexes = model_state.options.pop("indexes")
constraints = model_state.options.pop("constraints")
unique_together = model_state.options.pop("unique_together", None)
index_together = model_state.options.pop("index_together", None)
order_with_respect_to = model_state.options.pop(
"order_with_respect_to", None
)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the removal of base fields if the new model has
# a field with the same name.
old_base_model_state = self.from_state.models.get(
(base_app_label, base_name)
)
new_base_model_state = self.to_state.models.get(
(base_app_label, base_name)
)
if old_base_model_state and new_base_model_state:
removed_base_fields = (
set(old_base_model_state.fields)
.difference(
new_base_model_state.fields,
)
.intersection(model_state.fields)
)
for removed_base_field in removed_base_fields:
dependencies.append(
(base_app_label, base_name, removed_base_field, False)
)
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append(
resolve_relation(
primary_key_rel,
app_label,
model_name,
)
+ (None, True)
)
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[
d
for d in model_state.fields.items()
if d[0] not in related_fields
],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_state.options.get("managed", True):
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
dependencies = self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
            # Generate other operations
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
],
)
related_dependencies = [
(app_label, model_name, name, True) for name in sorted(related_fields)
]
related_dependencies.append((app_label, model_name, None, True))
for index in indexes:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
),
dependencies=related_dependencies,
)
for constraint in constraints:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
),
dependencies=related_dependencies,
)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies,
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies,
)
# Fix relationships if the model changed from a proxy model to a
# concrete model.
relations = self.to_state.relations
if (app_label, model_name) in self.old_proxy_keys:
for related_model_key, related_fields in relations[
app_label, model_name
].items():
related_model_state = self.to_state.models[related_model_key]
for related_field_name, related_field in related_fields.items():
self.add_operation(
related_model_state.app_label,
operations.AlterField(
model_name=related_model_state.name,
name=related_field_name,
field=related_field,
),
dependencies=[(app_label, model_name, None, True)],
)
def generate_created_proxies(self):
"""
        Make CreateModel statements for proxy models. Use the same statements
        as for concrete models so there's less code duplication, but for proxy
        models it's safe to skip all the pointless field stuff and chuck out
        an operation.
"""
added = self.new_proxy_keys - self.old_proxy_keys
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (these are optimized later, if
possible).
Also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = self.new_model_keys | self.new_unmanaged_keys
deleted_models = self.old_model_keys - new_keys
deleted_unmanaged_models = self.old_unmanaged_keys - new_keys
all_deleted_models = chain(
sorted(deleted_models), sorted(deleted_unmanaged_models)
)
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
# Gather related fields
related_fields = {}
for field_name, field in model_state.fields.items():
if field.remote_field:
if field.remote_field.model:
related_fields[field_name] = field
if getattr(field.remote_field, "through", None):
related_fields[field_name] = field
# Generate option removal first
unique_together = model_state.options.pop("unique_together", None)
index_together = model_state.options.pop("index_together", None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
),
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
),
)
# Then remove each related field
for name in sorted(related_fields):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
),
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
relations = self.from_state.relations
for (
related_object_app_label,
object_name,
), relation_related_fields in relations[app_label, model_name].items():
for field_name, field in relation_related_fields.items():
dependencies.append(
(related_object_app_label, object_name, field_name, False),
)
if not field.many_to_many:
dependencies.append(
(
related_object_app_label,
object_name,
field_name,
"alter",
),
)
for name in sorted(related_fields):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower))
if through_user:
dependencies.append(
(through_user[0], through_user[1], through_user[2], False)
)
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""Make DeleteModel options for proxy models."""
deleted = self.old_proxy_keys - self.new_proxy_keys
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""Work out renamed fields."""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(
self.new_field_keys - self.old_field_keys
):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
field = new_model_state.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(
self.old_field_keys - self.new_field_keys
):
if rem_app_label == app_label and rem_model_name == model_name:
old_field = old_model_state.get_field(rem_field_name)
old_field_dec = self.deep_deconstruct(old_field)
if (
field.remote_field
and field.remote_field.model
and "to" in old_field_dec[2]
):
old_rel_to = old_field_dec[2]["to"]
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]["to"] = self.renamed_models_rel[old_rel_to]
old_field.set_attributes_from_name(rem_field_name)
old_db_column = old_field.get_attname_column()[1]
if old_field_dec == field_dec or (
                    # Was the field renamed while a db_column equal to the
                    # old field's column was added?
old_field_dec[0:2] == field_dec[0:2]
and dict(old_field_dec[2], db_column=old_db_column)
== field_dec[2]
):
if self.questioner.ask_rename(
model_name, rem_field_name, field_name, field
):
# A db_column mismatch requires a prior noop
# AlterField for the subsequent RenameField to be a
# noop on attempts at preserving the old name.
if old_field.db_column != field.db_column:
altered_field = field.clone()
altered_field.name = rem_field_name
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=rem_field_name,
field=altered_field,
),
)
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
),
)
self.old_field_keys.remove(
(rem_app_label, rem_model_name, rem_field_name)
)
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[
app_label, model_name, field_name
] = rem_field_name
break
def generate_added_fields(self):
"""Make AddField operations."""
for app_label, model_name, field_name in sorted(
self.new_field_keys - self.old_field_keys
):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.to_state.models[app_label, model_name].get_field(field_name)
        # Fields that are foreign keys/M2Ms depend on the models they point to
dependencies = []
if field.remote_field and field.remote_field.model:
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
)
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
time_fields = (models.DateField, models.DateTimeField, models.TimeField)
preserve_default = (
field.null
or field.has_default()
or field.many_to_many
or (field.blank and field.empty_strings_allowed)
or (isinstance(field, time_fields) and field.auto_now)
)
if not preserve_default:
field = field.clone()
if isinstance(field, time_fields) and field.auto_now_add:
field.default = self.questioner.ask_auto_now_add_addition(
field_name, model_name
)
else:
field.default = self.questioner.ask_not_null_addition(
field_name, model_name
)
if (
field.unique
and field.default is not models.NOT_PROVIDED
and callable(field.default)
):
self.questioner.ask_unique_callable_default_addition(field_name, model_name)
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""Make RemoveField operations."""
for app_label, model_name, field_name in sorted(
self.old_field_keys - self.new_field_keys
):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
"""
Make AlterField operations, or possibly RemovedField/AddField if alter
isn't possible.
"""
for app_label, model_name, field_name in sorted(
self.old_field_keys & self.new_field_keys
):
# Did the field change?
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_field_name = self.renamed_fields.get(
(app_label, model_name, field_name), field_name
)
old_field = self.from_state.models[app_label, old_model_name].get_field(
old_field_name
)
new_field = self.to_state.models[app_label, model_name].get_field(
field_name
)
dependencies = []
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "remote_field") and getattr(
new_field.remote_field, "model", None
):
rename_key = resolve_relation(
new_field.remote_field.model, app_label, model_name
)
if rename_key in self.renamed_models:
new_field.remote_field.model = old_field.remote_field.model
# Handle ForeignKey which can only have a single to_field.
remote_field_name = getattr(new_field.remote_field, "field_name", None)
if remote_field_name:
to_field_rename_key = rename_key + (remote_field_name,)
if to_field_rename_key in self.renamed_fields:
# Repoint both model and field name because to_field
# inclusion in ForeignKey.deconstruct() is based on
# both.
new_field.remote_field.model = old_field.remote_field.model
new_field.remote_field.field_name = (
old_field.remote_field.field_name
)
# Handle ForeignObjects which can have multiple from_fields/to_fields.
from_fields = getattr(new_field, "from_fields", None)
if from_fields:
from_rename_key = (app_label, model_name)
new_field.from_fields = tuple(
[
self.renamed_fields.get(
from_rename_key + (from_field,), from_field
)
for from_field in from_fields
]
)
new_field.to_fields = tuple(
[
self.renamed_fields.get(rename_key + (to_field,), to_field)
for to_field in new_field.to_fields
]
)
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
new_field,
self.to_state,
)
)
if hasattr(new_field, "remote_field") and getattr(
new_field.remote_field, "through", None
):
rename_key = resolve_relation(
new_field.remote_field.through, app_label, model_name
)
if rename_key in self.renamed_models:
new_field.remote_field.through = old_field.remote_field.through
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
# If the field was confirmed to be renamed it means that only
# db_column was allowed to change which generate_renamed_fields()
# already accounts for by adding an AlterField operation.
if old_field_dec != new_field_dec and old_field_name == field_name:
both_m2m = old_field.many_to_many and new_field.many_to_many
neither_m2m = not old_field.many_to_many and not new_field.many_to_many
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (
old_field.null
and not new_field.null
and not new_field.has_default()
and not new_field.many_to_many
):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(
field_name, model_name
)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def create_altered_indexes(self):
option_name = operations.AddIndex.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_indexes = old_model_state.options[option_name]
new_indexes = new_model_state.options[option_name]
add_idx = [idx for idx in new_indexes if idx not in old_indexes]
rem_idx = [idx for idx in old_indexes if idx not in new_indexes]
self.altered_indexes.update(
{
(app_label, model_name): {
"added_indexes": add_idx,
"removed_indexes": rem_idx,
}
}
)
def generate_added_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes["added_indexes"]:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
),
)
def generate_removed_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes["removed_indexes"]:
self.add_operation(
app_label,
operations.RemoveIndex(
model_name=model_name,
name=index.name,
),
)
def create_altered_constraints(self):
option_name = operations.AddConstraint.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_constraints = old_model_state.options[option_name]
new_constraints = new_model_state.options[option_name]
add_constraints = [c for c in new_constraints if c not in old_constraints]
rem_constraints = [c for c in old_constraints if c not in new_constraints]
self.altered_constraints.update(
{
(app_label, model_name): {
"added_constraints": add_constraints,
"removed_constraints": rem_constraints,
}
}
)
def generate_added_constraints(self):
for (
app_label,
model_name,
), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints["added_constraints"]:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
),
)
def generate_removed_constraints(self):
for (
app_label,
model_name,
), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints["removed_constraints"]:
self.add_operation(
app_label,
operations.RemoveConstraint(
model_name=model_name,
name=constraint.name,
),
)
@staticmethod
def _get_dependencies_for_foreign_key(app_label, model_name, field, project_state):
remote_field_model = None
if hasattr(field.remote_field, "model"):
remote_field_model = field.remote_field.model
else:
relations = project_state.relations[app_label, model_name]
for (remote_app_label, remote_model_name), fields in relations.items():
if any(
field == related_field.remote_field
for related_field in fields.values()
):
remote_field_model = f"{remote_app_label}.{remote_model_name}"
break
# Account for FKs to swappable models
swappable_setting = getattr(field, "swappable_setting", None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label, dep_object_name = resolve_relation(
remote_field_model,
app_label,
model_name,
)
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None):
            through_app_label, through_object_name = resolve_relation(
                field.remote_field.through,
                app_label,
                model_name,
            )
dependencies.append((through_app_label, through_object_name, None, True))
return dependencies
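    # Example (editor's note): for a ForeignKey to a swappable model such as
    # settings.AUTH_USER_MODEL the result is
    # [("__setting__", "AUTH_USER_MODEL", None, True)]; for a plain
    # ForeignKey("polls.Question") it is [("polls", "question", None, True)].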
def _get_altered_foo_together_operations(self, option_name):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name)
old_value = (
{
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
}
if old_value
else set()
)
new_value = new_model_state.options.get(option_name)
new_value = set(new_value) if new_value else set()
if old_value != new_value:
dependencies = []
for foo_togethers in new_value:
for field_name in foo_togethers:
field = new_model_state.get_field(field_name)
if field.remote_field and field.remote_field.model:
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
)
yield (
old_value,
new_value,
app_label,
model_name,
dependencies,
)
def _generate_removed_altered_foo_together(self, operation):
for (
old_value,
new_value,
app_label,
model_name,
dependencies,
) in self._get_altered_foo_together_operations(operation.option_name):
removal_value = new_value.intersection(old_value)
if removal_value or old_value:
self.add_operation(
app_label,
operation(
name=model_name, **{operation.option_name: removal_value}
),
dependencies=dependencies,
)
def generate_removed_altered_unique_together(self):
self._generate_removed_altered_foo_together(operations.AlterUniqueTogether)
def generate_removed_altered_index_together(self):
self._generate_removed_altered_foo_together(operations.AlterIndexTogether)
def _generate_altered_foo_together(self, operation):
for (
old_value,
new_value,
app_label,
model_name,
dependencies,
) in self._get_altered_foo_together_operations(operation.option_name):
removal_value = new_value.intersection(old_value)
if new_value != removal_value:
self.add_operation(
app_label,
operation(name=model_name, **{operation.option_name: new_value}),
dependencies=dependencies,
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys, self.kept_unmanaged_keys
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get("db_table")
new_db_table_name = new_model_state.options.get("db_table")
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
),
)
def generate_altered_options(self):
"""
Work out if any non-schema-affecting options have changed and make an
operation to represent them in state changes (in case Python code in
migrations needs them).
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys,
self.kept_unmanaged_keys,
# unmanaged converted to managed
self.old_unmanaged_keys & self.new_model_keys,
# managed converted to unmanaged
self.old_model_keys & self.new_unmanaged_keys,
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = {
key: value
for key, value in old_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
new_options = {
key: value
for key, value in new_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
),
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.options.get(
"order_with_respect_to"
) != new_model_state.options.get("order_with_respect_to"):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append(
(
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
)
)
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get(
"order_with_respect_to"
),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
),
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Take a result from changes() and a MigrationGraph, and fix the names
and dependencies of the changes so they extend the graph from the leaf
nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
new_name_parts = ["%04i" % next_number]
if migration_name:
new_name_parts.append(migration_name)
elif i == 0 and not app_leaf:
new_name_parts.append("initial")
else:
new_name_parts.append(migration.suggest_name()[:100])
new_name = "_".join(new_name_parts)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for migrations in changes.values():
for migration in migrations:
migration.dependencies = [
name_map.get(d, d) for d in migration.dependencies
]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Take changes from arrange_for_graph() and set of app labels, and return
a modified set of changes which trims out as many migrations that are
not in app_labels as possible. Note that some other migrations may
still be present as they may be required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
required_apps.update(
*[app_dependencies.get(app_label, ()) for app_label in required_apps]
)
# Remove all migrations that aren't needed
for app_label in list(changes):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def parse_number(cls, name):
"""
Given a migration name, try to extract a number from the beginning of
it. For a squashed migration such as '0001_squashed_0004…', return the
second number. If no number is found, return None.
"""
if squashed_match := re.search(r".*_squashed_(\d+)", name):
return int(squashed_match[1])
match = re.match(r"^\d+", name)
if match:
return int(match[0])
return None
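    # Example (editor's note): expected return values.
    #   MigrationAutodetector.parse_number("0002_auto_20220101")  -> 2
    #   MigrationAutodetector.parse_number("0001_squashed_0004")  -> 4
    #   MigrationAutodetector.parse_number("custom_name")         -> None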
|
80c9ca9888110bc2319fc7c63b8815bcfe44604994b71908fdc0a6911e4abd96 | from django.db.migrations.utils import get_migration_name_timestamp
from django.db.transaction import atomic
from .exceptions import IrreversibleError
class Migration:
"""
The base class for all migrations.
Migration files will import this from django.db.migrations.Migration
and subclass it as a class called Migration. It will have one or more
of the following attributes:
- operations: A list of Operation instances, probably from
django.db.migrations.operations
- dependencies: A list of tuples of (app_path, migration_name)
- run_before: A list of tuples of (app_path, migration_name)
- replaces: A list of migration_names
    Note that all migrations come out of migration files and into the Loader
    or Graph as instances, having been initialized with their app label and
    name.
"""
# Operations to apply during this migration, in order.
operations = []
# Other migrations that should be run before this migration.
# Should be a list of (app, migration_name).
dependencies = []
# Other migrations that should be run after this one (i.e. have
# this migration added to their dependencies). Useful to make third-party
# apps' migrations run after your AUTH_USER replacement, for example.
run_before = []
# Migration names in this app that this migration replaces. If this is
# non-empty, this migration will only be applied if all these migrations
# are not applied.
replaces = []
# Is this an initial migration? Initial migrations are skipped on
    # --fake-initial if the table or fields already exist. If None, the
    # migration is treated as initial when it has no dependencies on other
    # migrations in the same app (introspection is performed in that case).
    # If True, always perform introspection. If False, never perform
    # introspection.
initial = None
# Whether to wrap the whole migration in a transaction. Only has an effect
# on database backends which support transactional DDL.
atomic = True
def __init__(self, name, app_label):
self.name = name
self.app_label = app_label
# Copy dependencies & other attrs as we might mutate them at runtime
self.operations = list(self.__class__.operations)
self.dependencies = list(self.__class__.dependencies)
self.run_before = list(self.__class__.run_before)
self.replaces = list(self.__class__.replaces)
def __eq__(self, other):
return (
isinstance(other, Migration)
and self.name == other.name
and self.app_label == other.app_label
)
def __repr__(self):
return "<Migration %s.%s>" % (self.app_label, self.name)
def __str__(self):
return "%s.%s" % (self.app_label, self.name)
def __hash__(self):
return hash("%s.%s" % (self.app_label, self.name))
def mutate_state(self, project_state, preserve=True):
"""
Take a ProjectState and return a new one with the migration's
operations applied to it. Preserve the original object state by
default and return a mutated state from a copy.
"""
new_state = project_state
if preserve:
new_state = project_state.clone()
for operation in self.operations:
operation.state_forwards(self.app_label, new_state)
return new_state
def apply(self, project_state, schema_editor, collect_sql=False):
"""
Take a project_state representing all migrations prior to this one
and a schema_editor for a live database and apply the migration
in a forwards order.
Return the resulting project state for efficient reuse by following
Migrations.
"""
for operation in self.operations:
# If this operation cannot be represented as SQL, place a comment
# there instead
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS "
"SQL:"
)
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
continue
# Save the state before the operation has run
old_state = project_state.clone()
operation.state_forwards(self.app_label, project_state)
# Run the operation
atomic_operation = operation.atomic or (
self.atomic and operation.atomic is not False
)
if not schema_editor.atomic_migration and atomic_operation:
# Force a transaction on a non-transactional-DDL backend or an
# atomic operation inside a non-atomic migration.
with atomic(schema_editor.connection.alias):
operation.database_forwards(
self.app_label, schema_editor, old_state, project_state
)
else:
# Normal behaviour
operation.database_forwards(
self.app_label, schema_editor, old_state, project_state
)
return project_state
def unapply(self, project_state, schema_editor, collect_sql=False):
"""
Take a project_state representing all migrations prior to this one
and a schema_editor for a live database and apply the migration
in a reverse order.
The backwards migration process consists of two phases:
1. The intermediate states from right before the first until right
after the last operation inside this migration are preserved.
2. The operations are applied in reverse order using the states
recorded in step 1.
"""
# Construct all the intermediate states we need for a reverse migration
to_run = []
new_state = project_state
# Phase 1
for operation in self.operations:
# If it's irreversible, error out
if not operation.reversible:
raise IrreversibleError(
"Operation %s in %s is not reversible" % (operation, self)
)
            # Clone the state after each operation so a single state instance
            # isn't mutated by every operation in the loop.
new_state = new_state.clone()
old_state = new_state.clone()
operation.state_forwards(self.app_label, new_state)
to_run.insert(0, (operation, old_state, new_state))
# Phase 2
for operation, to_state, from_state in to_run:
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS "
"SQL:"
)
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
continue
atomic_operation = operation.atomic or (
self.atomic and operation.atomic is not False
)
if not schema_editor.atomic_migration and atomic_operation:
# Force a transaction on a non-transactional-DDL backend or an
# atomic operation inside a non-atomic migration.
with atomic(schema_editor.connection.alias):
operation.database_backwards(
self.app_label, schema_editor, from_state, to_state
)
else:
# Normal behaviour
operation.database_backwards(
self.app_label, schema_editor, from_state, to_state
)
return project_state
def suggest_name(self):
"""
Suggest a name for the operations this migration might represent. Names
are not guaranteed to be unique, but put some effort into the fallback
name to avoid VCS conflicts if possible.
"""
if self.initial:
return "initial"
raw_fragments = [op.migration_name_fragment for op in self.operations]
fragments = [name for name in raw_fragments if name]
if not fragments or len(fragments) != len(self.operations):
return "auto_%s" % get_migration_name_timestamp()
name = fragments[0]
for fragment in fragments[1:]:
new_name = f"{name}_{fragment}"
if len(new_name) > 52:
name = f"{name}_and_more"
break
name = new_name
return name
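    # Example (editor's note): with operation fragments
    # ["delete_author", "add_book"], suggest_name() returns
    # "delete_author_add_book"; once a join would exceed 52 characters the
    # name is capped with an "_and_more" suffix instead.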
class SwappableTuple(tuple):
"""
Subclass of tuple so Django can tell this was originally a swappable
dependency when it reads the migration file.
"""
def __new__(cls, value, setting):
self = tuple.__new__(cls, value)
self.setting = setting
return self
def swappable_dependency(value):
"""Turn a setting value into a dependency."""
return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
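# Example (editor's note): swappable_dependency("auth.User") returns
# SwappableTuple(("auth", "__first__"), "auth.User"), i.e. a tuple that
# compares equal to ("auth", "__first__") while remembering the setting value.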
|
b9ab3fb5a848f1ee8780d2f835da8b2b539d09782a730af1b3ef39fe375ecdb5 | import datetime
import re
from collections import namedtuple
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
FieldReference = namedtuple("FieldReference", "to through")
COMPILED_REGEX_TYPE = type(re.compile(""))
class RegexObject:
def __init__(self, obj):
self.pattern = obj.pattern
self.flags = obj.flags
def __eq__(self, other):
return self.pattern == other.pattern and self.flags == other.flags
def get_migration_name_timestamp():
return datetime.datetime.now().strftime("%Y%m%d_%H%M")
def resolve_relation(model, app_label=None, model_name=None):
"""
Turn a model class or model reference string and return a model tuple.
app_label and model_name are used to resolve the scope of recursive and
unscoped model relationship.
"""
if isinstance(model, str):
if model == RECURSIVE_RELATIONSHIP_CONSTANT:
if app_label is None or model_name is None:
raise TypeError(
"app_label and model_name must be provided to resolve "
"recursive relationships."
)
return app_label, model_name
if "." in model:
app_label, model_name = model.split(".", 1)
return app_label, model_name.lower()
if app_label is None:
raise TypeError(
"app_label must be provided to resolve unscoped model relationships."
)
return app_label, model.lower()
return model._meta.app_label, model._meta.model_name
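# Example (editor's note): all of the following resolve to
# ("polls", "question"):
#   resolve_relation("self", "polls", "question")    # recursive reference
#   resolve_relation("polls.Question")               # scoped string
#   resolve_relation("Question", app_label="polls")  # unscoped string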
def field_references(
model_tuple,
field,
reference_model_tuple,
reference_field_name=None,
reference_field=None,
):
"""
Return either False or a FieldReference if `field` references provided
context.
False positives can be returned if `reference_field_name` is provided
without `reference_field` because of the introspection limitation it
incurs. This should not be an issue when this function is used to determine
whether or not an optimization can take place.
"""
remote_field = field.remote_field
if not remote_field:
return False
references_to = None
references_through = None
if resolve_relation(remote_field.model, *model_tuple) == reference_model_tuple:
to_fields = getattr(field, "to_fields", None)
if (
reference_field_name is None
or
# Unspecified to_field(s).
to_fields is None
or
# Reference to primary key.
(
None in to_fields
and (reference_field is None or reference_field.primary_key)
)
or
# Reference to field.
reference_field_name in to_fields
):
references_to = (remote_field, to_fields)
through = getattr(remote_field, "through", None)
if through and resolve_relation(through, *model_tuple) == reference_model_tuple:
through_fields = remote_field.through_fields
if (
reference_field_name is None
or
# Unspecified through_fields.
through_fields is None
or
# Reference to field.
reference_field_name in through_fields
):
references_through = (remote_field, through_fields)
if not (references_to or references_through):
return False
return FieldReference(references_to, references_through)
def get_references(state, model_tuple, field_tuple=()):
"""
Generator of (model_state, name, field, reference) referencing
provided context.
If field_tuple is provided only references to this particular field of
model_tuple will be generated.
"""
for state_model_tuple, model_state in state.models.items():
for name, field in model_state.fields.items():
reference = field_references(
state_model_tuple, field, model_tuple, *field_tuple
)
if reference:
yield model_state, name, field, reference
def field_is_referenced(state, model_tuple, field_tuple):
"""Return whether `field_tuple` is referenced by any state models."""
return next(get_references(state, model_tuple, field_tuple), None) is not None
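# Example (editor's sketch, assuming a hypothetical `id_field` object):
# before dropping a field, callers can check whether any state model still
# points at it:
#   field_is_referenced(to_state, ("polls", "question"), ("id", id_field))
# which is True as soon as get_references() yields a single match.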
|
dfabed8b1f7d0c01675a02906278f81bc550c0d7ce4143f6393b02fda7c03cd3 | from django.apps.registry import Apps
from django.db import DatabaseError, models
from django.utils.functional import classproperty
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder:
"""
Deal with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
_migration_class = None
@classproperty
def Migration(cls):
"""
Lazy load to avoid AppRegistryNotReady if installed apps import
MigrationRecorder.
"""
if cls._migration_class is None:
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = "migrations"
db_table = "django_migrations"
def __str__(self):
return "Migration %s for %s" % (self.name, self.app)
cls._migration_class = Migration
return cls._migration_class
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def has_table(self):
"""Return True if the django_migrations table exists."""
with self.connection.cursor() as cursor:
tables = self.connection.introspection.table_names(cursor)
return self.Migration._meta.db_table in tables
def ensure_schema(self):
"""Ensure the table exists and has the correct schema."""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.has_table():
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing(
"Unable to create the django_migrations table (%s)" % exc
)
def applied_migrations(self):
"""
Return a dict mapping (app_name, migration_name) to Migration instances
for all applied migrations.
"""
if self.has_table():
return {
(migration.app, migration.name): migration
for migration in self.migration_qs
}
else:
# If the django_migrations table doesn't exist, then no migrations
# are applied.
return {}
def record_applied(self, app, name):
"""Record that a migration was applied."""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""Record that a migration was unapplied."""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""Delete all migration records. Useful for testing migrations."""
self.migration_qs.all().delete()
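# Example (editor's sketch): typical usage against the default connection.
#   from django.db import connection
#   recorder = MigrationRecorder(connection)
#   recorder.record_applied("polls", "0001_initial")
#   ("polls", "0001_initial") in recorder.applied_migrations()  # True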
|
734259e451a5b43fe09a1db47b9491e80db5abf0deeab50a7e40092b0997e0ab | class MigrationOptimizer:
"""
Power the optimization process, where you provide a list of Operations
and you are returned a list of equal or shorter length - operations
are merged into one if possible.
For example, a CreateModel and an AddField can be optimized into a
new CreateModel, and CreateModel and DeleteModel can be optimized into
nothing.
"""
def optimize(self, operations, app_label):
"""
Main optimization entry point. Pass in a list of Operation instances,
get out a new list of Operation instances.
Unfortunately, due to the scope of the optimization (two combinable
operations might be separated by several hundred others), this can't be
done as a peephole optimization with checks/output implemented on
the Operations themselves; instead, the optimizer looks at each
individual operation and scans forwards in the list to see if there
are any matches, stopping at boundaries - operations which can't
be optimized over (RunSQL, operations on the same field/model, etc.)
The inner loop is run until the starting list is the same as the result
list, and then the result is returned. This means that operation
optimization must be stable and always return an equal or shorter list.
"""
        if app_label is None:
            raise TypeError("app_label must be a str.")
        # Internal tracking variable for test assertions about # of loops.
        self._iterations = 0
while True:
result = self.optimize_inner(operations, app_label)
self._iterations += 1
if result == operations:
return result
operations = result
def optimize_inner(self, operations, app_label):
"""Inner optimization loop."""
new_operations = []
for i, operation in enumerate(operations):
right = True # Should we reduce on the right or on the left.
# Compare it to each operation after it
for j, other in enumerate(operations[i + 1 :]):
result = operation.reduce(other, app_label)
if isinstance(result, list):
in_between = operations[i + 1 : i + j + 1]
if right:
new_operations.extend(in_between)
new_operations.extend(result)
elif all(op.reduce(other, app_label) is True for op in in_between):
# Perform a left reduction if all of the in-between
# operations can optimize through other.
new_operations.extend(result)
new_operations.extend(in_between)
else:
# Otherwise keep trying.
new_operations.append(operation)
break
new_operations.extend(operations[i + j + 2 :])
return new_operations
elif not result:
# Can't perform a right reduction.
right = False
else:
new_operations.append(operation)
return new_operations
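# Example (editor's sketch, assumed model/app names): a CreateModel that is
# immediately cancelled by a DeleteModel of the same model optimizes away.
#   from django.db.migrations import operations
#   MigrationOptimizer().optimize(
#       [operations.CreateModel("Foo", fields=[]),
#        operations.DeleteModel("Foo")],
#       app_label="myapp",
#   )  # -> []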
|
d857eaf2e28fc510cafea1d6fd2926f02323d7f3b5a148a5aa91349e12c58c3a | import builtins
import collections.abc
import datetime
import decimal
import enum
import functools
import math
import os
import pathlib
import re
import types
import uuid
from django.conf import SettingsReference
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils.functional import LazyObject, Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
class BaseSerializer:
def __init__(self, value):
self.value = value
def serialize(self):
raise NotImplementedError(
"Subclasses of BaseSerializer must implement the serialize() method."
)
class BaseSequenceSerializer(BaseSerializer):
def _format(self):
raise NotImplementedError(
"Subclasses of BaseSequenceSerializer must implement the _format() method."
)
def serialize(self):
imports = set()
strings = []
for item in self.value:
item_string, item_imports = serializer_factory(item).serialize()
imports.update(item_imports)
strings.append(item_string)
value = self._format()
return value % (", ".join(strings)), imports
class BaseSimpleSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), set()
class ChoicesSerializer(BaseSerializer):
def serialize(self):
return serializer_factory(self.value.value).serialize()
class DateTimeSerializer(BaseSerializer):
"""For datetime.*, except datetime.datetime."""
def serialize(self):
return repr(self.value), {"import datetime"}
class DatetimeDatetimeSerializer(BaseSerializer):
"""For datetime.datetime."""
def serialize(self):
if self.value.tzinfo is not None and self.value.tzinfo != utc:
self.value = self.value.astimezone(utc)
imports = ["import datetime"]
if self.value.tzinfo is not None:
imports.append("from django.utils.timezone import utc")
return repr(self.value).replace("datetime.timezone.utc", "utc"), set(imports)
class DecimalSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), {"from decimal import Decimal"}
class DeconstructableSerializer(BaseSerializer):
@staticmethod
def serialize_deconstructed(path, args, kwargs):
name, imports = DeconstructableSerializer._serialize_path(path)
strings = []
for arg in args:
arg_string, arg_imports = serializer_factory(arg).serialize()
strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in sorted(kwargs.items()):
arg_string, arg_imports = serializer_factory(arg).serialize()
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(strings)), imports
@staticmethod
def _serialize_path(path):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = {"from django.db import models"}
name = "models.%s" % name
else:
imports = {"import %s" % module}
name = path
return name, imports
def serialize(self):
return self.serialize_deconstructed(*self.value.deconstruct())
class DictionarySerializer(BaseSerializer):
def serialize(self):
imports = set()
strings = []
for k, v in sorted(self.value.items()):
k_string, k_imports = serializer_factory(k).serialize()
v_string, v_imports = serializer_factory(v).serialize()
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
class EnumSerializer(BaseSerializer):
def serialize(self):
enum_class = self.value.__class__
module = enum_class.__module__
return (
"%s.%s[%r]" % (module, enum_class.__qualname__, self.value.name),
{"import %s" % module},
)
class FloatSerializer(BaseSimpleSerializer):
def serialize(self):
if math.isnan(self.value) or math.isinf(self.value):
return 'float("{}")'.format(self.value), set()
return super().serialize()
class FrozensetSerializer(BaseSequenceSerializer):
def _format(self):
return "frozenset([%s])"
class FunctionTypeSerializer(BaseSerializer):
def serialize(self):
if getattr(self.value, "__self__", None) and isinstance(
self.value.__self__, type
):
klass = self.value.__self__
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), {
"import %s" % module
}
# Further error checking
if self.value.__name__ == "<lambda>":
raise ValueError("Cannot serialize function: lambda")
if self.value.__module__ is None:
raise ValueError("Cannot serialize function %r: No module" % self.value)
module_name = self.value.__module__
if "<" not in self.value.__qualname__: # Qualname can include <locals>
return "%s.%s" % (module_name, self.value.__qualname__), {
"import %s" % self.value.__module__
}
raise ValueError(
"Could not find function %s in %s.\n" % (self.value.__name__, module_name)
)
class FunctoolsPartialSerializer(BaseSerializer):
def serialize(self):
# Serialize functools.partial() arguments
func_string, func_imports = serializer_factory(self.value.func).serialize()
args_string, args_imports = serializer_factory(self.value.args).serialize()
keywords_string, keywords_imports = serializer_factory(
self.value.keywords
).serialize()
# Add any imports needed by arguments
imports = {"import functools", *func_imports, *args_imports, *keywords_imports}
return (
"functools.%s(%s, *%s, **%s)"
% (
self.value.__class__.__name__,
func_string,
args_string,
keywords_string,
),
imports,
)
class IterableSerializer(BaseSerializer):
def serialize(self):
imports = set()
strings = []
for item in self.value:
item_string, item_imports = serializer_factory(item).serialize()
imports.update(item_imports)
strings.append(item_string)
        # Add a trailing comma only for single-element tuples: an empty
        # iterable must serialize as "()", not "(,)", which is invalid
        # Python syntax.
        value = "(%s)" if len(strings) != 1 else "(%s,)"
return value % (", ".join(strings)), imports
class ModelFieldSerializer(DeconstructableSerializer):
def serialize(self):
attr_name, path, args, kwargs = self.value.deconstruct()
return self.serialize_deconstructed(path, args, kwargs)
class ModelManagerSerializer(DeconstructableSerializer):
def serialize(self):
as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct()
if as_manager:
name, imports = self._serialize_path(qs_path)
return "%s.as_manager()" % name, imports
else:
return self.serialize_deconstructed(manager_path, args, kwargs)
class OperationSerializer(BaseSerializer):
def serialize(self):
from django.db.migrations.writer import OperationWriter
string, imports = OperationWriter(self.value, indentation=0).serialize()
# Nested operation, trailing comma is handled in upper OperationWriter._write()
return string.rstrip(","), imports
class PathLikeSerializer(BaseSerializer):
def serialize(self):
return repr(os.fspath(self.value)), {}
class PathSerializer(BaseSerializer):
def serialize(self):
# Convert concrete paths to pure paths to avoid issues with migrations
# generated on one platform being used on a different platform.
prefix = "Pure" if isinstance(self.value, pathlib.Path) else ""
return "pathlib.%s%r" % (prefix, self.value), {"import pathlib"}
class RegexSerializer(BaseSerializer):
def serialize(self):
regex_pattern, pattern_imports = serializer_factory(
self.value.pattern
).serialize()
# Turn off default implicit flags (e.g. re.U) because regexes with the
# same implicit and explicit flags aren't equal.
flags = self.value.flags ^ re.compile("").flags
regex_flags, flag_imports = serializer_factory(flags).serialize()
imports = {"import re", *pattern_imports, *flag_imports}
args = [regex_pattern]
if flags:
args.append(regex_flags)
return "re.compile(%s)" % ", ".join(args), imports
class SequenceSerializer(BaseSequenceSerializer):
def _format(self):
return "[%s]"
class SetSerializer(BaseSequenceSerializer):
def _format(self):
# Serialize as a set literal except when value is empty because {}
# is an empty dict.
return "{%s}" if self.value else "set(%s)"
class SettingsReferenceSerializer(BaseSerializer):
def serialize(self):
return "settings.%s" % self.value.setting_name, {
"from django.conf import settings"
}
class TupleSerializer(BaseSequenceSerializer):
def _format(self):
        # A single-element tuple needs a trailing comma; an empty tuple must
        # serialize as "()", not "(,)", which is invalid Python syntax.
        return "(%s)" if len(self.value) != 1 else "(%s,)"
class TypeSerializer(BaseSerializer):
def serialize(self):
special_cases = [
(models.Model, "models.Model", ["from django.db import models"]),
(type(None), "type(None)", []),
]
for case, string, imports in special_cases:
if case is self.value:
return string, set(imports)
if hasattr(self.value, "__module__"):
module = self.value.__module__
if module == builtins.__name__:
return self.value.__name__, set()
else:
return "%s.%s" % (module, self.value.__qualname__), {
"import %s" % module
}
class UUIDSerializer(BaseSerializer):
def serialize(self):
return "uuid.%s" % repr(self.value), {"import uuid"}
class Serializer:
_registry = {
# Some of these are order-dependent.
frozenset: FrozensetSerializer,
list: SequenceSerializer,
set: SetSerializer,
tuple: TupleSerializer,
dict: DictionarySerializer,
models.Choices: ChoicesSerializer,
enum.Enum: EnumSerializer,
datetime.datetime: DatetimeDatetimeSerializer,
(datetime.date, datetime.timedelta, datetime.time): DateTimeSerializer,
SettingsReference: SettingsReferenceSerializer,
float: FloatSerializer,
(bool, int, type(None), bytes, str, range): BaseSimpleSerializer,
decimal.Decimal: DecimalSerializer,
(functools.partial, functools.partialmethod): FunctoolsPartialSerializer,
(
types.FunctionType,
types.BuiltinFunctionType,
types.MethodType,
): FunctionTypeSerializer,
collections.abc.Iterable: IterableSerializer,
(COMPILED_REGEX_TYPE, RegexObject): RegexSerializer,
uuid.UUID: UUIDSerializer,
pathlib.PurePath: PathSerializer,
os.PathLike: PathLikeSerializer,
}
@classmethod
def register(cls, type_, serializer):
if not issubclass(serializer, BaseSerializer):
raise ValueError(
"'%s' must inherit from 'BaseSerializer'." % serializer.__name__
)
cls._registry[type_] = serializer
@classmethod
def unregister(cls, type_):
cls._registry.pop(type_)
def serializer_factory(value):
if isinstance(value, Promise):
value = str(value)
elif isinstance(value, LazyObject):
# The unwrapped value is returned as the first item of the arguments
# tuple.
value = value.__reduce__()[1][0]
if isinstance(value, models.Field):
return ModelFieldSerializer(value)
if isinstance(value, models.manager.BaseManager):
return ModelManagerSerializer(value)
if isinstance(value, Operation):
return OperationSerializer(value)
if isinstance(value, type):
return TypeSerializer(value)
# Anything that knows how to deconstruct itself.
if hasattr(value, "deconstruct"):
return DeconstructableSerializer(value)
for type_, serializer_cls in Serializer._registry.items():
if isinstance(value, type_):
return serializer_cls(value)
raise ValueError(
"Cannot serialize: %r\nThere are some values Django cannot serialize into "
"migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
"topics/migrations/#migration-serializing" % (value, get_docs_version())
)
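# Example (editor's note): a few factory results.
#   serializer_factory(datetime.date(2006, 10, 25)).serialize()
#       -> ("datetime.date(2006, 10, 25)", {"import datetime"})
#   serializer_factory(1.5).serialize()  -> ("1.5", set())
#   serializer_factory(lambda: 0).serialize()  # raises ValueError (lambda)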
|
c7eb16528f8b7fc5c878fa2bc0ba1dc0ee3d282c5432747ea55cfc92ac51dea0 | """
Classes to represent the definitions of aggregate functions.
"""
from django.core.exceptions import FieldError
from django.db.models.expressions import Case, Func, Star, When
from django.db.models.fields import IntegerField
from django.db.models.functions.comparison import Coalesce
from django.db.models.functions.mixins import (
FixDurationInputMixin,
NumericOutputFieldMixin,
)
__all__ = [
"Aggregate",
"Avg",
"Count",
"Max",
"Min",
"StdDev",
"Sum",
"Variance",
]
class Aggregate(Func):
template = "%(function)s(%(distinct)s%(expressions)s)"
contains_aggregate = True
name = None
filter_template = "%s FILTER (WHERE %%(filter)s)"
window_compatible = True
allow_distinct = False
empty_result_set_value = None
def __init__(
self, *expressions, distinct=False, filter=None, default=None, **extra
):
if distinct and not self.allow_distinct:
raise TypeError("%s does not allow distinct." % self.__class__.__name__)
if default is not None and self.empty_result_set_value is not None:
raise TypeError(f"{self.__class__.__name__} does not allow default.")
self.distinct = distinct
self.filter = filter
self.default = default
super().__init__(*expressions, **extra)
def get_source_fields(self):
# Don't return the filter expression since it's not a source field.
return [e._output_field_or_none for e in super().get_source_expressions()]
def get_source_expressions(self):
source_expressions = super().get_source_expressions()
if self.filter:
return source_expressions + [self.filter]
return source_expressions
def set_source_expressions(self, exprs):
self.filter = self.filter and exprs.pop()
return super().set_source_expressions(exprs)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# Aggregates are not allowed in UPDATE queries, so ignore for_save
c = super().resolve_expression(query, allow_joins, reuse, summarize)
c.filter = c.filter and c.filter.resolve_expression(
query, allow_joins, reuse, summarize
)
if not summarize:
            # Call Func.get_source_expressions() (bypassing Aggregate's
            # override) so self.filter isn't returned and included in this
            # loop.
            expressions = super(Aggregate, c).get_source_expressions()
for index, expr in enumerate(expressions):
if expr.contains_aggregate:
before_resolved = self.get_source_expressions()[index]
name = (
before_resolved.name
if hasattr(before_resolved, "name")
else repr(before_resolved)
)
raise FieldError(
"Cannot compute %s('%s'): '%s' is an aggregate"
% (c.name, name, name)
)
if (default := c.default) is None:
return c
if hasattr(default, "resolve_expression"):
default = default.resolve_expression(query, allow_joins, reuse, summarize)
c.default = None # Reset the default argument before wrapping.
coalesce = Coalesce(c, default, output_field=c._output_field_or_none)
coalesce.is_summary = c.is_summary
return coalesce
@property
def default_alias(self):
expressions = self.get_source_expressions()
if len(expressions) == 1 and hasattr(expressions[0], "name"):
return "%s__%s" % (expressions[0].name, self.name.lower())
raise TypeError("Complex expressions require an alias")
def get_group_by_cols(self, alias=None):
return []
def as_sql(self, compiler, connection, **extra_context):
extra_context["distinct"] = "DISTINCT " if self.distinct else ""
if self.filter:
if connection.features.supports_aggregate_filter_clause:
filter_sql, filter_params = self.filter.as_sql(compiler, connection)
template = self.filter_template % extra_context.get(
"template", self.template
)
sql, params = super().as_sql(
compiler,
connection,
template=template,
filter=filter_sql,
**extra_context,
)
return sql, (*params, *filter_params)
else:
copy = self.copy()
copy.filter = None
source_expressions = copy.get_source_expressions()
condition = When(self.filter, then=source_expressions[0])
copy.set_source_expressions([Case(condition)] + source_expressions[1:])
return super(Aggregate, copy).as_sql(
compiler, connection, **extra_context
)
return super().as_sql(compiler, connection, **extra_context)
def _get_repr_options(self):
options = super()._get_repr_options()
if self.distinct:
options["distinct"] = self.distinct
if self.filter:
options["filter"] = self.filter
return options
class Avg(FixDurationInputMixin, NumericOutputFieldMixin, Aggregate):
function = "AVG"
name = "Avg"
allow_distinct = True
class Count(Aggregate):
function = "COUNT"
name = "Count"
output_field = IntegerField()
allow_distinct = True
empty_result_set_value = 0
def __init__(self, expression, filter=None, **extra):
if expression == "*":
expression = Star()
if isinstance(expression, Star) and filter is not None:
raise ValueError("Star cannot be used with filter. Please specify a field.")
super().__init__(expression, filter=filter, **extra)
class Max(Aggregate):
function = "MAX"
name = "Max"
class Min(Aggregate):
function = "MIN"
name = "Min"
class StdDev(NumericOutputFieldMixin, Aggregate):
name = "StdDev"
def __init__(self, expression, sample=False, **extra):
self.function = "STDDEV_SAMP" if sample else "STDDEV_POP"
super().__init__(expression, **extra)
def _get_repr_options(self):
return {**super()._get_repr_options(), "sample": self.function == "STDDEV_SAMP"}
class Sum(FixDurationInputMixin, Aggregate):
function = "SUM"
name = "Sum"
allow_distinct = True
class Variance(NumericOutputFieldMixin, Aggregate):
name = "Variance"
def __init__(self, expression, sample=False, **extra):
self.function = "VAR_SAMP" if sample else "VAR_POP"
super().__init__(expression, **extra)
def _get_repr_options(self):
return {**super()._get_repr_options(), "sample": self.function == "VAR_SAMP"}
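# Example (editor's sketch, assuming a Book model with a `price` field):
# filter and default compose with any aggregate.
#   from django.db.models import Avg, Count, Q
#   Book.objects.aggregate(
#       cheap=Count("pk", filter=Q(price__lt=10)),
#       avg_price=Avg("price", default=0),
#   )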
|
e24515b9b5431a9859036cb945592e75e3a69d9bc4e2201c1094f9cda296f9cf | from django.db.backends.utils import names_digest, split_identifier
from django.db.models.expressions import Col, ExpressionList, F, Func, OrderBy
from django.db.models.functions import Collate
from django.db.models.query_utils import Q
from django.db.models.sql import Query
from django.utils.functional import partition
__all__ = ["Index"]
class Index:
suffix = "idx"
# The max length of the name of the index (restricted to 30 for
# cross-database compatibility with Oracle)
max_name_length = 30
def __init__(
self,
*expressions,
fields=(),
name=None,
db_tablespace=None,
opclasses=(),
condition=None,
include=None,
):
if opclasses and not name:
raise ValueError("An index must be named to use opclasses.")
if not isinstance(condition, (type(None), Q)):
raise ValueError("Index.condition must be a Q instance.")
if condition and not name:
raise ValueError("An index must be named to use condition.")
if not isinstance(fields, (list, tuple)):
raise ValueError("Index.fields must be a list or tuple.")
if not isinstance(opclasses, (list, tuple)):
raise ValueError("Index.opclasses must be a list or tuple.")
if not expressions and not fields:
raise ValueError(
"At least one field or expression is required to define an index."
)
if expressions and fields:
raise ValueError(
"Index.fields and expressions are mutually exclusive.",
)
if expressions and not name:
raise ValueError("An index must be named to use expressions.")
if expressions and opclasses:
raise ValueError(
"Index.opclasses cannot be used with expressions. Use "
"django.contrib.postgres.indexes.OpClass() instead."
)
if opclasses and len(fields) != len(opclasses):
raise ValueError(
"Index.fields and Index.opclasses must have the same number of "
"elements."
)
if fields and not all(isinstance(field, str) for field in fields):
raise ValueError("Index.fields must contain only strings with field names.")
if include and not name:
raise ValueError("A covering index must be named.")
if not isinstance(include, (type(None), list, tuple)):
raise ValueError("Index.include must be a list or tuple.")
self.fields = list(fields)
        # A list of 2-tuples of (field name, ordering), where ordering is
        # "" or "DESC".
self.fields_orders = [
(field_name[1:], "DESC") if field_name.startswith("-") else (field_name, "")
for field_name in self.fields
]
self.name = name or ""
self.db_tablespace = db_tablespace
self.opclasses = opclasses
self.condition = condition
self.include = tuple(include) if include else ()
self.expressions = tuple(
F(expression) if isinstance(expression, str) else expression
for expression in expressions
)
@property
def contains_expressions(self):
return bool(self.expressions)
def _get_condition_sql(self, model, schema_editor):
if self.condition is None:
return None
query = Query(model=model, alias_cols=False)
where = query.build_where(self.condition)
compiler = query.get_compiler(connection=schema_editor.connection)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def create_sql(self, model, schema_editor, using="", **kwargs):
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
condition = self._get_condition_sql(model, schema_editor)
if self.expressions:
index_expressions = []
for expression in self.expressions:
index_expression = IndexExpression(expression)
index_expression.set_wrapper_classes(schema_editor.connection)
index_expressions.append(index_expression)
expressions = ExpressionList(*index_expressions).resolve_expression(
Query(model, alias_cols=False),
)
fields = None
col_suffixes = None
else:
fields = [
model._meta.get_field(field_name)
for field_name, _ in self.fields_orders
]
col_suffixes = [order[1] for order in self.fields_orders]
expressions = None
return schema_editor._create_index_sql(
model,
fields=fields,
name=self.name,
using=using,
db_tablespace=self.db_tablespace,
col_suffixes=col_suffixes,
opclasses=self.opclasses,
condition=condition,
include=include,
expressions=expressions,
**kwargs,
)
def remove_sql(self, model, schema_editor, **kwargs):
return schema_editor._delete_index_sql(model, self.name, **kwargs)
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
path = path.replace("django.db.models.indexes", "django.db.models")
kwargs = {"name": self.name}
if self.fields:
kwargs["fields"] = self.fields
if self.db_tablespace is not None:
kwargs["db_tablespace"] = self.db_tablespace
if self.opclasses:
kwargs["opclasses"] = self.opclasses
if self.condition:
kwargs["condition"] = self.condition
if self.include:
kwargs["include"] = self.include
return (path, self.expressions, kwargs)
def clone(self):
"""Create a copy of this Index."""
_, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def set_name_with_model(self, model):
"""
Generate a unique name for the index.
The name is divided into 3 parts - table name (12 chars), field name
(8 chars) and unique hash + suffix (10 chars). Each part is made to
fit its size by truncating the excess length.
"""
_, table_name = split_identifier(model._meta.db_table)
column_names = [
model._meta.get_field(field_name).column
for field_name, order in self.fields_orders
]
column_names_with_order = [
(("-%s" if order else "%s") % column_name)
for column_name, (field_name, order) in zip(
column_names, self.fields_orders
)
]
# The length of the parts of the name is based on the default max
# length of 30 characters.
hash_data = [table_name] + column_names_with_order + [self.suffix]
self.name = "%s_%s_%s" % (
table_name[:11],
column_names[0][:7],
"%s_%s" % (names_digest(*hash_data, length=6), self.suffix),
)
if len(self.name) > self.max_name_length:
raise ValueError(
"Index too long for multiple database support. Is self.suffix "
"longer than 3 characters?"
)
if self.name[0] == "_" or self.name[0].isdigit():
self.name = "D%s" % self.name[1:]
def __repr__(self):
return "<%s:%s%s%s%s%s%s%s>" % (
self.__class__.__qualname__,
"" if not self.fields else " fields=%s" % repr(self.fields),
"" if not self.expressions else " expressions=%s" % repr(self.expressions),
"" if not self.name else " name=%s" % repr(self.name),
""
if self.db_tablespace is None
else " db_tablespace=%s" % repr(self.db_tablespace),
"" if self.condition is None else " condition=%s" % self.condition,
"" if not self.include else " include=%s" % repr(self.include),
"" if not self.opclasses else " opclasses=%s" % repr(self.opclasses),
)
def __eq__(self, other):
if self.__class__ == other.__class__:
return self.deconstruct() == other.deconstruct()
return NotImplemented
class IndexExpression(Func):
"""Order and wrap expressions for CREATE INDEX statements."""
template = "%(expressions)s"
wrapper_classes = (OrderBy, Collate)
def set_wrapper_classes(self, connection=None):
        # Some databases (e.g. MySQL) treat COLLATE as an indexed expression.
if connection and connection.features.collate_as_index_expression:
self.wrapper_classes = tuple(
[
wrapper_cls
for wrapper_cls in self.wrapper_classes
if wrapper_cls is not Collate
]
)
@classmethod
def register_wrappers(cls, *wrapper_classes):
cls.wrapper_classes = wrapper_classes
def resolve_expression(
self,
query=None,
allow_joins=True,
reuse=None,
summarize=False,
for_save=False,
):
expressions = list(self.flatten())
# Split expressions and wrappers.
index_expressions, wrappers = partition(
lambda e: isinstance(e, self.wrapper_classes),
expressions,
)
wrapper_types = [type(wrapper) for wrapper in wrappers]
if len(wrapper_types) != len(set(wrapper_types)):
raise ValueError(
"Multiple references to %s can't be used in an indexed "
"expression."
% ", ".join(
[wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes]
)
)
if expressions[1 : len(wrappers) + 1] != wrappers:
raise ValueError(
"%s must be topmost expressions in an indexed expression."
% ", ".join(
[wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes]
)
)
# Wrap expressions in parentheses if they are not column references.
root_expression = index_expressions[1]
resolve_root_expression = root_expression.resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
if not isinstance(resolve_root_expression, Col):
root_expression = Func(root_expression, template="(%(expressions)s)")
if wrappers:
# Order wrappers and set their expressions.
wrappers = sorted(
wrappers,
key=lambda w: self.wrapper_classes.index(type(w)),
)
wrappers = [wrapper.copy() for wrapper in wrappers]
for i, wrapper in enumerate(wrappers[:-1]):
wrapper.set_source_expressions([wrappers[i + 1]])
# Set the root expression on the deepest wrapper.
wrappers[-1].set_source_expressions([root_expression])
self.set_source_expressions([wrappers[0]])
else:
# Use the root expression, if there are no wrappers.
self.set_source_expressions([root_expression])
return super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
|
49d9ad83daa6409c700a5f303f211e518e39dfd7f639f02391b07ced8c63ee7c | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from itertools import chain, islice
import django
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY,
IntegrityError,
NotSupportedError,
connections,
router,
transaction,
)
from django.db.models import AutoField, DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP, OnConflict
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, F, Ref, Value, When
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, Q
from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
from django.db.models.utils import create_namedtuple_class, resolve_callables
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property, partition
# The maximum number of results to fetch in a get() query.
MAX_GET_RESULTS = 21
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
class BaseIterable:
def __init__(
self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
self.chunk_size = chunk_size
class ModelIterable(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
)
select, klass_info, annotation_col_map = (
compiler.select,
compiler.klass_info,
compiler.annotation_col_map,
)
model_cls = klass_info["model"]
select_fields = klass_info["select_fields"]
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [
f[0].target.attname for f in select[model_fields_start:model_fields_end]
]
related_populators = get_related_populators(klass_info, select, db)
known_related_objects = [
(
field,
related_objs,
operator.attrgetter(
*[
field.attname
if from_field == "self"
else queryset.model._meta.get_field(from_field).attname
for from_field in field.from_fields
]
),
)
for field, related_objs in queryset._known_related_objects.items()
]
for row in compiler.results_iter(results):
obj = model_cls.from_db(
db, init_list, row[model_fields_start:model_fields_end]
)
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model.
for field, rel_objs, rel_getter in known_related_objects:
# Avoid overwriting objects loaded by, e.g., select_related().
if field.is_cached(obj):
continue
rel_obj_id = rel_getter(obj)
try:
rel_obj = rel_objs[rel_obj_id]
except KeyError:
pass # May happen in qs1 | qs2 scenarios.
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
indexes = range(len(names))
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield {names[i]: row[i] for i in indexes}
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if queryset._fields:
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
fields = [
*queryset._fields,
*(f for f in query.annotation_select if f not in queryset._fields),
]
if fields != names:
# Reorder according to fields.
index_map = {name: idx for idx, name in enumerate(names)}
rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
return map(
rowfactory,
compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
),
)
return compiler.results_iter(
tuple_expected=True,
chunked_fetch=self.chunked_fetch,
chunk_size=self.chunk_size,
)
class NamedValuesListIterable(ValuesListIterable):
"""
Iterable returned by QuerySet.values_list(named=True) that yields a
namedtuple for each row.
"""
def __iter__(self):
queryset = self.queryset
if queryset._fields:
names = queryset._fields
else:
query = queryset.query
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
tuple_class = create_namedtuple_class(*names)
new = tuple.__new__
for row in super().__iter__():
yield new(tuple_class, row)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield row[0]
class QuerySet:
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self._query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
self._defer_next_filter = False
self._deferred_filter = None
@property
def query(self):
if self._deferred_filter:
negate, args, kwargs = self._deferred_filter
self._filter_or_exclude_inplace(negate, args, kwargs)
self._deferred_filter = None
return self._query
@query.setter
def query(self, value):
if value.values_select:
self._iterable_class = ValuesIterable
self._query = value
def as_manager(cls):
        # Address the circular dependency between `QuerySet` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == "_result_cache":
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__}
def __setstate__(self, state):
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
if pickled_version != django.__version__:
warnings.warn(
"Pickled queryset instance's Django version %s does not "
"match the current version %s."
% (pickled_version, django.__version__),
RuntimeWarning,
stacklevel=2,
)
else:
warnings.warn(
"Pickled queryset instance's Django version is not specified.",
RuntimeWarning,
stacklevel=2,
)
self.__dict__.update(state)
def __repr__(self):
data = list(self[: REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return "<%s %r>" % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError(
"QuerySet indices must be integers or slices, not %s."
% type(k).__name__
)
if (isinstance(k, int) and k < 0) or (
isinstance(k, slice)
and (
(k.start is not None and k.start < 0)
or (k.stop is not None and k.stop < 0)
)
):
raise ValueError("Negative indexing is not supported.")
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[:: k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0]
def __class_getitem__(cls, *args, **kwargs):
return cls
def __and__(self, other):
self._check_operator_queryset(other, "&")
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._chain()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._check_operator_queryset(other, "|")
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = (
self
if self.query.can_filter()
else self.model._base_manager.filter(pk__in=self.values("pk"))
)
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values("pk"))
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def _iterator(self, use_chunked_fetch, chunk_size):
iterable = self._iterable_class(
self,
chunked_fetch=use_chunked_fetch,
chunk_size=chunk_size or 2000,
)
if not self._prefetch_related_lookups or chunk_size is None:
yield from iterable
return
iterator = iter(iterable)
while results := list(islice(iterator, chunk_size)):
prefetch_related_objects(results, *self._prefetch_related_lookups)
yield from results
def iterator(self, chunk_size=None):
"""
An iterator over the results from applying this QuerySet to the
database. chunk_size must be provided for QuerySets that prefetch
related objects. Otherwise, a default chunk_size of 2000 is supplied.
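
        A minimal usage sketch (``Entry`` is a hypothetical model, not part of
        this module):

            for entry in Entry.objects.all().iterator(chunk_size=500):
                process(entry)  # hypothetical per-row handler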
"""
if chunk_size is None:
if self._prefetch_related_lookups:
# When the deprecation ends, replace with:
# raise ValueError(
# 'chunk_size must be provided when using '
# 'QuerySet.iterator() after prefetch_related().'
# )
warnings.warn(
"Using QuerySet.iterator() after prefetch_related() "
"without specifying chunk_size is deprecated.",
category=RemovedInDjango50Warning,
stacklevel=2,
)
elif chunk_size <= 0:
raise ValueError("Chunk size must be strictly positive.")
use_chunked_fetch = not connections[self.db].settings_dict.get(
"DISABLE_SERVER_SIDE_CURSORS"
)
return self._iterator(use_chunked_fetch, chunk_size)
def aggregate(self, *args, **kwargs):
"""
Return a dictionary containing the calculations (aggregation)
over the current queryset.
        If args is present, the expression is passed as a kwarg using
the Aggregate object's default alias.
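
        A usage sketch (``Book`` is a hypothetical model; the result values
        are illustrative):

            from django.db.models import Avg, Max
            Book.objects.aggregate(Avg("price"), max_price=Max("price"))
            # -> {'price__avg': 34.35, 'max_price': 81.20}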
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions(
(*args, *kwargs.values()), method_name="aggregate"
)
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.chain()
        for alias, aggregate_expr in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
annotation = query.annotations[alias]
if not annotation.contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
for expr in annotation.get_source_expressions():
if (
expr.contains_aggregate
and isinstance(expr, Ref)
and expr.refs in kwargs
):
name = expr.refs
raise exceptions.FieldError(
"Cannot compute %s('%s'): '%s' is an aggregate"
% (annotation.name, name, name)
)
return query.get_aggregation(self.db, kwargs)
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
        cached result set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Perform the query and return a single object matching the given
keyword arguments.
"""
if self.query.combinator and (args or kwargs):
raise NotSupportedError(
"Calling QuerySet.get(...) with filters after %s() is not "
"supported." % self.query.combinator
)
clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
limit = None
if (
not clone.query.select_for_update
or connections[clone.db].features.supports_select_for_update_with_limit
):
limit = MAX_GET_RESULTS
clone.query.set_limits(high=limit)
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." % self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!"
% (
self.model._meta.object_name,
num if not limit or num < limit else "more than %s" % (limit - 1),
)
)
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _prepare_for_bulk_create(self, objs):
for obj in objs:
if obj.pk is None:
# Populate new PK values.
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
obj._prepare_related_fields_for_save(operation_name="bulk_create")
def _check_bulk_create_options(
self, ignore_conflicts, update_conflicts, update_fields, unique_fields
):
if ignore_conflicts and update_conflicts:
raise ValueError(
"ignore_conflicts and update_conflicts are mutually exclusive."
)
db_features = connections[self.db].features
if ignore_conflicts:
if not db_features.supports_ignore_conflicts:
raise NotSupportedError(
"This database backend does not support ignoring conflicts."
)
return OnConflict.IGNORE
elif update_conflicts:
if not db_features.supports_update_conflicts:
raise NotSupportedError(
"This database backend does not support updating conflicts."
)
if not update_fields:
raise ValueError(
"Fields that will be updated when a row insertion fails "
"on conflicts must be provided."
)
if unique_fields and not db_features.supports_update_conflicts_with_target:
raise NotSupportedError(
"This database backend does not support updating "
"conflicts with specifying unique fields that can trigger "
"the upsert."
)
if not unique_fields and db_features.supports_update_conflicts_with_target:
raise ValueError(
"Unique fields that can trigger the upsert must be provided."
)
# Updating primary keys and non-concrete fields is forbidden.
update_fields = [self.model._meta.get_field(name) for name in update_fields]
if any(not f.concrete or f.many_to_many for f in update_fields):
raise ValueError(
"bulk_create() can only be used with concrete fields in "
"update_fields."
)
if any(f.primary_key for f in update_fields):
raise ValueError(
"bulk_create() cannot be used with primary keys in "
"update_fields."
)
if unique_fields:
# Primary key is allowed in unique_fields.
unique_fields = [
self.model._meta.get_field(name)
for name in unique_fields
if name != "pk"
]
if any(not f.concrete or f.many_to_many for f in unique_fields):
raise ValueError(
"bulk_create() can only be used with concrete fields "
"in unique_fields."
)
return OnConflict.UPDATE
return None
def bulk_create(
self,
objs,
batch_size=None,
ignore_conflicts=False,
update_conflicts=False,
update_fields=None,
unique_fields=None,
):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
Multi-table models are not supported.
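
        A usage sketch (``Entry`` is a hypothetical model):

            Entry.objects.bulk_create(
                [Entry(headline="First"), Entry(headline="Second")],
                batch_size=100,
            )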
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_rows_from_bulk_insert=True), so
        # you can't insert into the child tables that reference it. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
        # Oracle as well, but the semantics for extracting the primary keys are
        # trickier, so it's not done yet.
if batch_size is not None and batch_size <= 0:
raise ValueError("Batch size must be a positive integer.")
        # Check that the parents share the same concrete model as our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
on_conflict = self._check_bulk_create_options(
ignore_conflicts,
update_conflicts,
update_fields,
unique_fields,
)
self._for_write = True
opts = self.model._meta
fields = opts.concrete_fields
objs = list(objs)
self._prepare_for_bulk_create(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
returned_columns = self._batched_insert(
objs_with_pk,
fields,
batch_size,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
for obj_with_pk, results in zip(objs_with_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
if field != opts.pk:
setattr(obj_with_pk, field.attname, result)
for obj_with_pk in objs_with_pk:
obj_with_pk._state.adding = False
obj_with_pk._state.db = self.db
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
returned_columns = self._batched_insert(
objs_without_pk,
fields,
batch_size,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
connection = connections[self.db]
if (
connection.features.can_return_rows_from_bulk_insert
and on_conflict is None
):
assert len(returned_columns) == len(objs_without_pk)
for obj_without_pk, results in zip(objs_without_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
setattr(obj_without_pk, field.attname, result)
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
def bulk_update(self, objs, fields, batch_size=None):
"""
Update the given fields in each of the given objects in the database.
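
        A usage sketch (``Entry`` is a hypothetical model; ``entries`` holds
        saved instances whose ``rating`` was changed in memory):

            Entry.objects.bulk_update(entries, ["rating"], batch_size=100)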
"""
        if batch_size is not None and batch_size <= 0:
raise ValueError("Batch size must be a positive integer.")
if not fields:
raise ValueError("Field names must be given to bulk_update().")
objs = tuple(objs)
if any(obj.pk is None for obj in objs):
raise ValueError("All bulk_update() objects must have a primary key set.")
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError("bulk_update() can only be used with concrete fields.")
if any(f.primary_key for f in fields):
raise ValueError("bulk_update() cannot be used with primary key fields.")
if not objs:
return 0
for obj in objs:
obj._prepare_related_fields_for_save(
operation_name="bulk_update", fields=fields
)
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
self._for_write = True
connection = connections[self.db]
max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connection.features.requires_casted_case_in_updates
batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not hasattr(attr, "resolve_expression"):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
rows_updated = 0
queryset = self.using(self.db)
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs)
return rows_updated
bulk_update.alters_data = True
def get_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
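
        A usage sketch (``Author`` is a hypothetical model):

            author, created = Author.objects.get_or_create(
                name="Ada Lovelace",
                defaults={"born": 1815},
            )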
"""
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
# Try to create an object using passed params.
try:
with transaction.atomic(using=self.db):
params = dict(resolve_callables(params))
return self.create(**params), True
except IntegrityError:
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
pass
raise
def update_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
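
        A usage sketch (``Author`` is a hypothetical model):

            author, created = Author.objects.update_or_create(
                name="Ada Lovelace",
                defaults={"born": 1815},
            )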
"""
defaults = defaults or {}
self._for_write = True
with transaction.atomic(using=self.db):
# Lock the row so that a concurrent update is blocked until
# update_or_create() has performed its save.
obj, created = self.select_for_update().get_or_create(defaults, **kwargs)
if created:
return obj, created
for k, v in resolve_callables(defaults):
setattr(obj, k, v)
obj.save(using=self.db)
return obj, False
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `params` for creating a model instance based on the given
kwargs; for use by get_or_create().
"""
defaults = defaults or {}
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
property_names = self.model._meta._property_names
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
# It's okay to use a model's property if it has a setter.
if not (param in property_names and getattr(self.model, param).fset):
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'."
% (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
)
)
return params
def _earliest(self, *fields):
"""
Return the earliest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if fields:
order_by = fields
else:
            order_by = self.model._meta.get_latest_by
if order_by and not isinstance(order_by, (tuple, list)):
order_by = (order_by,)
if order_by is None:
raise ValueError(
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta."
)
obj = self._chain()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force=True)
obj.query.add_ordering(*order_by)
return obj.get()
def earliest(self, *fields):
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
return self._earliest(*fields)
def latest(self, *fields):
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
return self.reverse()._earliest(*fields)
def first(self):
"""Return the first object of a query or None if no match is found."""
for obj in (self if self.ordered else self.order_by("pk"))[:1]:
return obj
def last(self):
"""Return the last object of a query or None if no match is found."""
for obj in (self.reverse() if self.ordered else self.order_by("-pk"))[:1]:
return obj
def in_bulk(self, id_list=None, *, field_name="pk"):
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
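
        A usage sketch (``Entry`` is a hypothetical model; results are
        illustrative):

            Entry.objects.in_bulk([1, 2])
            # -> {1: <Entry: 1>, 2: <Entry: 2>}
            Entry.objects.in_bulk(["first", "second"], field_name="slug")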
"""
if self.query.is_sliced:
raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().")
opts = self.model._meta
unique_fields = [
constraint.fields[0]
for constraint in opts.total_unique_constraints
if len(constraint.fields) == 1
]
if (
field_name != "pk"
and not opts.get_field(field_name).unique
and field_name not in unique_fields
and self.query.distinct_fields != (field_name,)
):
raise ValueError(
"in_bulk()'s field_name must be a unique field but %r isn't."
% field_name
)
if id_list is not None:
if not id_list:
return {}
filter_key = "{}__in".format(field_name)
batch_size = connections[self.db].features.max_query_params
id_list = tuple(id_list)
# If the database has a limit on the number of query parameters
# (e.g. SQLite), retrieve objects in batches if necessary.
if batch_size and batch_size < len(id_list):
qs = ()
for offset in range(0, len(id_list), batch_size):
batch = id_list[offset : offset + batch_size]
qs += tuple(self.filter(**{filter_key: batch}).order_by())
else:
qs = self.filter(**{filter_key: id_list}).order_by()
else:
qs = self._chain()
return {getattr(obj, field_name): obj for obj in qs}
def delete(self):
"""Delete the records in the current QuerySet."""
self._not_support_combined_queries("delete")
if self.query.is_sliced:
raise TypeError("Cannot use 'limit' or 'offset' with delete().")
if self.query.distinct or self.query.distinct_fields:
raise TypeError("Cannot call delete() after .distinct().")
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._chain()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force=True)
collector = Collector(using=del_query.db, origin=self)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
        Delete objects found from the given queryset in a single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
query = self.query.clone()
query.__class__ = sql.DeleteQuery
cursor = query.get_compiler(using).execute_sql(CURSOR)
if cursor:
with cursor:
return cursor.rowcount
return 0
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
self._not_support_combined_queries("update")
if self.query.is_sliced:
raise TypeError("Cannot update a query once a slice has been taken.")
self._for_write = True
query = self.query.chain(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
with transaction.mark_for_rollback_on_error(using=self.db):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
if self.query.is_sliced:
raise TypeError("Cannot update a query once a slice has been taken.")
query = self.query.chain(sql.UpdateQuery)
query.add_update_fields(values)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def contains(self, obj):
"""Return True if the queryset contains an object."""
self._not_support_combined_queries("contains")
if self._fields is not None:
raise TypeError(
"Cannot call QuerySet.contains() after .values() or .values_list()."
)
try:
if obj._meta.concrete_model != self.model._meta.concrete_model:
return False
except AttributeError:
raise TypeError("'obj' must be a model instance.")
if obj.pk is None:
raise ValueError("QuerySet.contains() cannot be used on unsaved objects.")
if self._result_cache is not None:
return obj in self._result_cache
return self.filter(pk=obj.pk).exists()
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def explain(self, *, format=None, **options):
return self.query.explain(using=self.db, format=format, **options)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=(), translations=None, using=None):
if using is None:
using = self.db
qs = RawQuerySet(
raw_query,
model=self.model,
params=params,
translations=translations,
using=using,
)
qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
return qs
def _values(self, *fields, **expressions):
clone = self._chain()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False, named=False):
if flat and named:
raise TypeError("'flat' and 'named' can't be used together.")
if flat and len(fields) > 1:
raise TypeError(
"'flat' is not valid when values_list is called with more than one "
"field."
)
field_names = {f for f in fields if not hasattr(f, "resolve_expression")}
_fields = []
expressions = {}
counter = 1
for field in fields:
if hasattr(field, "resolve_expression"):
field_id_prefix = getattr(
field, "default_alias", field.__class__.__name__.lower()
)
while True:
field_id = field_id_prefix + str(counter)
counter += 1
if field_id not in field_names:
break
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = (
NamedValuesListIterable
if named
else FlatValuesListIterable
if flat
else ValuesListIterable
)
return clone
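    # Usage sketch for values()/values_list() (``Entry`` is a hypothetical
    # model; the results shown are illustrative):
    #
    #   Entry.objects.values("id", "headline")
    #       -> <QuerySet [{'id': 1, 'headline': 'First'}, ...]>
    #   Entry.objects.values_list("headline", flat=True)
    #       -> <QuerySet ['First', ...]>
    #   Entry.objects.values_list("id", "headline", named=True)
    #       -> <QuerySet [Row(id=1, headline='First'), ...]>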
def dates(self, field_name, kind, order="ASC"):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
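
        A usage sketch (``Entry`` with a ``pub_date`` field is hypothetical):

            Entry.objects.dates("pub_date", "month", order="DESC")
            # -> a QuerySet of datetime.date objects, one per distinct month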
"""
if kind not in ("year", "month", "week", "day"):
raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.")
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
return (
self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name),
)
.values_list("datefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datefield")
)
# RemovedInDjango50Warning: when the deprecation ends, remove is_dst
# argument.
def datetimes(
self, field_name, kind, order="ASC", tzinfo=None, is_dst=timezone.NOT_PASSED
):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
if kind not in ("year", "month", "week", "day", "hour", "minute", "second"):
raise ValueError(
"'kind' must be one of 'year', 'month', 'week', 'day', "
"'hour', 'minute', or 'second'."
)
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return (
self.annotate(
datetimefield=Trunc(
field_name,
kind,
output_field=DateTimeField(),
tzinfo=tzinfo,
is_dst=is_dst,
),
plain_field=F(field_name),
)
.values_list("datetimefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datetimefield")
)
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._chain()
def filter(self, *args, **kwargs):
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
self._not_support_combined_queries("filter")
return self._filter_or_exclude(False, args, kwargs)
def exclude(self, *args, **kwargs):
"""
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
self._not_support_combined_queries("exclude")
return self._filter_or_exclude(True, args, kwargs)
def _filter_or_exclude(self, negate, args, kwargs):
if (args or kwargs) and self.query.is_sliced:
raise TypeError("Cannot filter a query once a slice has been taken.")
clone = self._chain()
if self._defer_next_filter:
self._defer_next_filter = False
clone._deferred_filter = negate, args, kwargs
else:
clone._filter_or_exclude_inplace(negate, args, kwargs)
return clone
def _filter_or_exclude_inplace(self, negate, args, kwargs):
if negate:
self._query.add_q(~Q(*args, **kwargs))
else:
self._query.add_q(Q(*args, **kwargs))
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(False, args=(), kwargs=filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._chain()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(force=True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(
qs.query for qs in other_qs
)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
# If the query is an EmptyQuerySet, combine all nonempty querysets.
if isinstance(self, EmptyQuerySet):
qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
if not qs:
return self
if len(qs) == 1:
return qs[0]
return qs[0]._combinator_query("union", *qs[1:], all=all)
return self._combinator_query("union", *other_qs, all=all)
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query("intersection", *other_qs)
def difference(self, *other_qs):
# If the query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
return self._combinator_query("difference", *other_qs)
def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
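
        A usage sketch (``Entry`` is a hypothetical model; most backends
        require this to run inside transaction.atomic()):

            with transaction.atomic():
                entry = Entry.objects.select_for_update().get(pk=1)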
"""
if nowait and skip_locked:
raise ValueError("The nowait option cannot be used with skip_locked.")
obj = self._chain()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
obj.query.select_for_update_of = of
obj.query.select_for_no_key_update = no_key
return obj
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
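
        A usage sketch (``Entry`` with a ``blog`` ForeignKey is hypothetical):

            entry = Entry.objects.select_related("blog").get(pk=1)
            entry.blog  # no extra query; fetched by the same SELECT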
"""
self._not_support_combined_queries("select_related")
if self._fields is not None:
raise TypeError(
"Cannot call select_related() after .values() or .values_list()"
)
obj = self._chain()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
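
        A usage sketch (``Pizza`` with a ``toppings`` ManyToManyField is
        hypothetical):

            for pizza in Pizza.objects.prefetch_related("toppings"):
                list(pizza.toppings.all())  # served from the prefetch cache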
"""
self._not_support_combined_queries("prefetch_related")
clone = self._chain()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
for lookup in lookups:
if isinstance(lookup, Prefetch):
lookup = lookup.prefetch_to
lookup = lookup.split(LOOKUP_SEP, 1)[0]
if lookup in self.query._filtered_relations:
raise ValueError(
"prefetch_related() is not supported with FilteredRelation."
)
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
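
        A usage sketch (``Blog`` with a reverse ``entry`` relation is
        hypothetical):

            from django.db.models import Count
            Blog.objects.annotate(entry_count=Count("entry"))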
"""
self._not_support_combined_queries("annotate")
return self._annotate(args, kwargs, select=True)
def alias(self, *args, **kwargs):
"""
Return a query set with added aliases for extra data or aggregations.
"""
self._not_support_combined_queries("alias")
return self._annotate(args, kwargs, select=False)
def _annotate(self, args, kwargs, select=True):
self._validate_values_are_expressions(
args + tuple(kwargs.values()), method_name="annotate"
)
annotations = {}
for arg in args:
# The default_alias property may raise a TypeError.
try:
if arg.default_alias in kwargs:
raise ValueError(
"The named annotation '%s' conflicts with the "
"default name for another annotation." % arg.default_alias
)
except TypeError:
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._chain()
names = self._fields
if names is None:
names = set(
chain.from_iterable(
(field.name, field.attname)
if hasattr(field, "attname")
else (field.name,)
for field in self.model._meta.get_fields()
)
)
for alias, annotation in annotations.items():
if alias in names:
raise ValueError(
"The annotation '%s' conflicts with a field on "
"the model." % alias
)
if isinstance(annotation, FilteredRelation):
clone.query.add_filtered_relation(annotation, alias)
else:
clone.query.add_annotation(
annotation,
alias,
is_summary=False,
select=select,
)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
def order_by(self, *field_names):
"""Return a new QuerySet instance with the ordering changed."""
if self.query.is_sliced:
raise TypeError("Cannot reorder a query once a slice has been taken.")
obj = self._chain()
obj.query.clear_ordering(force=True, clear_default=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Return a new QuerySet instance that will select only distinct results.
"""
self._not_support_combined_queries("distinct")
if self.query.is_sliced:
raise TypeError(
"Cannot create distinct fields once a slice has been taken."
)
obj = self._chain()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(
self,
select=None,
where=None,
params=None,
tables=None,
order_by=None,
select_params=None,
):
"""Add extra SQL fragments to the query."""
self._not_support_combined_queries("extra")
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
clone = self._chain()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
if self.query.is_sliced:
raise TypeError("Cannot reverse a query once a slice has been taken.")
clone = self._chain()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
        The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
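
        A usage sketch (``Entry`` with a large ``body`` field is hypothetical):

            Entry.objects.defer("body")          # 'body' loads on first access
            Entry.objects.defer("body", "meta")  # deferrals accumulate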
"""
self._not_support_combined_queries("defer")
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._chain()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
self._not_support_combined_queries("only")
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
            # Can only pass None to defer(), not only(), as the reset option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
for field in fields:
field = field.split(LOOKUP_SEP, 1)[0]
if field in self.query._filtered_relations:
raise ValueError("only() is not supported with FilteredRelation.")
clone = self._chain()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._chain()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model (or is empty).
"""
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif (
self.query.default_ordering
and self.query.get_meta().ordering
and
# A default ordering doesn't affect GROUP BY queries.
not self.query.group_by
):
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(
self,
objs,
fields,
returning_fields=None,
raw=False,
using=None,
on_conflict=None,
update_fields=None,
unique_fields=None,
):
"""
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(
self.model,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(returning_fields)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(
self,
objs,
fields,
batch_size,
on_conflict=None,
update_fields=None,
unique_fields=None,
):
"""
Helper method for bulk_create() to insert objs one batch at a time.
"""
connection = connections[self.db]
ops = connection.ops
max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
inserted_rows = []
bulk_return = connection.features.can_return_rows_from_bulk_insert
for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]:
if bulk_return and on_conflict is None:
inserted_rows.extend(
self._insert(
item,
fields=fields,
using=self.db,
returning_fields=self.model._meta.db_returning_fields,
)
)
else:
self._insert(
item,
fields=fields,
using=self.db,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
return inserted_rows
def _chain(self):
"""
Return a copy of the current QuerySet that's ready for another
operation.
"""
obj = self._clone()
if obj._sticky_filter:
obj.query.filter_is_sticky = True
obj._sticky_filter = False
return obj
def _clone(self):
"""
Return a copy of the current QuerySet. A lightweight alternative
to deepcopy().
"""
c = self.__class__(
model=self.model,
query=self.query.chain(),
using=self._db,
hints=self._hints,
)
c._sticky_filter = self._sticky_filter
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c._iterable_class = self._iterable_class
c._fields = self._fields
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select)
or set(self.query.extra_select) != set(other.query.extra_select)
or set(self.query.annotation_select) != set(other.query.annotation_select)
):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def resolve_expression(self, *args, **kwargs):
if self._fields and len(self._fields) > 1:
# values() queryset can only be used as nested queries
# if they are set up to select only a single field.
raise TypeError("Cannot use multi-field values as a filter value.")
query = self.query.resolve_expression(*args, **kwargs)
query._db = self._db
return query
resolve_expression.queryset_only = True
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
        Check if this QuerySet has any filtering going on. This isn't
        equivalent to checking whether all objects are present in the results;
        for example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(
str(arg) for arg in values if not hasattr(arg, "resolve_expression")
)
if invalid_args:
raise TypeError(
"QuerySet.%s() received non-expression(s): %s."
% (
method_name,
", ".join(invalid_args),
)
)
def _not_support_combined_queries(self, operation_name):
if self.query.combinator:
raise NotSupportedError(
"Calling QuerySet.%s() after %s() is not supported."
% (operation_name, self.query.combinator)
)
def _check_operator_queryset(self, other, operator_):
if self.query.combinator or other.query.combinator:
raise TypeError(f"Cannot use {operator_} operator with combined queryset.")
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
    Marker class for checking whether a queryset is empty via .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
"""
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(
self,
raw_query,
model=None,
query=None,
params=(),
translations=None,
using=None,
hints=None,
):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params
self.translations = translations or {}
self._result_cache = None
self._prefetch_related_lookups = ()
self._prefetch_done = False
def resolve_model_init_order(self):
"""Resolve the init field names and value positions."""
converter = connections[self.db].introspection.identifier_converter
model_init_fields = [
f for f in self.model._meta.fields if converter(f.column) in self.columns
]
annotation_fields = [
(column, pos)
for pos, column in enumerate(self.columns)
if column not in self.model_fields
]
model_init_order = [
self.columns.index(converter(f.column)) for f in model_init_fields
]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def prefetch_related(self, *lookups):
"""Same as QuerySet.prefetch_related()"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def _prefetch_related_objects(self):
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def _clone(self):
"""Same as QuerySet._clone()"""
c = self.__class__(
self.raw_query,
model=self.model,
query=self.query,
params=self.params,
translations=self.translations,
using=self._db,
hints=self._hints,
)
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __iter__(self):
self._fetch_all()
return iter(self._result_cache)
def iterator(self):
# Cache some things for performance reasons outside the loop.
db = self.db
connection = connections[db]
compiler = connection.ops.compiler("SQLCompiler")(self.query, connection, db)
query = iter(self.query)
try:
(
model_init_names,
model_init_pos,
annotation_fields,
) = self.resolve_model_init_order()
if self.model._meta.pk.attname not in model_init_names:
raise exceptions.FieldDoesNotExist(
"Raw query must include the primary key"
)
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters(
[f.get_col(f.model._meta.db_table) if f else None for f in fields]
)
if converters:
query = compiler.apply_converters(query, converters)
for values in query:
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, "cursor") and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query,
model=self.model,
query=self.query.chain(using=alias),
params=self.params,
translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
        for query_name, model_name in self.translations.items():
# Ignore translations for nonexistent column names
try:
index = columns.index(query_name)
except ValueError:
pass
else:
columns[index] = model_name
return columns
@cached_property
def model_fields(self):
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.identifier_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and (
isinstance(queryset, RawQuerySet)
or (
hasattr(queryset, "_iterable_class")
and not issubclass(queryset._iterable_class, ModelIterable)
)
):
raise ValueError(
"Prefetch querysets cannot use raw(), values(), and values_list()."
)
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(
lookup.split(LOOKUP_SEP)[:-1] + [to_attr]
)
self.queryset = queryset
self.to_attr = to_attr
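    # Usage sketch (``Question``/``Choice`` are hypothetical related models):
    #
    #   Question.objects.prefetch_related(
    #       Prefetch(
    #           "choice_set",
    #           queryset=Choice.objects.filter(votes__gt=0),
    #           to_attr="popular_choices",
    #       )
    #   )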
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
queryset = self.queryset._chain()
# Prevent the QuerySet from being evaluated
queryset._result_cache = []
queryset._prefetch_done = True
obj_dict["queryset"] = queryset
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if not isinstance(other, Prefetch):
return NotImplemented
return self.prefetch_to == other.prefetch_to
def __hash__(self):
return hash((self.__class__, self.prefetch_to))
def normalize_prefetch_lookups(lookups, prefix=None):
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if not model_instances:
return # nothing to do
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
while all_lookups:
lookup = all_lookups.pop()
if lookup.prefetch_to in done_queries:
if lookup.queryset is not None:
raise ValueError(
"'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups."
% lookup.prefetch_to
)
continue
        # At the top level, the list of objects to decorate is the result
        # cache from the primary QuerySet; it won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if not obj_list:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, "_prefetched_objects_cache"):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to the first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(
first_obj, through_attr, to_attr
)
if not attr_found:
raise AttributeError(
"Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()"
% (
through_attr,
first_obj.__class__.__name__,
lookup.prefetch_through,
)
)
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError(
"'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through
)
obj_to_fetch = None
if prefetcher is not None:
obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)]
if obj_to_fetch:
obj_list, additional_lookups = prefetch_one_level(
obj_to_fetch,
prefetcher,
lookup,
level,
)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (
prefetch_to in done_queries
and lookup in auto_lookups
and descriptor in followed_descriptors
):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(
reversed(additional_lookups), prefetch_to
)
auto_lookups.update(new_lookups)
all_lookups.extend(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
if through_attr in getattr(obj, "_prefetched_objects_cache", ()):
# If related objects have been prefetched, use the
# cache rather than the object's through_attr.
new_obj = list(obj._prefetched_objects_cache.get(through_attr))
else:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
    Return a 4-tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a function that takes an instance and returns a boolean that is True if
the attribute has already been fetched for that instance)
"""
def has_to_attr_attribute(instance):
return hasattr(instance, to_attr)
prefetcher = None
is_fetched = has_to_attr_attribute
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
            # Singly related object: the descriptor object has the
            # get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, "get_prefetch_queryset"):
prefetcher = rel_obj_descriptor
is_fetched = rel_obj_descriptor.is_cached
else:
                # The descriptor doesn't support prefetching, so we go ahead
                # and get the attribute on the instance rather than the class,
                # to support many-related managers.
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, "get_prefetch_queryset"):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(
getattr(instance.__class__, to_attr, None), cached_property
):
def has_cached_property(instance):
return to_attr in instance.__dict__
is_fetched = has_cached_property
else:
def in_prefetched_cache(instance):
return through_attr in instance._prefetched_objects_cache
is_fetched = in_prefetched_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
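# A sketch of typical return values (hypothetical Book model with a
# "publisher" ForeignKey and a reverse "reviews" manager; assumptions for
# illustration only):
#
#     get_prefetcher(book, "publisher", "publisher")
#     # -> (forward descriptor, same descriptor, True, descriptor.is_cached)
#     get_prefetcher(book, "reviews", "reviews")
#     # -> (related manager, reverse descriptor, True, in_prefetched_cache)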
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache or field name to assign to,
# boolean that is True when the previous argument is a cache name vs a field name).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
(
rel_qs,
rel_obj_attr,
instance_attr,
single,
cache_name,
is_descriptor,
) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup)
for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ())
]
if additional_lookups:
        # There's no need to clone because the manager should have given us a
        # fresh instance, so we access an internal attribute instead of the
        # public interface for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
        # We assume that objects retrieved are homogeneous (which is the
        # premise of prefetch_related), so what applies to the first object
        # applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = "to_attr={} conflicts with a field on the {} model."
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
if as_attr:
# A to_attr has been given for the prefetch.
setattr(obj, to_attr, val)
elif is_descriptor:
# cache_name points to a field name in obj.
# This field is a descriptor for a related object.
setattr(obj, cache_name, val)
else:
# No to_attr has been given for this prefetch operation and the
# cache_name does not point to a descriptor. Store the value of
# the field in the object's field cache.
obj._state.fields_cache[cache_name] = val
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
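# Illustrative effect of the to_attr handling above (hypothetical models;
# an assumption for illustration): with
#
#     prefetch_related_objects(authors, Prefetch("books", to_attr="book_list"))
#
# each author gets a plain list assigned via setattr(obj, "book_list", vals),
# rather than a QuerySet stored in obj._prefetched_objects_cache["books"].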
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - local_setter, remote_setter: Methods to set cached values on
# the object being populated and on the remote object. Usually
# these are Field.set_cached_value() methods.
select_fields = klass_info["select_fields"]
from_parent = klass_info["from_parent"]
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start : self.cols_end]
]
self.reorder_for_init = None
else:
attname_indexes = {
select[idx][0].target.attname: idx for idx in select_fields
}
model_init_attnames = (
f.attname for f in klass_info["model"]._meta.concrete_fields
)
self.init_list = [
attname for attname in model_init_attnames if attname in attname_indexes
]
self.reorder_for_init = operator.itemgetter(
*[attname_indexes[attname] for attname in self.init_list]
)
self.model_cls = klass_info["model"]
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
self.local_setter = klass_info["local_setter"]
self.remote_setter = klass_info["remote_setter"]
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start : self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
self.local_setter(from_obj, obj)
if obj is not None:
self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get("related_klass_infos", [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
|
081d027c33f531c74c44d7c6b83b3539a27b5f0f9baf6b42fc42234dc1f9d57e | from django.core.exceptions import ObjectDoesNotExist
from django.db.models import signals
from django.db.models.aggregates import * # NOQA
from django.db.models.aggregates import __all__ as aggregates_all
from django.db.models.constraints import * # NOQA
from django.db.models.constraints import __all__ as constraints_all
from django.db.models.deletion import (
CASCADE,
DO_NOTHING,
PROTECT,
RESTRICT,
SET,
SET_DEFAULT,
SET_NULL,
ProtectedError,
RestrictedError,
)
from django.db.models.enums import * # NOQA
from django.db.models.enums import __all__ as enums_all
from django.db.models.expressions import (
Case,
Exists,
Expression,
ExpressionList,
ExpressionWrapper,
F,
Func,
OrderBy,
OuterRef,
RowRange,
Subquery,
Value,
ValueRange,
When,
Window,
WindowFrame,
)
from django.db.models.fields import * # NOQA
from django.db.models.fields import __all__ as fields_all
from django.db.models.fields.files import FileField, ImageField
from django.db.models.fields.json import JSONField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.indexes import * # NOQA
from django.db.models.indexes import __all__ as indexes_all
from django.db.models.lookups import Lookup, Transform
from django.db.models.manager import Manager
from django.db.models.query import Prefetch, QuerySet, prefetch_related_objects
from django.db.models.query_utils import FilteredRelation, Q
# Imports that would create circular imports if sorted
from django.db.models.base import DEFERRED, Model # isort:skip
from django.db.models.fields.related import ( # isort:skip
ForeignKey,
ForeignObject,
OneToOneField,
ManyToManyField,
ForeignObjectRel,
ManyToOneRel,
ManyToManyRel,
OneToOneRel,
)
__all__ = aggregates_all + constraints_all + enums_all + fields_all + indexes_all
__all__ += [
"ObjectDoesNotExist",
"signals",
"CASCADE",
"DO_NOTHING",
"PROTECT",
"RESTRICT",
"SET",
"SET_DEFAULT",
"SET_NULL",
"ProtectedError",
"RestrictedError",
"Case",
"Exists",
"Expression",
"ExpressionList",
"ExpressionWrapper",
"F",
"Func",
"OrderBy",
"OuterRef",
"RowRange",
"Subquery",
"Value",
"ValueRange",
"When",
"Window",
"WindowFrame",
"FileField",
"ImageField",
"JSONField",
"OrderWrt",
"Lookup",
"Transform",
"Manager",
"Prefetch",
"Q",
"QuerySet",
"prefetch_related_objects",
"DEFERRED",
"Model",
"FilteredRelation",
"ForeignKey",
"ForeignObject",
"OneToOneField",
"ManyToManyField",
"ForeignObjectRel",
"ManyToOneRel",
"ManyToManyRel",
"OneToOneRel",
]
|
56954cad44670f5656b2cca5e84fe906c1a2093caf8a08c5b12c4065c908e46a | import bisect
import copy
import inspect
from collections import defaultdict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.db import connections
from django.db.models import AutoField, Manager, OrderWrt, UniqueConstraint
from django.db.models.query_utils import PathInfo
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = ()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this "
"list for your own use, make a copy first."
)
DEFAULT_NAMES = (
"verbose_name",
"verbose_name_plural",
"db_table",
"ordering",
"unique_together",
"permissions",
"get_latest_by",
"order_with_respect_to",
"app_label",
"db_tablespace",
"abstract",
"managed",
"proxy",
"swappable",
"auto_created",
"index_together",
"apps",
"default_permissions",
"select_on_save",
"default_related_name",
"required_db_features",
"required_db_vendor",
"base_manager_name",
"default_manager_name",
"indexes",
"constraints",
)
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = option_together[0]
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
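# A minimal sketch of the normalization (inputs are illustrative
# assumptions):
#
#     normalize_together(("first_name", "last_name"))
#     # -> (("first_name", "last_name"),)
#     normalize_together([["a", "b"], ("c", "d")])  # -> (("a", "b"), ("c", "d"))
#     normalize_together(None)  # -> ()
#     normalize_together(42)    # -> 42, left for the check framework to reject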
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
class Options:
FORWARD_PROPERTIES = {
"fields",
"many_to_many",
"concrete_fields",
"local_concrete_fields",
"_forward_fields_map",
"managers",
"managers_map",
"base_manager",
"default_manager",
}
REVERSE_PROPERTIES = {"related_objects", "fields_map", "_relation_tree"}
default_apps = apps
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.local_fields = []
self.local_many_to_many = []
self.private_fields = []
self.local_managers = []
self.base_manager_name = None
self.default_manager_name = None
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ""
self.ordering = []
self._ordering_clash = False
self.indexes = []
self.constraints = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ("add", "change", "delete", "view")
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # at the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = {}
self.auto_created = False
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = self.default_apps
self.default_related_name = None
@property
def label(self):
return "%s.%s" % (self.app_label, self.object_name)
@property
def label_lower(self):
return "%s.%s" % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith("_"):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# App label/class name interpolation for names of constraints and
# indexes.
if not getattr(cls._meta, "abstract", False):
for attr_name in {"constraints", "indexes"}:
objs = getattr(self, attr_name, [])
setattr(self, attr_name, self._format_names_with_class(cls, objs))
            # verbose_name_plural is a special case because it uses an 's'
            # by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = format_lazy("{}s", self.verbose_name)
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError(
"'class Meta' got invalid attribute(s): %s" % ",".join(meta_attrs)
)
else:
self.verbose_name_plural = format_lazy("{}s", self.verbose_name)
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(
self.db_table, connection.ops.max_name_length()
)
def _format_names_with_class(self, cls, objs):
"""App label/class name interpolation for object names."""
new_objs = []
for obj in objs:
obj = obj.clone()
obj.name = obj.name % {
"app_label": cls._meta.app_label.lower(),
"class": cls.__name__.lower(),
}
new_objs.append(obj)
return new_objs
def _get_default_pk_class(self):
pk_class_path = getattr(
self.app_config,
"default_auto_field",
settings.DEFAULT_AUTO_FIELD,
)
if self.app_config and self.app_config._is_default_auto_field_overridden:
app_config_class = type(self.app_config)
source = (
f"{app_config_class.__module__}."
f"{app_config_class.__qualname__}.default_auto_field"
)
else:
source = "DEFAULT_AUTO_FIELD"
if not pk_class_path:
raise ImproperlyConfigured(f"{source} must not be empty.")
try:
pk_class = import_string(pk_class_path)
except ImportError as e:
msg = (
f"{source} refers to the module '{pk_class_path}' that could "
f"not be imported."
)
raise ImproperlyConfigured(msg) from e
if not issubclass(pk_class, AutoField):
raise ValueError(
f"Primary key '{pk_class_path}' referred by {source} must "
f"subclass AutoField."
)
return pk_class
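    # Illustrative configuration consumed above (a sketch, not part of this
    # module). The path can come from settings,
    #
    #     DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
    #
    # or from an app's AppConfig (MyAppConfig is hypothetical):
    #
    #     class MyAppConfig(AppConfig):
    #         default_auto_field = "django.db.models.AutoField"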
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f
for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist(
"%s has no field named '%s'" % (self.object_name, query)
)
self.ordering = ("_order",)
if not any(
isinstance(field, OrderWrt) for field in model._meta.local_fields
):
model.add_to_class("_order", OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(iter(self.parents.values()))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
                # created, use it instead of promoting the parent.
already_created = [
fld for fld in self.local_fields if fld.name == field.name
]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
pk_class = self._get_default_pk_class()
auto = pk_class(verbose_name="ID", primary_key=True, auto_created=True)
model.add_to_class("id", auto)
def add_manager(self, manager):
self.local_managers.append(manager)
self._expire_cache()
def add_field(self, field, private=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if private:
self.private_fields.append(field)
elif field.is_relation and field.many_to_many:
bisect.insort(self.local_many_to_many, field)
else:
bisect.insort(self.local_fields, field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if (
field.is_relation
and hasattr(field.remote_field, "model")
and field.remote_field.model
):
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Do the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return "<Options for %s>" % self.object_name
def __str__(self):
return self.label_lower
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, str):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(
getattr(connection.features, feat, False)
for feat in self.required_db_features
)
return True
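    # Illustrative gating for can_migrate() (a sketch; Meta options on a
    # hypothetical model):
    #
    #     class Meta:
    #         required_db_vendor = "postgresql"
    #
    # or, feature-based:
    #
    #     class Meta:
    #         required_db_features = ["supports_json_field"]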
@property
def verbose_name_raw(self):
"""Return the untranslated verbose name."""
with override(None):
return str(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split(".")
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if (
"%s.%s" % (swapped_label, swapped_object.lower())
!= self.label_lower
):
return swapped_for
return None
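    # Illustrative swap detection (a sketch; names are assumptions). The
    # built-in auth.User model declares Meta.swappable = "AUTH_USER_MODEL",
    # so with
    #
    #     AUTH_USER_MODEL = "myapp.User"
    #
    # in settings, User._meta.swapped returns "myapp.User"; for a model that
    # hasn't been swapped out, swapped is None.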
@cached_property
def managers(self):
managers = []
seen_managers = set()
bases = (b for b in self.model.mro() if hasattr(b, "_meta"))
for depth, base in enumerate(bases):
for manager in base._meta.local_managers:
if manager.name in seen_managers:
continue
manager = copy.copy(manager)
manager.model = self.model
seen_managers.add(manager.name)
managers.append((depth, manager.creation_counter, manager))
return make_immutable_fields_list(
"managers",
(m[2] for m in sorted(managers)),
)
@cached_property
def managers_map(self):
return {manager.name: manager for manager in self.managers}
@cached_property
def base_manager(self):
base_manager_name = self.base_manager_name
if not base_manager_name:
# Get the first parent's base_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, "_meta"):
if parent._base_manager.name != "_base_manager":
base_manager_name = parent._base_manager.name
break
if base_manager_name:
try:
return self.managers_map[base_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r"
% (
self.object_name,
base_manager_name,
)
)
manager = Manager()
manager.name = "_base_manager"
manager.model = self.model
manager.auto_created = True
return manager
@cached_property
def default_manager(self):
default_manager_name = self.default_manager_name
if not default_manager_name and not self.local_managers:
# Get the first parent's default_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, "_meta"):
default_manager_name = parent._meta.default_manager_name
break
if default_manager_name:
try:
return self.managers_map[default_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r"
% (
self.object_name,
default_manager_name,
)
)
if self.managers:
return self.managers[0]
@cached_property
def fields(self):
"""
Return a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
        # For legacy reasons, the fields property should only contain forward
        # fields that are neither private nor of m2m cardinality. Therefore we
        # pass these three filters to the generator.
        # The third filter is a longwinded way of checking f.related_model -
        # we don't use that property directly because related_model is a
        # cached property, and all the models may not have been loaded yet; we
        # don't want to cache the string reference to the related_model.
def is_not_an_m2m_field(f):
return not (f.is_relation and f.many_to_many)
def is_not_a_generic_relation(f):
return not (f.is_relation and f.one_to_many)
def is_not_a_generic_foreign_key(f):
return not (
f.is_relation
and f.many_to_one
and not (hasattr(f.remote_field, "model") and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(
f
for f in self._get_fields(reverse=False)
if is_not_an_m2m_field(f)
and is_not_a_generic_relation(f)
and is_not_a_generic_foreign_key(f)
),
)
@cached_property
def concrete_fields(self):
"""
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
Return a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(
f
for f in self._get_fields(reverse=False)
if f.is_relation and f.many_to_many
),
)
@cached_property
def related_objects(self):
"""
Return all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(
forward=False, reverse=True, include_hidden=True
)
return make_immutable_fields_list(
"related_objects",
(
obj
for obj in all_related_fields
if not obj.hidden or obj.field.many_to_many
),
)
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with a relation, this includes the *_id name too.
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with a relation, this includes the *_id name too.
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist(
"%s has no field named '%s'" % (self.object_name, field_name)
)
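    # Illustrative lookups against the maps above (hypothetical Book model
    # with an "author" ForeignKey; an assumption for illustration):
    #
    #     Book._meta.get_field("author")     # forward field, by name
    #     Book._meta.get_field("author_id")  # same field, by attname
    #     Book._meta.get_field("review")     # reverse relation, via fields_map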
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
def get_parent_list(self):
"""
Return all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Return the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Return None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In the case of a proxied model, the first link in the
                # chain to the ancestor is that parent's link.
return self.parents[parent] or parent_link
def get_path_to_parent(self, parent):
"""
Return a list of PathInfos containing the path from the current
model to the parent model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
# Skip the chain of proxy to the concrete proxied model.
proxied_model = self.concrete_model
path = []
opts = self
for int_model in self.get_base_chain(parent):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(
PathInfo(
from_opts=final_field.model._meta,
to_opts=opts,
target_fields=targets,
join_field=final_field,
m2m=False,
direct=True,
filtered_relation=None,
)
)
return path
def get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent)
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.reverse_path_infos)
return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
            # Abstract models' fields are copied to child models, hence we
            # will see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f
for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, str):
remote_label = f.remote_field.model._meta.concrete_model._meta.label
related_objects_graph[remote_label].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[
model._meta.concrete_model._meta.label
]
model._meta.__dict__["_relation_tree"] = related_objects
        # It seems it is possible that self is not in all_models, so guard
        # against that with a default for get().
return self.__dict__.get("_relation_tree", EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
        Return a list of fields associated with the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(
include_parents=include_parents, include_hidden=include_hidden
)
def _get_fields(
self,
forward=True,
reverse=True,
include_parents=True,
include_hidden=False,
seen_models=None,
):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError(
"Invalid argument for include_parents: %s" % (include_parents,)
)
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = seen_models is None
if topmost_call:
seen_models = set()
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
            # To avoid list manipulation, always return a shallow copy of
            # the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (
parent._meta.concrete_model != self.concrete_model
and include_parents == PROXY_PARENTS
):
continue
for obj in parent._meta._get_fields(
forward=forward,
reverse=reverse,
include_parents=include_parents,
include_hidden=include_hidden,
seen_models=seen_models,
):
if (
not getattr(obj, "parent_link", False)
or obj.model == self.concrete_model
):
fields.append(obj)
if reverse and not self.proxy:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields += self.local_fields
fields += self.local_many_to_many
# Private fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the private fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields += self.private_fields
        # To avoid list manipulation, always return a shallow copy of the
        # results.
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
@cached_property
def total_unique_constraints(self):
"""
Return a list of total unique constraints. Useful for determining set
of fields guaranteed to be unique for all rows.
"""
return [
constraint
for constraint in self.constraints
if (
isinstance(constraint, UniqueConstraint)
and constraint.condition is None
and not constraint.contains_expressions
)
]
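    # A sketch of which constraints qualify (hypothetical declarations, an
    # assumption for illustration):
    #
    #     UniqueConstraint(fields=["email"], name="uniq_email")  # counted
    #     UniqueConstraint(
    #         fields=["email"], condition=Q(active=True), name="uniq_active"
    #     )  # skipped: has a condition
    #     UniqueConstraint(Lower("email"), name="uniq_lower")  # skipped: expression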
@cached_property
def _property_names(self):
"""Return a set of the names of the properties defined on the model."""
names = []
for name in dir(self.model):
attr = inspect.getattr_static(self.model, name)
if isinstance(attr, property):
names.append(name)
return frozenset(names)
@cached_property
def db_returning_fields(self):
"""
Private API intended only to be used by Django itself.
Fields to be returned after a database insert.
"""
return [
field
for field in self._get_fields(
forward=True, reverse=False, include_parents=PROXY_PARENTS
)
if getattr(field, "db_returning", False)
]
|
12b7fe48cbbd083d5a65fab8c5cb7759da0e8e3308669fef9636ba1722507f2c | import enum
from types import DynamicClassAttribute
from django.utils.functional import Promise
__all__ = ["Choices", "IntegerChoices", "TextChoices"]
class ChoicesMeta(enum.EnumMeta):
"""A metaclass for creating a enum choices."""
def __new__(metacls, classname, bases, classdict, **kwds):
labels = []
for key in classdict._member_names:
value = classdict[key]
if (
isinstance(value, (list, tuple))
and len(value) > 1
and isinstance(value[-1], (Promise, str))
):
*value, label = value
value = tuple(value)
else:
label = key.replace("_", " ").title()
labels.append(label)
# Use dict.__setitem__() to suppress defenses against double
# assignment in enum's classdict.
dict.__setitem__(classdict, key, value)
cls = super().__new__(metacls, classname, bases, classdict, **kwds)
for member, label in zip(cls.__members__.values(), labels):
member._label_ = label
return enum.unique(cls)
def __contains__(cls, member):
if not isinstance(member, enum.Enum):
# Allow non-enums to match against member values.
return any(x.value == member for x in cls)
return super().__contains__(member)
@property
def names(cls):
empty = ["__empty__"] if hasattr(cls, "__empty__") else []
return empty + [member.name for member in cls]
@property
def choices(cls):
empty = [(None, cls.__empty__)] if hasattr(cls, "__empty__") else []
return empty + [(member.value, member.label) for member in cls]
@property
def labels(cls):
return [label for _, label in cls.choices]
@property
def values(cls):
return [value for value, _ in cls.choices]
class Choices(enum.Enum, metaclass=ChoicesMeta):
"""Class for creating enumerated choices."""
@DynamicClassAttribute
def label(self):
return self._label_
@property
def do_not_call_in_templates(self):
return True
def __str__(self):
"""
Use value when cast to str, so that Choices set as model instance
attributes are rendered as expected in templates and similar contexts.
"""
return str(self.value)
# A similar format was proposed for Python 3.10.
def __repr__(self):
return f"{self.__class__.__qualname__}.{self._name_}"
class IntegerChoices(int, Choices):
"""Class for creating enumerated integer choices."""
pass
class TextChoices(str, Choices):
"""Class for creating enumerated string choices."""
def _generate_next_value_(name, start, count, last_values):
return name
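# Illustrative usage of the classes above (a sketch; Suit is a hypothetical
# enum, not part of this module):
#
#     class Suit(TextChoices):
#         DIAMOND = "D", "Diamond"
#         SPADE = "S", "Spade"
#
#     Suit.DIAMOND.label   # "Diamond"
#     Suit.choices         # [("D", "Diamond"), ("S", "Spade")]
#     "D" in Suit          # True, via ChoicesMeta.__contains__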
|
f56e902b281e38bdab10c2bc9d1eaa749b020cb07d4ca4bc3005ddf5ead14378 | import copy
import inspect
import warnings
from functools import partialmethod
from itertools import chain
import django
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS,
FieldDoesNotExist,
FieldError,
MultipleObjectsReturned,
ObjectDoesNotExist,
ValidationError,
)
from django.db import (
DJANGO_VERSION_PICKLE_KEY,
DatabaseError,
connection,
connections,
router,
transaction,
)
from django.db.models import NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value
from django.db.models.constants import LOOKUP_SEP
from django.db.models.constraints import CheckConstraint, UniqueConstraint
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields.related import (
ForeignObjectRel,
OneToOneField,
lazy_related_operation,
resolve_relation,
)
from django.db.models.functions import Coalesce
from django.db.models.manager import Manager
from django.db.models.options import Options
from django.db.models.query import F, Q
from django.db.models.signals import (
class_prepared,
post_init,
post_save,
pre_init,
pre_save,
)
from django.db.models.utils import make_model_tuple
from django.utils.encoding import force_str
from django.utils.hashable import make_hashable
from django.utils.text import capfirst, get_text_list
from django.utils.translation import gettext_lazy as _
class Deferred:
def __repr__(self):
return "<Deferred field>"
def __str__(self):
return "<Deferred field>"
DEFERRED = Deferred()
def subclass_exception(name, bases, module, attached_to):
"""
Create exception subclass. Used by ModelBase below.
The exception is created in a way that allows it to be pickled, assuming
that the returned exception class will be added as an attribute to the
'attached_to' class.
"""
return type(
name,
bases,
{
"__module__": module,
"__qualname__": "%s.%s" % (attached_to.__qualname__, name),
},
)
def _has_contribute_to_class(value):
# Only call contribute_to_class() if it's bound.
return not inspect.isclass(value) and hasattr(value, "contribute_to_class")
class ModelBase(type):
"""Metaclass for all models."""
def __new__(cls, name, bases, attrs, **kwargs):
super_new = super().__new__
        # Also ensure initialization is only performed for subclasses of
        # Model (excluding the Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop("__module__")
new_attrs = {"__module__": module}
classcell = attrs.pop("__classcell__", None)
if classcell is not None:
new_attrs["__classcell__"] = classcell
attr_meta = attrs.pop("Meta", None)
# Pass all attrs without a (Django-specific) contribute_to_class()
# method to type.__new__() so that they're properly initialized
# (i.e. __set_name__()).
contributable_attrs = {}
for obj_name, obj in attrs.items():
if _has_contribute_to_class(obj):
contributable_attrs[obj_name] = obj
else:
new_attrs[obj_name] = obj
new_class = super_new(cls, name, bases, new_attrs, **kwargs)
abstract = getattr(attr_meta, "abstract", False)
meta = attr_meta or getattr(new_class, "Meta", None)
base_meta = getattr(new_class, "_meta", None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, "app_label", None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
new_class.add_to_class("_meta", Options(meta, app_label))
if not abstract:
new_class.add_to_class(
"DoesNotExist",
subclass_exception(
"DoesNotExist",
tuple(
x.DoesNotExist
for x in parents
if hasattr(x, "_meta") and not x._meta.abstract
)
or (ObjectDoesNotExist,),
module,
attached_to=new_class,
),
)
new_class.add_to_class(
"MultipleObjectsReturned",
subclass_exception(
"MultipleObjectsReturned",
tuple(
x.MultipleObjectsReturned
for x in parents
if hasattr(x, "_meta") and not x._meta.abstract
)
or (MultipleObjectsReturned,),
module,
attached_to=new_class,
),
)
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, "ordering"):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, "get_latest_by"):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError(
"%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)
)
# Add remaining attributes (those with a contribute_to_class() method)
# to the class.
for obj_name, obj in contributable_attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.private_fields,
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, "_meta")]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is None:
base = parent
elif parent._meta.concrete_model is not base._meta.concrete_model:
raise TypeError(
"Proxy model '%s' has more than one non-abstract model base "
"class." % name
)
if base is None:
raise TypeError(
"Proxy model '%s' has no non-abstract model base class." % name
)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, "_meta"):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField) and field.remote_field.parent_link:
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Track fields inherited from base models.
inherited_attributes = set()
# Do the appropriate setup for any model parents.
for base in new_class.mro():
if base not in parents or not hasattr(base, "_meta"):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
inherited_attributes.update(base.__dict__)
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
if not base._meta.abstract:
# Check for clashes between locally declared fields and those
# on the base classes.
for field in parent_fields:
if field.name in field_names:
raise FieldError(
"Local field %r in class %r clashes with field of "
"the same name from base class %r."
% (
field.name,
name,
base.__name__,
)
)
else:
inherited_attributes.add(field.name)
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = "%s_ptr" % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
if attr_name in field_names:
raise FieldError(
"Auto-generated field '%s' in class %r for "
"parent_link to base class %r clashes with "
"declared field of the same name."
% (
attr_name,
name,
base.__name__,
)
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
base_parents = base._meta.parents.copy()
# Add fields from abstract base class if it wasn't overridden.
for field in parent_fields:
if (
field.name not in field_names
and field.name not in new_class.__dict__
and field.name not in inherited_attributes
):
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Replace parent links defined on this base by the new
# field. It will be appropriately resolved if required.
if field.one_to_one:
for parent, parent_link in base_parents.items():
if field == parent_link:
base_parents[parent] = new_field
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base_parents)
# Inherit private fields (like GenericForeignKey) from the parent
# class
for field in base._meta.private_fields:
if field.name in field_names:
if not base._meta.abstract:
raise FieldError(
"Local field %r in class %r clashes with field of "
"the same name from base class %r."
% (
field.name,
name,
base.__name__,
)
)
else:
field = copy.deepcopy(field)
if not base._meta.abstract:
field.mti_inherited = True
new_class.add_to_class(field.name, field)
# Copy indexes so that index names are unique when models extend an
# abstract model.
new_class._meta.indexes = [
copy.deepcopy(idx) for idx in new_class._meta.indexes
]
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def add_to_class(cls, name, value):
if _has_contribute_to_class(value):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""Create some methods once self._meta has been populated."""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = partialmethod(
cls._get_next_or_previous_in_order, is_next=True
)
cls.get_previous_in_order = partialmethod(
cls._get_next_or_previous_in_order, is_next=False
)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (
cls.__name__,
", ".join(f.name for f in opts.fields),
)
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(
opts.label_lower
)
if get_absolute_url_override:
setattr(cls, "get_absolute_url", get_absolute_url_override)
if not opts.managers:
if any(f.name == "objects" for f in opts.fields):
raise ValueError(
"Model %s must specify a custom Manager, because it has a "
"field named 'objects'." % cls.__name__
)
manager = Manager()
manager.auto_created = True
cls.add_to_class("objects", manager)
# Set the name of _meta.indexes. This can't be done in
# Options.contribute_to_class() because fields haven't been added to
# the model at that point.
for index in cls._meta.indexes:
if not index.name:
index.set_name_with_model(cls)
class_prepared.send(sender=cls)
@property
def _base_manager(cls):
return cls._meta.base_manager
@property
def _default_manager(cls):
return cls._meta.default_manager
class ModelStateCacheDescriptor:
"""
Upon first access, replace itself with an empty dictionary on the instance.
"""
def __set_name__(self, owner, name):
self.attribute_name = name
def __get__(self, instance, cls=None):
if instance is None:
return self
res = instance.__dict__[self.attribute_name] = {}
return res
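# A minimal sketch of the descriptor's behavior (illustrative): since it is
# a non-data descriptor (no __set__), the dict it stores in the instance
# __dict__ shadows it on every later access:
#
#     state = ModelState()
#     state.fields_cache is state.fields_cache  # True: same dict reused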
class ModelState:
"""Store model instance state."""
db = None
# If true, uniqueness validation checks will consider this a new, unsaved
# object. Necessary for correct validation of new instances of objects with
# explicit (non-auto) PKs. This impacts validation only; it has no effect
# on the actual save.
adding = True
fields_cache = ModelStateCacheDescriptor()
related_managers_cache = ModelStateCacheDescriptor()
def __getstate__(self):
state = self.__dict__.copy()
if "fields_cache" in state:
state["fields_cache"] = self.fields_cache.copy()
# Manager instances stored in related_managers_cache won't necessarily
# be deserializable if they were dynamically created via an inner
# scope, e.g. create_forward_many_to_many_manager() and
# create_generic_related_manager().
if "related_managers_cache" in state:
state["related_managers_cache"] = {}
return state
class Model(metaclass=ModelBase):
def __init__(self, *args, **kwargs):
# Alias some things as locals to avoid repeat global lookups
cls = self.__class__
opts = self._meta
_setattr = setattr
_DEFERRED = DEFERRED
if opts.abstract:
raise TypeError("Abstract models cannot be instantiated.")
pre_init.send(sender=cls, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
        # There is a rather weird disparity here; if kwargs, it's set, then
        # args overrides it. It should be one or the other; don't duplicate
        # the work. The reason for the kwargs check is that the standard
        # iterator passes values in by args, and instantiation for iteration
        # is 33% faster.
if len(args) > len(opts.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(opts.concrete_fields)
            # The ordering of the zip calls matters - zip raises StopIteration
            # when an iterator raises it. So if the first iterator raises it,
            # the second is *not* consumed. We rely on this, so don't change
            # the order without changing the logic.
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(opts.fields)
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
if kwargs.pop(field.name, NOT_PROVIDED) is not NOT_PROVIDED:
raise TypeError(
f"{cls.__qualname__}() got both positional and "
f"keyword arguments for field '{field.name}'."
)
        # Now we're left with the unprocessed fields that *must* come from
        # keywords or defaults.
for field in fields_iter:
is_related_object = False
# Virtual field
if field.attname not in kwargs and field.column is None:
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
if rel_obj is not _DEFERRED:
_setattr(self, field.name, rel_obj)
else:
if val is not _DEFERRED:
_setattr(self, field.attname, val)
if kwargs:
property_names = opts._property_names
unexpected = ()
for prop, value in kwargs.items():
# Any remaining kwargs must correspond to properties or virtual
# fields.
if prop in property_names:
if value is not _DEFERRED:
_setattr(self, prop, value)
else:
try:
opts.get_field(prop)
except FieldDoesNotExist:
unexpected += (prop,)
else:
if value is not _DEFERRED:
_setattr(self, prop, value)
if unexpected:
unexpected_names = ", ".join(repr(n) for n in unexpected)
raise TypeError(
f"{cls.__name__}() got unexpected keyword arguments: "
f"{unexpected_names}"
)
super().__init__()
post_init.send(sender=cls, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if len(values) != len(cls._meta.concrete_fields):
values_iter = iter(values)
values = [
next(values_iter) if f.attname in field_names else DEFERRED
for f in cls._meta.concrete_fields
]
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
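    # A sketch of the deferral above (field names hypothetical): for a model
    # with concrete fields ("id", "name", "body") loaded via .only("id", "name"),
    # from_db() receives values=(1, "x") and expands them to [1, "x", DEFERRED],
    # so the positional __init__ path skips the missing attribute entirely.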
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def __str__(self):
return "%s object (%s)" % (self.__class__.__name__, self.pk)
def __eq__(self, other):
if not isinstance(other, Model):
return NotImplemented
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self.pk
if my_pk is None:
return self is other
return my_pk == other.pk
def __hash__(self):
if self.pk is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self.pk)
def __reduce__(self):
data = self.__getstate__()
data[DJANGO_VERSION_PICKLE_KEY] = django.__version__
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id,), data
def __getstate__(self):
"""Hook to allow choosing the attributes to pickle."""
state = self.__dict__.copy()
state["_state"] = copy.copy(state["_state"])
# memoryview cannot be pickled, so cast it to bytes and store
# separately.
_memoryview_attrs = []
for attr, value in state.items():
if isinstance(value, memoryview):
_memoryview_attrs.append((attr, bytes(value)))
if _memoryview_attrs:
state["_memoryview_attrs"] = _memoryview_attrs
for attr, value in _memoryview_attrs:
state.pop(attr)
return state
def __setstate__(self, state):
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
if pickled_version != django.__version__:
warnings.warn(
"Pickled model instance's Django version %s does not "
"match the current version %s."
% (pickled_version, django.__version__),
RuntimeWarning,
stacklevel=2,
)
else:
warnings.warn(
"Pickled model instance's Django version is not specified.",
RuntimeWarning,
stacklevel=2,
)
if "_memoryview_attrs" in state:
for attr, value in state.pop("_memoryview_attrs"):
state[attr] = memoryview(value)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
meta = meta or self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
for parent_link in self._meta.parents.values():
if parent_link and parent_link != self._meta.pk:
setattr(self, parent_link.target_field.attname, value)
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
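    # "pk" is an alias for whichever field is the primary key; with multi-table
    # inheritance, assigning to it also propagates the value to every parent
    # link (see _set_pk_val() above).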
def get_deferred_fields(self):
"""
Return a set containing names of deferred fields for this instance.
"""
return {
f.attname
for f in self._meta.concrete_fields
if f.attname not in self.__dict__
}
def refresh_from_db(self, using=None, fields=None):
"""
Reload field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is None:
self._prefetched_objects_cache = {}
else:
prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ())
for field in fields:
if field in prefetched_objects_cache:
del prefetched_objects_cache[field]
fields.remove(field)
if not fields:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
"are not allowed in fields." % LOOKUP_SEP
)
hints = {"instance": self}
db_instance_qs = self.__class__._base_manager.db_manager(
using, hints=hints
).filter(pk=self.pk)
        # Use the provided fields; if not set, reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [
f.attname
for f in self._meta.concrete_fields
if f.attname not in deferred_fields
]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Clear cached foreign keys.
if field.is_relation and field.is_cached(self):
field.delete_cached_value(self)
# Clear cached relations.
for field in self._meta.related_objects:
if field.is_cached(self):
field.delete_cached_value(self)
self._state.db = db_instance._state.db
def serializable_value(self, field_name):
"""
Return the value of the field name for this instance. If the field is
a foreign key, return the id value instead of the object. If there's
no Field object with this name on the model, return the model
attribute's value.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(
self, force_insert=False, force_update=False, using=None, update_fields=None
):
"""
Save the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
self._prepare_related_fields_for_save(operation_name="save")
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
deferred_fields = self.get_deferred_fields()
if update_fields is not None:
            # If update_fields is empty, skip the save. We also check for
            # no-op saves later on for inheritance cases. This bailout is
            # still needed for skipping signal sending.
if not update_fields:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError(
"The following fields do not exist in this model, are m2m "
"fields, or are non-concrete fields: %s"
% ", ".join(non_model_fields)
)
# If saving to the same database, and this model is deferred, then
# automatically do an "update_fields" save on the loaded fields.
elif not force_insert and deferred_fields and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, "through"):
field_names.add(field.attname)
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(
using=using,
force_insert=force_insert,
force_update=force_update,
update_fields=update_fields,
)
save.alters_data = True
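    # alters_data=True marks the method as unsafe to call during template
    # rendering; the template engine refuses to invoke callables carrying this
    # flag.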
def save_base(
self,
raw=False,
force_insert=False,
force_update=False,
using=None,
update_fields=None,
):
"""
Handle the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
        The 'raw' argument tells save_base not to save any parent
        models and not to make any changes to the values before saving. This
        is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or update_fields
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
pre_save.send(
sender=origin,
instance=self,
raw=raw,
using=using,
update_fields=update_fields,
)
# A transaction isn't needed if one query is issued.
if meta.parents:
context_manager = transaction.atomic(using=using, savepoint=False)
else:
context_manager = transaction.mark_for_rollback_on_error(using=using)
with context_manager:
parent_inserted = False
if not raw:
parent_inserted = self._save_parents(cls, using, update_fields)
updated = self._save_table(
raw,
cls,
force_insert or parent_inserted,
force_update,
using,
update_fields,
)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
post_save.send(
sender=origin,
instance=self,
created=(not updated),
update_fields=update_fields,
raw=raw,
using=using,
)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""Save all the parents of cls using values from self."""
meta = cls._meta
inserted = False
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (
field
and getattr(self, parent._meta.pk.attname) is None
and getattr(self, field.attname) is not None
):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
parent_inserted = self._save_parents(
cls=parent, using=using, update_fields=update_fields
)
updated = self._save_table(
cls=parent,
using=using,
update_fields=update_fields,
force_insert=parent_inserted,
)
if not updated:
inserted = True
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy, set
# attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
if field.is_cached(self):
field.delete_cached_value(self)
return inserted
def _save_table(
self,
raw=False,
cls=None,
force_insert=False,
force_update=False,
using=None,
update_fields=None,
):
"""
Do the heavy-lifting involved in saving. Update or insert the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [
f
for f in non_pks
if f.name in update_fields or f.attname in update_fields
]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
        # Skip an UPDATE when adding an instance and the primary key has a
        # default.
if (
not raw
and not force_insert
and self._state.adding
and meta.pk.default
and meta.pk.default is not NOT_PROVIDED
):
force_insert = True
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [
(
f,
None,
(getattr(self, f.attname) if raw else f.pre_save(self, False)),
)
for f in non_pks
]
forced_update = update_fields or force_update
updated = self._do_update(
base_qs, using, pk_val, values, update_fields, forced_update
)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
                # If this is a model with an order_with_respect_to,
                # autopopulate the _order field.
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
self._order = (
cls._base_manager.using(using)
.filter(**filter_args)
.aggregate(
_order__max=Coalesce(
ExpressionWrapper(
Max("_order") + Value(1), output_field=IntegerField()
),
Value(0),
),
)["_order__max"]
)
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if f is not meta.auto_field]
returning_fields = meta.db_returning_fields
results = self._do_insert(
cls._base_manager, using, fields, returning_fields, raw
)
if results:
for value, field in zip(results[0], returning_fields):
setattr(self, field.attname, value)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
Try to update the model. Return True if the model was updated (if an
update query was done and a matching row was found in the DB).
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
            # We can end up here when saving a model in an inheritance chain
            # where update_fields doesn't target any field in the current
            # model. In that case we just say the update succeeded. Another
            # case ending up here is a model with just a PK - in that case
            # check that the PK still exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
return (
filtered.exists()
and
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
                # database is checked again if the UPDATE query returns 0.
(filtered._update(values) > 0 or filtered.exists())
)
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, returning_fields, raw):
"""
Do an INSERT. If returning_fields is defined then this method should
return the newly created data for the model.
"""
return manager._insert(
[self],
fields=fields,
returning_fields=returning_fields,
using=using,
raw=raw,
)
def _prepare_related_fields_for_save(self, operation_name, fields=None):
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save would result in silent data loss.
for field in self._meta.concrete_fields:
if fields and field not in fields:
continue
# If the related field isn't cached, then an instance hasn't been
# assigned and there's no need to worry about this check.
if field.is_relation and field.is_cached(self):
obj = getattr(self, field.name, None)
if not obj:
continue
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
field.remote_field.delete_cached_value(obj)
raise ValueError(
"%s() prohibited to prevent data loss due to unsaved "
"related object '%s'." % (operation_name, field.name)
)
elif getattr(self, field.attname) in field.empty_values:
# Use pk from related object if it has been saved after
# an assignment.
setattr(self, field.attname, obj.pk)
# If the relationship's pk/to_field was changed, clear the
# cached relationship.
if getattr(obj, field.target_field.attname) != getattr(
self, field.attname
):
field.delete_cached_value(self)
def delete(self, using=None, keep_parents=False):
if self.pk is None:
raise ValueError(
"%s object can't be deleted because its %s attribute is set "
"to None." % (self._meta.object_name, self._meta.pk.attname)
)
using = using or router.db_for_write(self.__class__, instance=self)
collector = Collector(using=using, origin=self)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
choices_dict = dict(make_hashable(field.flatchoices))
# force_str() to coerce lazy strings.
return force_str(
choices_dict.get(make_hashable(value), value), strings_only=True
)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = "gt" if is_next else "lt"
order = "" if is_next else "-"
param = getattr(self, field.attname)
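        # The tuples below build Q objects directly from (lookup, value) pairs.
        # For is_next=True the filter is equivalent to
        #     Q(field=param, pk__gt=self.pk) | Q(field__gt=param)
        # so rows that tie on the field value fall back to pk ordering.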
q = Q((field.name, param), (f"pk__{op}", self.pk), _connector=Q.AND)
q = Q(q, (f"{field.name}__{op}", param), _connector=Q.OR)
qs = (
self.__class__._default_manager.using(self._state.db)
.filter(**kwargs)
.filter(q)
.order_by("%s%s" % (order, field.name), "%spk" % order)
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist(
"%s matching query does not exist." % self.__class__._meta.object_name
)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = "gt" if is_next else "lt"
order = "_order" if is_next else "-_order"
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = (
self.__class__._default_manager.filter(**filter_args)
.filter(
**{
"_order__%s"
% op: self.__class__._default_manager.values("_order").filter(
**{self._meta.pk.name: self.pk}
)
}
)
.order_by(order)[:1]
.get()
)
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError(
"Unsaved model instance %r cannot be used in an ORM query." % self
)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Check unique constraints on the model and raise ValidationError if any
failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Return a list of checks to perform. Since validate_unique() could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check. Fields that did not validate should also be excluded,
but they need to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
constraints = [(self.__class__, self._meta.total_unique_constraints)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append(
(parent_class, parent_class._meta.unique_together)
)
if parent_class._meta.total_unique_constraints:
constraints.append(
(parent_class, parent_class._meta.total_unique_constraints)
)
for model_class, unique_together in unique_togethers:
for check in unique_together:
if not any(name in exclude for name in check):
                    # Add the check only if none of its fields are excluded.
unique_checks.append((model_class, tuple(check)))
for model_class, model_constraints in constraints:
for constraint in model_constraints:
if not any(name in exclude for name in constraint.fields):
unique_checks.append((model_class, constraint.fields))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, "date", name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, "year", name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, "month", name, f.unique_for_month))
return unique_checks, date_checks
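    # For illustration (model and field names hypothetical): a model with
    # unique_together = [("app", "name")] and a unique "slug" field yields
    # unique_checks == [(ModelClass, ("app", "name")), (ModelClass, ("slug",))],
    # and unique_for_date="pub_date" on "slug" adds
    # (ModelClass, "date", "slug", "pub_date") to date_checks.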
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
# TODO: Handle multiple backends with different feature flags.
if lookup_value is None or (
lookup_value == ""
and connection.features.interprets_empty_strings_as_nulls
):
                    # No value, skip the lookup.
continue
if f.primary_key and not self._state.adding:
                    # No need to check for unique primary key when editing.
continue
lookup_kwargs[str(field_name)] = lookup_value
            # Some fields were skipped; no reason to do the check.
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one).
            # Note that we need to use the pk as defined by model_class, not
            # self.pk. These can be different fields because model inheritance
            # allows a single model to have effectively multiple primary keys.
            # Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(
self.unique_error_message(model_class, unique_check)
)
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
            # There's a ticket to add a date lookup; we can remove this special
            # case if that makes its way in.
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == "date":
lookup_kwargs["%s__day" % unique_for] = date.day
lookup_kwargs["%s__month" % unique_for] = date.month
lookup_kwargs["%s__year" % unique_for] = date.year
else:
lookup_kwargs["%s__%s" % (unique_for, lookup_type)] = getattr(
date, lookup_type
)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one).
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages["unique_for_date"],
code="unique_for_date",
params={
"model": self,
"model_name": capfirst(opts.verbose_name),
"lookup_type": lookup_type,
"field": field_name,
"field_label": capfirst(field.verbose_name),
"date_field": unique_for,
"date_field_label": capfirst(opts.get_field(unique_for).verbose_name),
},
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
"model": self,
"model_class": model_class,
"model_name": capfirst(opts.verbose_name),
"unique_check": unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params["field_label"] = capfirst(field.verbose_name)
return ValidationError(
message=field.error_messages["unique"],
code="unique",
params=params,
)
# unique_together
else:
field_labels = [
capfirst(opts.get_field(f).verbose_name) for f in unique_check
]
params["field_labels"] = get_text_list(field_labels, _("and"))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code="unique_together",
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Call clean_fields(), clean(), and validate_unique() on the model.
Raise a ValidationError for any errors that occur.
"""
errors = {}
if exclude is None:
exclude = []
else:
exclude = list(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors:
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
def clean_fields(self, exclude=None):
"""
Clean all fields and raise a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = [
*cls._check_swappable(),
*cls._check_model(),
*cls._check_managers(**kwargs),
]
if not cls._meta.swapped:
databases = kwargs.get("databases") or []
errors += [
*cls._check_fields(**kwargs),
*cls._check_m2m_through_same_relationship(),
*cls._check_long_column_names(databases),
]
clash_errors = (
*cls._check_id_field(),
*cls._check_field_name_clashes(),
*cls._check_model_name_db_lookup_clashes(),
*cls._check_property_name_related_field_accessor_clashes(),
*cls._check_single_primary_key(),
)
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors += [
*cls._check_index_together(),
*cls._check_unique_together(),
*cls._check_indexes(databases),
*cls._check_ordering(),
*cls._check_constraints(databases),
*cls._check_default_pk(),
]
return errors
@classmethod
def _check_default_pk(cls):
if (
not cls._meta.abstract
and cls._meta.pk.auto_created
and
            # Inherited PKs are checked in parent models.
not (
isinstance(cls._meta.pk, OneToOneField)
and cls._meta.pk.remote_field.parent_link
)
and not settings.is_overridden("DEFAULT_AUTO_FIELD")
and cls._meta.app_config
and not cls._meta.app_config._is_default_auto_field_overridden
):
return [
checks.Warning(
f"Auto-created primary key used when not defining a "
f"primary key type, by default "
f"'{settings.DEFAULT_AUTO_FIELD}'.",
hint=(
f"Configure the DEFAULT_AUTO_FIELD setting or the "
f"{cls._meta.app_config.__class__.__qualname__}."
f"default_auto_field attribute to point to a subclass "
f"of AutoField, e.g. 'django.db.models.BigAutoField'."
),
obj=cls,
id="models.W042",
),
]
return []
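    # W042 above is typically silenced project-wide by setting, e.g.,
    #     DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
    # or per app via the AppConfig.default_auto_field attribute.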
@classmethod
def _check_swappable(cls):
"""Check if the swapped model exists."""
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'."
% cls._meta.swappable,
id="models.E001",
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split(".")
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract."
% (cls._meta.swappable, app_label, model_name),
id="models.E002",
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
id="models.E017",
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
"""Perform all manager checks."""
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
"""Perform all field checks."""
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
"""Check if no relationship model is used by more than one m2m field."""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (
f.remote_field.model,
cls,
f.remote_field.through,
f.remote_field.through_fields,
)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two identical many-to-many relations "
"through the intermediate model '%s'."
% f.remote_field.through._meta.label,
obj=cls,
id="models.E003",
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
"""Check if `id` field is a primary key."""
fields = [
f for f in cls._meta.local_fields if f.name == "id" and f != cls._meta.pk
]
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == "id":
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
obj=cls,
id="models.E004",
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
"""Forbid field shadowing in multi-table inheritance."""
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'."
% (clash.name, clash.model._meta, f.name, f.model._meta),
obj=cls,
id="models.E005",
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents, including auto-generated fields like multi-table inheritance
# child accessors.
for parent in cls._meta.get_parent_list():
for f in parent._meta.get_fields():
if f not in used_fields:
used_fields[f.name] = f
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
            # Note that we may detect a clash between a user-defined non-unique
            # field "id" and the automatically added unique field "id", both
            # defined on the same model. This special case is considered in
            # _check_id_field and here we ignore it.
id_conflict = (
f.name == "id" and clash and clash.name == "id" and clash.model == cls
)
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (f.name, clash.name, clash.model._meta),
obj=f,
id="models.E006",
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id="models.E007",
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_model_name_db_lookup_clashes(cls):
errors = []
model_name = cls.__name__
if model_name.startswith("_") or model_name.endswith("_"):
errors.append(
checks.Error(
"The model name '%s' cannot start or end with an underscore "
"as it collides with the query lookup syntax." % model_name,
obj=cls,
id="models.E023",
)
)
elif LOOKUP_SEP in model_name:
errors.append(
checks.Error(
"The model name '%s' cannot contain double underscores as "
"it collides with the query lookup syntax." % model_name,
obj=cls,
id="models.E024",
)
)
return errors
@classmethod
def _check_property_name_related_field_accessor_clashes(cls):
errors = []
property_names = cls._meta._property_names
related_field_accessors = (
f.get_attname()
for f in cls._meta._get_fields(reverse=False)
if f.is_relation and f.related_model is not None
)
for accessor in related_field_accessors:
if accessor in property_names:
errors.append(
checks.Error(
"The property '%s' clashes with a related field "
"accessor." % accessor,
obj=cls,
id="models.E025",
)
)
return errors
@classmethod
def _check_single_primary_key(cls):
errors = []
if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1:
errors.append(
checks.Error(
"The model cannot have more than one field with "
"'primary_key=True'.",
obj=cls,
id="models.E026",
)
)
return errors
@classmethod
def _check_index_together(cls):
"""Check the value of "index_together" option."""
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
obj=cls,
id="models.E008",
)
]
elif any(
not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together
):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
obj=cls,
id="models.E009",
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
"""Check the value of "unique_together" option."""
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
obj=cls,
id="models.E010",
)
]
elif any(
not isinstance(fields, (tuple, list))
for fields in cls._meta.unique_together
):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
obj=cls,
id="models.E011",
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_indexes(cls, databases):
"""Check fields, names, and conditions of indexes."""
errors = []
references = set()
for index in cls._meta.indexes:
# Index name can't start with an underscore or a number, restricted
# for cross-database compatibility with Oracle.
if index.name[0] == "_" or index.name[0].isdigit():
errors.append(
checks.Error(
"The index name '%s' cannot start with an underscore "
"or a number." % index.name,
obj=cls,
id="models.E033",
),
)
if len(index.name) > index.max_name_length:
errors.append(
checks.Error(
"The index name '%s' cannot be longer than %d "
"characters." % (index.name, index.max_name_length),
obj=cls,
id="models.E034",
),
)
if index.contains_expressions:
for expression in index.expressions:
references.update(
ref[0] for ref in cls._get_expr_references(expression)
)
for db in databases:
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
if not (
connection.features.supports_partial_indexes
or "supports_partial_indexes" in cls._meta.required_db_features
) and any(index.condition is not None for index in cls._meta.indexes):
errors.append(
checks.Warning(
"%s does not support indexes with conditions."
% connection.display_name,
hint=(
"Conditions will be ignored. Silence this warning "
"if you don't care about it."
),
obj=cls,
id="models.W037",
)
)
if not (
connection.features.supports_covering_indexes
or "supports_covering_indexes" in cls._meta.required_db_features
) and any(index.include for index in cls._meta.indexes):
errors.append(
checks.Warning(
"%s does not support indexes with non-key columns."
% connection.display_name,
hint=(
"Non-key columns will be ignored. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W040",
)
)
if not (
connection.features.supports_expression_indexes
or "supports_expression_indexes" in cls._meta.required_db_features
) and any(index.contains_expressions for index in cls._meta.indexes):
errors.append(
checks.Warning(
"%s does not support indexes on expressions."
% connection.display_name,
hint=(
"An index won't be created. Silence this warning "
"if you don't care about it."
),
obj=cls,
id="models.W043",
)
)
fields = [
field for index in cls._meta.indexes for field, _ in index.fields_orders
]
fields += [include for index in cls._meta.indexes for include in index.include]
fields += references
errors.extend(cls._check_local_fields(fields, "indexes"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
        # In order to avoid hitting the relation tree prematurely, we use our
        # own fields_map instead of using get_field().
forward_fields_map = {}
for field in cls._meta._get_fields(reverse=False):
forward_fields_map[field.name] = field
if hasattr(field, "attname"):
forward_fields_map[field.attname] = field
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the nonexistent field '%s'."
% (
option,
field_name,
),
obj=cls,
id="models.E012",
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'."
% (
option,
field_name,
option,
),
obj=cls,
id="models.E013",
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
"'%s' refers to field '%s' which is not local to model "
"'%s'." % (option, field_name, cls._meta.object_name),
hint="This issue may be caused by multi-table inheritance.",
obj=cls,
id="models.E016",
)
)
return errors
@classmethod
def _check_ordering(cls):
"""
Check "ordering" option -- is it a list of strings and do all fields
exist?
"""
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=cls,
id="models.E021",
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
"'ordering' must be a tuple or list (even if you want to order by "
"only one field).",
obj=cls,
id="models.E014",
)
]
errors = []
fields = cls._meta.ordering
# Skip expressions and '?' fields.
fields = (f for f in fields if isinstance(f, str) and f != "?")
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith("-") else f) for f in fields)
# Separate related fields and non-related fields.
_fields = []
related_fields = []
for f in fields:
if LOOKUP_SEP in f:
related_fields.append(f)
else:
_fields.append(f)
fields = _fields
# Check related fields.
for field in related_fields:
_cls = cls
fld = None
for part in field.split(LOOKUP_SEP):
try:
# pk is an alias that won't be found by opts.get_field.
if part == "pk":
fld = _cls._meta.pk
else:
fld = _cls._meta.get_field(part)
if fld.is_relation:
_cls = fld.path_infos[-1].to_opts.model
else:
_cls = None
except (FieldDoesNotExist, AttributeError):
if fld is None or (
fld.get_transform(part) is None and fld.get_lookup(part) is None
):
errors.append(
checks.Error(
"'ordering' refers to the nonexistent field, "
"related field, or lookup '%s'." % field,
obj=cls,
id="models.E015",
)
)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != "pk"}
# Check for invalid or nonexistent fields in ordering.
invalid_fields = []
# Any field name that is not present in field_names does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(
chain.from_iterable(
(f.name, f.attname)
if not (f.auto_created and not f.concrete)
else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
)
)
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the nonexistent field, related "
"field, or lookup '%s'." % invalid_field,
obj=cls,
id="models.E015",
)
)
return errors
@classmethod
def _check_long_column_names(cls, databases):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
if not databases:
return []
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
for db in databases:
            # Skip databases where the model won't be created.
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
            if max_name_length is None or connection.features.truncates_names:
                continue
            if allowed_len is None:
                allowed_len = max_name_length
                db_alias = db
            elif max_name_length < allowed_len:
                allowed_len = max_name_length
                db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if (
f.db_column is None
and column_name is not None
and len(column_name) > allowed_len
):
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id="models.E018",
)
)
for f in cls._meta.local_many_to_many:
# Skip nonexistent models.
if isinstance(f.remote_field.through, str):
continue
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if (
m2m.db_column is None
and rel_name is not None
and len(rel_name) > allowed_len
):
errors.append(
checks.Error(
"Autogenerated column name too long for M2M field "
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=(
"Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."
),
obj=cls,
id="models.E019",
)
)
return errors
@classmethod
def _get_expr_references(cls, expr):
if isinstance(expr, Q):
for child in expr.children:
if isinstance(child, tuple):
lookup, value = child
yield tuple(lookup.split(LOOKUP_SEP))
yield from cls._get_expr_references(value)
else:
yield from cls._get_expr_references(child)
elif isinstance(expr, F):
yield tuple(expr.name.split(LOOKUP_SEP))
elif hasattr(expr, "get_source_expressions"):
for src_expr in expr.get_source_expressions():
yield from cls._get_expr_references(src_expr)
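    # For example (field names hypothetical),
    # _get_expr_references(Q(price__gt=F("discount"))) yields ("price", "gt")
    # and then ("discount",).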
@classmethod
def _check_constraints(cls, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
if not (
connection.features.supports_table_check_constraints
or "supports_table_check_constraints" in cls._meta.required_db_features
) and any(
isinstance(constraint, CheckConstraint)
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support check constraints."
% connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W027",
)
)
if not (
connection.features.supports_partial_indexes
or "supports_partial_indexes" in cls._meta.required_db_features
) and any(
isinstance(constraint, UniqueConstraint)
and constraint.condition is not None
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support unique constraints with "
"conditions." % connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W036",
)
)
if not (
connection.features.supports_deferrable_unique_constraints
or "supports_deferrable_unique_constraints"
in cls._meta.required_db_features
) and any(
isinstance(constraint, UniqueConstraint)
and constraint.deferrable is not None
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support deferrable unique constraints."
% connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W038",
)
)
if not (
connection.features.supports_covering_indexes
or "supports_covering_indexes" in cls._meta.required_db_features
) and any(
isinstance(constraint, UniqueConstraint) and constraint.include
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support unique constraints with non-key "
"columns." % connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W039",
)
)
if not (
connection.features.supports_expression_indexes
or "supports_expression_indexes" in cls._meta.required_db_features
) and any(
isinstance(constraint, UniqueConstraint)
and constraint.contains_expressions
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support unique constraints on "
"expressions." % connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W044",
)
)
fields = set(
chain.from_iterable(
(*constraint.fields, *constraint.include)
for constraint in cls._meta.constraints
if isinstance(constraint, UniqueConstraint)
)
)
references = set()
for constraint in cls._meta.constraints:
if isinstance(constraint, UniqueConstraint):
if (
connection.features.supports_partial_indexes
or "supports_partial_indexes"
not in cls._meta.required_db_features
) and isinstance(constraint.condition, Q):
references.update(
cls._get_expr_references(constraint.condition)
)
if (
connection.features.supports_expression_indexes
or "supports_expression_indexes"
not in cls._meta.required_db_features
) and constraint.contains_expressions:
for expression in constraint.expressions:
references.update(cls._get_expr_references(expression))
elif isinstance(constraint, CheckConstraint):
if (
connection.features.supports_table_check_constraints
or "supports_table_check_constraints"
not in cls._meta.required_db_features
) and isinstance(constraint.check, Q):
references.update(cls._get_expr_references(constraint.check))
for field_name, *lookups in references:
# pk is an alias that won't be found by opts.get_field.
if field_name != "pk":
fields.add(field_name)
if not lookups:
# If it has no lookups it cannot result in a JOIN.
continue
try:
if field_name == "pk":
field = cls._meta.pk
else:
field = cls._meta.get_field(field_name)
if not field.is_relation or field.many_to_many or field.one_to_many:
continue
except FieldDoesNotExist:
continue
# JOIN must happen at the first lookup.
first_lookup = lookups[0]
if (
hasattr(field, "get_transform")
and hasattr(field, "get_lookup")
and field.get_transform(first_lookup) is None
and field.get_lookup(first_lookup) is None
):
errors.append(
checks.Error(
"'constraints' refers to the joined field '%s'."
% LOOKUP_SEP.join([field_name] + lookups),
obj=cls,
id="models.E041",
)
)
errors.extend(cls._check_local_fields(fields, "constraints"))
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(self, ordered_obj, id_list, using=None):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update(
[ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list)],
["_order"],
)
def method_get_order(self, ordered_obj):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
pk_name = ordered_obj._meta.pk.name
return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True)
def make_foreign_order_accessors(model, related_model):
setattr(
related_model,
"get_%s_order" % model.__name__.lower(),
partialmethod(method_get_order, model),
)
setattr(
related_model,
"set_%s_order" % model.__name__.lower(),
partialmethod(method_set_order, model),
)
########
# MISC #
########
def model_unpickle(model_id):
"""Used to unpickle Model subclasses with deferred fields."""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
return model.__new__(model)
model_unpickle.__safe_for_unpickle__ = True
|
6d32dd33479de6413b4f9414b4b2f1da5be183f285c41cc3e48fff472ce7cf1a | import copy
import inspect
from importlib import import_module
from django.db import router
from django.db.models.query import QuerySet
class BaseManager:
# To retain order, track each time a Manager instance is created.
creation_counter = 0
# Set to True for the 'objects' managers that are automatically created.
auto_created = False
#: If set to True the manager will be serialized into migrations and will
#: thus be available in e.g. RunPython operations.
use_in_migrations = False
def __new__(cls, *args, **kwargs):
# Capture the arguments to make returning them trivial.
obj = super().__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def __init__(self):
super().__init__()
self._set_creation_counter()
self.model = None
self.name = None
self._db = None
self._hints = {}
def __str__(self):
"""Return "app_label.model_label.manager_name"."""
return "%s.%s" % (self.model._meta.label, self.name)
def __class_getitem__(cls, *args, **kwargs):
return cls
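    # Supports the Manager[SomeModel] subscription syntax used by static type
    # checkers (e.g. django-stubs); at runtime the subscription is a no-op that
    # returns the class unchanged.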
def deconstruct(self):
"""
Return a 5-tuple of the form (as_manager (True), manager_class,
queryset_class, args, kwargs).
Raise a ValueError if the manager is dynamically generated.
"""
qs_class = self._queryset_class
if getattr(self, "_built_with_as_manager", False):
# using MyQuerySet.as_manager()
return (
True, # as_manager
None, # manager_class
"%s.%s" % (qs_class.__module__, qs_class.__name__), # qs_class
None, # args
None, # kwargs
)
else:
module_name = self.__module__
name = self.__class__.__name__
            # Make sure it's actually there and not an inner class.
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find manager %s in %s.\n"
"Please note that you need to inherit from managers you "
"dynamically generated with 'from_queryset()'."
% (name, module_name)
)
return (
False, # as_manager
"%s.%s" % (module_name, name), # manager_class
None, # qs_class
self._constructor_args[0], # args
self._constructor_args[1], # kwargs
)
def check(self, **kwargs):
return []
@classmethod
def _get_queryset_methods(cls, queryset_class):
def create_method(name, method):
def manager_method(self, *args, **kwargs):
return getattr(self.get_queryset(), name)(*args, **kwargs)
manager_method.__name__ = method.__name__
manager_method.__doc__ = method.__doc__
return manager_method
new_methods = {}
for name, method in inspect.getmembers(
queryset_class, predicate=inspect.isfunction
):
# Only copy missing methods.
if hasattr(cls, name):
continue
# Only copy public methods or methods with the attribute
# queryset_only=False.
queryset_only = getattr(method, "queryset_only", None)
if queryset_only or (queryset_only is None and name.startswith("_")):
continue
# Copy the method onto the manager.
new_methods[name] = create_method(name, method)
return new_methods
@classmethod
def from_queryset(cls, queryset_class, class_name=None):
if class_name is None:
class_name = "%sFrom%s" % (cls.__name__, queryset_class.__name__)
return type(
class_name,
(cls,),
{
"_queryset_class": queryset_class,
**cls._get_queryset_methods(queryset_class),
},
)
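    # Typical usage (names illustrative):
    #     class PersonQuerySet(QuerySet):
    #         def adults(self):
    #             return self.filter(age__gte=18)
    #     PersonManager = Manager.from_queryset(PersonQuerySet)
    # The generated manager exposes adults(); note that deconstruct() above can
    # only describe it if the generated class is bound to a module-level name.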
def contribute_to_class(self, cls, name):
self.name = self.name or name
self.model = cls
setattr(cls, name, ManagerDescriptor(self))
cls._meta.add_manager(self)
def _set_creation_counter(self):
"""
Set the creation counter value for this instance and increment the
class-level copy.
"""
self.creation_counter = BaseManager.creation_counter
BaseManager.creation_counter += 1
def db_manager(self, using=None, hints=None):
obj = copy.copy(self)
obj._db = using or self._db
obj._hints = hints or self._hints
return obj
@property
def db(self):
return self._db or router.db_for_read(self.model, **self._hints)
#######################
# PROXIES TO QUERYSET #
#######################
def get_queryset(self):
"""
Return a new QuerySet object. Subclasses can override this method to
customize the behavior of the Manager.
"""
return self._queryset_class(model=self.model, using=self._db, hints=self._hints)
def all(self):
# We can't proxy this method through the `QuerySet` like we do for the
# rest of the `QuerySet` methods. This is because `QuerySet.all()`
# works by creating a "copy" of the current queryset and in making said
# copy, all the cached `prefetch_related` lookups are lost. See the
# implementation of `RelatedManager.get_queryset()` for a better
# understanding of how this comes into play.
return self.get_queryset()
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self._constructor_args == other._constructor_args
)
def __hash__(self):
return id(self)
class Manager(BaseManager.from_queryset(QuerySet)):
pass
class ManagerDescriptor:
def __init__(self, manager):
self.manager = manager
def __get__(self, instance, cls=None):
if instance is not None:
raise AttributeError(
"Manager isn't accessible via %s instances" % cls.__name__
)
if cls._meta.abstract:
raise AttributeError(
"Manager isn't available; %s is abstract" % (cls._meta.object_name,)
)
if cls._meta.swapped:
raise AttributeError(
"Manager isn't available; '%s' has been swapped for '%s'"
% (
cls._meta.label,
cls._meta.swapped,
)
)
return cls._meta.managers_map[self.manager.name]
class EmptyManager(Manager):
def __init__(self, model):
super().__init__()
self.model = model
def get_queryset(self):
return super().get_queryset().none()
|
abc6f1a82f05bfccee5a02ae0bc01c3c5a2cc3c0501e6c85f5b2f322e7056a8d | import functools
from collections import namedtuple
def make_model_tuple(model):
"""
Take a model or a string of the form "app_label.ModelName" and return a
corresponding ("app_label", "modelname") tuple. If a tuple is passed in,
assume it's a valid model tuple already and return it unchanged.
"""
try:
if isinstance(model, tuple):
model_tuple = model
elif isinstance(model, str):
app_label, model_name = model.split(".")
model_tuple = app_label, model_name.lower()
else:
model_tuple = model._meta.app_label, model._meta.model_name
assert len(model_tuple) == 2
return model_tuple
except (ValueError, AssertionError):
raise ValueError(
"Invalid model reference '%s'. String model references "
"must be of the form 'app_label.ModelName'." % model
)
def resolve_callables(mapping):
"""
Generate key/value pairs for the given mapping where the values are
evaluated if they're callable.
"""
for k, v in mapping.items():
yield k, v() if callable(v) else v
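# Sketch of resolve_callables() (assumed inputs for illustration):
#
#     dict(resolve_callables({"a": 1, "b": lambda: 2}))  # -> {'a': 1, 'b': 2}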
def unpickle_named_row(names, values):
return create_namedtuple_class(*names)(*values)
@functools.lru_cache
def create_namedtuple_class(*names):
# Cache type() with @lru_cache since it's too slow to be called for every
# QuerySet evaluation.
def __reduce__(self):
return unpickle_named_row, (names, tuple(self))
return type(
"Row",
(namedtuple("Row", names),),
{"__reduce__": __reduce__, "__slots__": ()},
)
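# Illustrative sketch: the generated Row class round-trips through pickle
# via the __reduce__() above, and repeated calls hit the lru_cache:
#
#     import pickle
#     Row = create_namedtuple_class("id", "name")
#     row = Row(1, "alice")
#     assert pickle.loads(pickle.dumps(row)) == row
#     assert create_namedtuple_class("id", "name") is Row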
|
986ea1c555ae82bfe6d2e8135365c010c8aa96ed85278081b866b7e1d2c9bc44 | from functools import partial
from django.db.models.utils import make_model_tuple
from django.dispatch import Signal
class_prepared = Signal()
class ModelSignal(Signal):
"""
Signal subclass that allows the sender to be lazily specified as a string
of the `app_label.ModelName` form.
"""
def _lazy_method(self, method, apps, receiver, sender, **kwargs):
from django.db.models.options import Options
# This partial takes a single optional argument named "sender".
partial_method = partial(method, receiver, **kwargs)
if isinstance(sender, str):
apps = apps or Options.default_apps
apps.lazy_model_operation(partial_method, make_model_tuple(sender))
else:
return partial_method(sender)
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None, apps=None):
self._lazy_method(
super().connect,
apps,
receiver,
sender,
weak=weak,
dispatch_uid=dispatch_uid,
)
def disconnect(self, receiver=None, sender=None, dispatch_uid=None, apps=None):
return self._lazy_method(
super().disconnect, apps, receiver, sender, dispatch_uid=dispatch_uid
)
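# Usage sketch: _lazy_method() lets receivers connect before the sender
# model class exists (the "myapp.Book" label is an assumption):
#
#     def mark_dirty(sender, instance, **kwargs):
#         ...
#
#     post_save.connect(mark_dirty, sender="myapp.Book")  # resolved lazily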
pre_init = ModelSignal(use_caching=True)
post_init = ModelSignal(use_caching=True)
pre_save = ModelSignal(use_caching=True)
post_save = ModelSignal(use_caching=True)
pre_delete = ModelSignal(use_caching=True)
post_delete = ModelSignal(use_caching=True)
m2m_changed = ModelSignal(use_caching=True)
pre_migrate = Signal()
post_migrate = Signal()
|
9d549dc822542f0cd84ba2ed66bd2f3a881bdc58e76e5db0fc1248b34bc226db | """
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
import copy
import functools
import inspect
from collections import namedtuple
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple(
"PathInfo",
"from_opts to_opts target_fields join_field m2m direct filtered_relation",
)
def subclasses(cls):
yield cls
for subclass in cls.__subclasses__():
yield from subclasses(subclass)
class Q(tree.Node):
"""
Encapsulate filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = "AND"
OR = "OR"
default = AND
conditional = True
def __init__(self, *args, _connector=None, _negated=False, **kwargs):
super().__init__(
children=[*args, *sorted(kwargs.items())],
connector=_connector,
negated=_negated,
)
def _combine(self, other, conn):
if not (isinstance(other, Q) or getattr(other, "conditional", False) is True):
raise TypeError(other)
if not self:
return other.copy() if hasattr(other, "copy") else copy.copy(other)
elif isinstance(other, Q) and not other:
_, args, kwargs = self.deconstruct()
return type(self)(*args, **kwargs)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(
self,
reuse,
allow_joins=allow_joins,
split_subq=False,
check_filterable=False,
)
query.promote_joins(joins)
return clause
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.query_utils"):
path = path.replace("django.db.models.query_utils", "django.db.models")
args = tuple(self.children)
kwargs = {}
if self.connector != self.default:
kwargs["_connector"] = self.connector
if self.negated:
kwargs["_negated"] = True
return path, args, kwargs
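# Combination sketch (hypothetical lookups): Q objects nest into a tree
# whose connector is AND or OR, and ~ wraps the node in a negation:
#
#     q = Q(price__gt=10) | ~Q(category="books")
#     path, args, kwargs = q.deconstruct()
#     # path == "django.db.models.Q", kwargs == {"_connector": "OR"}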
class DeferredAttribute:
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, cls=None):
"""
Retrieve and cache the value from the datastore on the first lookup.
Return the cached value.
"""
if instance is None:
return self
data = instance.__dict__
field_name = self.field.attname
if field_name not in data:
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance)
if val is None:
instance.refresh_from_db(fields=[field_name])
else:
data[field_name] = val
return data[field_name]
def _check_parent_chain(self, instance):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be fetched
field is a primary key field.
"""
opts = instance._meta
link_field = opts.get_ancestor_link(self.field.model)
if self.field.primary_key and self.field != link_field:
return getattr(instance, link_field.attname)
return None
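# Usage sketch (hypothetical model): fields excluded via defer()/only() are
# replaced on the model class by DeferredAttribute; the first access loads
# just that field:
#
#     article = Article.objects.defer("body").get(pk=1)
#     article.body  # triggers refresh_from_db(fields=["body"]) via __get__()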
class RegisterLookupMixin:
@classmethod
def _get_lookup(cls, lookup_name):
return cls.get_lookups().get(lookup_name, None)
@classmethod
@functools.lru_cache(maxsize=None)
def get_lookups(cls):
class_lookups = [
parent.__dict__.get("class_lookups", {}) for parent in inspect.getmro(cls)
]
return cls.merge_dicts(class_lookups)
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, "output_field"):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, "output_field"):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@staticmethod
def merge_dicts(dicts):
"""
Merge dicts in reverse so that keys from dicts earlier in the original
list take precedence. e.g., merge_dicts([a, b]) prefers the keys in 'a'
over those in 'b'.
"""
merged = {}
for d in reversed(dicts):
merged.update(d)
return merged
@classmethod
def _clear_cached_lookups(cls):
for subclass in subclasses(cls):
subclass.get_lookups.cache_clear()
@classmethod
def register_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if "class_lookups" not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
cls._clear_cached_lookups()
return lookup
@classmethod
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
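# Registration sketch (a common pattern from the Django docs; the NotEqual
# lookup below is an example, not part of this module):
#
#     from django.db.models import CharField
#     from django.db.models.lookups import Lookup
#
#     class NotEqual(Lookup):
#         lookup_name = "ne"
#
#         def as_sql(self, compiler, connection):
#             lhs, lhs_params = self.process_lhs(compiler, connection)
#             rhs, rhs_params = self.process_rhs(compiler, connection)
#             return "%s <> %s" % (lhs, rhs), lhs_params + rhs_params
#
#     CharField.register_lookup(NotEqual)  # also clears the cached lookups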
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Return True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
* restricted - a boolean indicating whether the field list has been
manually restricted using a requested clause
* requested - The select_related() dictionary.
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.attname not in load_fields:
if restricted and field.name in requested:
msg = (
"Field %s.%s cannot be both deferred and traversed using "
"select_related at the same time."
) % (field.model._meta.object_name, field.name)
raise FieldError(msg)
return True
def refs_expression(lookup_parts, annotations):
"""
Check if the lookup_parts contain references to the given annotations set.
Because the LOOKUP_SEP is contained in the default annotation names, check
each prefix of the lookup_parts for a match.
"""
for n in range(1, len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
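# Sketch (hypothetical annotation "total__sum", where expr is some resolved
# annotation expression): each prefix of the lookup parts is tried because
# annotation names may themselves contain LOOKUP_SEP:
#
#     refs_expression(["total", "sum", "gt"], {"total__sum": expr})
#     # -> (expr, ["gt"])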
def check_rel_lookup_compatibility(model, target_opts, field):
"""
Check that self.model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model
or opts.concrete_model in model._meta.get_parent_list()
or model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
# place = OneToOneField(Place, primary_key=True):
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return check(target_opts) or (
getattr(field, "primary_key", False) and check(field.model._meta)
)
class FilteredRelation:
"""Specify custom filtering in the ON clause of SQL joins."""
def __init__(self, relation_name, *, condition=Q()):
if not relation_name:
raise ValueError("relation_name cannot be empty.")
self.relation_name = relation_name
self.alias = None
if not isinstance(condition, Q):
raise ValueError("condition argument must be a Q() instance.")
self.condition = condition
self.path = []
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.relation_name == other.relation_name
and self.alias == other.alias
and self.condition == other.condition
)
def clone(self):
clone = FilteredRelation(self.relation_name, condition=self.condition)
clone.alias = self.alias
clone.path = self.path[:]
return clone
def resolve_expression(self, *args, **kwargs):
"""
QuerySet.annotate() only accepts expression-like arguments
(with a resolve_expression() method).
"""
raise NotImplementedError("FilteredRelation.resolve_expression() is unused.")
def as_sql(self, compiler, connection):
# Resolve the condition in Join.filtered_relation.
query = compiler.query
where = query.build_filtered_relation_q(self.condition, reuse=set(self.path))
return compiler.compile(where)
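# Usage sketch (hypothetical models): the condition lands in the JOIN's ON
# clause rather than in WHERE, so unmatched rows are not filtered out:
#
#     Book.objects.annotate(
#         recent_reviews=FilteredRelation(
#             "reviews", condition=Q(reviews__year__gte=2020)
#         ),
#     ).filter(recent_reviews__rating=5)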
|
c9f84b8deb737e9285a9de69208b882cbdebda9983fa78512fee15ceb658438c | """
Constants used across the ORM in general.
"""
from enum import Enum
# Separator used to split filter strings apart.
LOOKUP_SEP = "__"
class OnConflict(Enum):
IGNORE = "ignore"
UPDATE = "update"
|
683e626f39fc2fc99d4a4183a0620167ceb38eb9551dc0cc03600eb736be6ccf | import copy
import datetime
import functools
import inspect
from decimal import Decimal
from uuid import UUID
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == "DecimalField":
sql = "CAST(%s AS NUMERIC)" % sql
except FieldError:
pass
return sql, params
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = "+"
SUB = "-"
MUL = "*"
DIV = "/"
POW = "^"
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = "%%"
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = "&"
BITOR = "|"
BITLEFTSHIFT = "<<"
BITRIGHTSHIFT = ">>"
BITXOR = "#"
def _combine(self, other, connector, reversed):
if not hasattr(other, "resolve_expression"):
# everything must be resolvable to an expression
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) & Q(other)
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def bitxor(self, other):
return self._combine(other, self.BITXOR, False)
def __or__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) | Q(other)
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
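# Arithmetic sketch (hypothetical field and model names): the operators
# above build nested CombinedExpression trees instead of evaluating eagerly:
#
#     expr = (F("price") + Value(2)) * 0.9  # CombinedExpression with "*"
#     Product.objects.update(price=expr)    # evaluated by the database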
class BaseExpression:
"""Base class for all query expressions."""
empty_result_set_value = NotImplemented
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
# Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop("convert_value", None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop
else [self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg
if hasattr(arg, "resolve_expression")
else (F(arg) if isinstance(arg, str) else Value(arg))
for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(
expr and expr.contains_aggregate for expr in self.get_source_expressions()
)
@cached_property
def contains_over_clause(self):
return any(
expr and expr.contains_over_clause for expr in self.get_source_expressions()
)
@cached_property
def contains_column_references(self):
return any(
expr and expr.contains_column_references
for expr in self.get_source_expressions()
)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions(
[
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr
else None
for expr in c.get_source_expressions()
]
)
return c
@property
def conditional(self):
return isinstance(self.output_field, fields.BooleanField)
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError("Cannot resolve expression type, unknown output_field")
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression. If the output
fields of all source fields match, then simply infer the same type
here. This isn't always correct, but it makes sense most of the time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
sources_iter = (
source for source in self.get_source_fields() if source is not None
)
for output_field in sources_iter:
for source in sources_iter:
if not isinstance(output_field, source.__class__):
raise FieldError(
"Expression contains mixed types: %s, %s. You must "
"set output_field."
% (
output_field.__class__.__name__,
source.__class__.__name__,
)
)
return output_field
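# Sketch of the mixed-type rule above (Func here is only for illustration):
#
#     Func(Value(1), Value("x")).output_field  # FieldError: mixed types
#     Func(Value(1), Value("x"), output_field=fields.CharField())  # explicit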
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == "FloatField":
return (
lambda value, expression, connection: None
if value is None
else float(value)
)
elif internal_type.endswith("IntegerField"):
return (
lambda value, expression, connection: None
if value is None
else int(value)
)
elif internal_type == "DecimalField":
return (
lambda value, expression, connection: None
if value is None
else Decimal(value)
)
return self._convert_value_noop
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
]
)
return clone
def copy(self):
return copy.copy(self)
def get_group_by_cols(self, alias=None):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
if hasattr(expr, "flatten"):
yield from expr.flatten()
else:
yield expr
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, EXISTS expressions need
to be wrapped in CASE WHEN on Oracle.
"""
if hasattr(self.output_field, "select_format"):
return self.output_field.select_format(compiler, sql, params)
return sql, params
@deconstructible
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
if value.name and value.model:
value = (value.model._meta.label, value.name)
else:
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
if not isinstance(other, Expression):
return NotImplemented
return other.identity == self.identity
def __hash__(self):
return hash(self.identity)
_connector_combinators = {
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
(fields.IntegerField, fields.DecimalField, fields.DecimalField),
(fields.DecimalField, fields.IntegerField, fields.DecimalField),
(fields.IntegerField, fields.FloatField, fields.FloatField),
(fields.FloatField, fields.IntegerField, fields.FloatField),
]
for connector in (Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV)
}
@functools.lru_cache(maxsize=128)
def _resolve_combined_type(connector, lhs_type, rhs_type):
combinators = _connector_combinators.get(connector, ())
for combinator_lhs_type, combinator_rhs_type, combined_type in combinators:
if issubclass(lhs_type, combinator_lhs_type) and issubclass(
rhs_type, combinator_rhs_type
):
return combined_type
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def _resolve_output_field(self):
try:
return super()._resolve_output_field()
except FieldError:
combined_type = _resolve_combined_type(
self.connector,
type(self.lhs.output_field),
type(self.rhs.output_field),
)
if combined_type is None:
raise
return combined_type()
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if not isinstance(self, (DurationExpression, TemporalSubtraction)):
try:
lhs_type = lhs.output_field.get_internal_type()
except (AttributeError, FieldError):
lhs_type = None
try:
rhs_type = rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
rhs_type = None
if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type:
return DurationExpression(
self.lhs, self.connector, self.rhs
).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
datetime_fields = {"DateField", "DateTimeField", "TimeField"}
if (
self.connector == self.SUB
and lhs_type in datetime_fields
and lhs_type == rhs_type
):
return TemporalSubtraction(self.lhs, self.rhs).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
c = self.copy()
c.is_summary = summarize
c.lhs = lhs
c.rhs = rhs
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == "DurationField":
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
if self.connector in {Combinable.MUL, Combinable.DIV}:
try:
lhs_type = self.lhs.output_field.get_internal_type()
rhs_type = self.rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
pass
else:
allowed_fields = {
"DecimalField",
"DurationField",
"FloatField",
"IntegerField",
}
if lhs_type not in allowed_fields or rhs_type not in allowed_fields:
raise DatabaseError(
f"Invalid arguments for operator {self.connector}."
)
return sql, params
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs)
rhs = compiler.compile(self.rhs)
return connection.ops.subtract_temporals(
self.lhs.output_field.get_internal_type(), lhs, rhs
)
@deconstructible(path="django.db.models.F")
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
contains_aggregate = False
def as_sql(self, *args, **kwargs):
raise ValueError(
"This queryset contains a reference to an outer query and may "
"only be used in a subquery."
)
def resolve_expression(self, *args, **kwargs):
col = super().resolve_expression(*args, **kwargs)
# FIXME: Rename possibly_multivalued to multivalued and fix detection
# for non-multivalued JOINs (e.g. foreign key fields). This should take
# into account only many-to-many and one-to-many relationships.
col.possibly_multivalued = LOOKUP_SEP in self.name
return col
def relabeled_clone(self, relabels):
return self
def get_group_by_cols(self, alias=None):
return []
class OuterRef(F):
contains_aggregate = False
def resolve_expression(self, *args, **kwargs):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def relabeled_clone(self, relabels):
return self
@deconstructible(path="django.db.models.Func")
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = "%(function)s(%(expressions)s)"
arg_joiner = ", "
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)"
% (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ", ".join(
str(key) + "=" + str(val) for key, val in sorted(extra.items())
)
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(
self,
compiler,
connection,
function=None,
template=None,
arg_joiner=None,
**extra_context,
):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
try:
arg_sql, arg_params = compiler.compile(arg)
except EmptyResultSet:
empty_result_set_value = getattr(
arg, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
raise
arg_sql, arg_params = compiler.compile(Value(empty_result_set_value))
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data["function"] = function
else:
data.setdefault("function", self.function)
template = template or data.get("template", self.template)
arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
@deconstructible(path="django.db.models.Value")
class Value(SQLiteNumericMixin, Expression):
"""Represent a wrapped value as a node within an expression."""
# Provide a default value for `for_save` in order to allow unresolved
# instances to be compiled until a decision is taken in #25425.
for_save = False
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return f"{self.__class__.__name__}({self.value!r})"
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, "get_placeholder"):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return "NULL", []
return "%s", [val]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self, alias=None):
return []
def _resolve_output_field(self):
if isinstance(self.value, str):
return fields.CharField()
if isinstance(self.value, bool):
return fields.BooleanField()
if isinstance(self.value, int):
return fields.IntegerField()
if isinstance(self.value, float):
return fields.FloatField()
if isinstance(self.value, datetime.datetime):
return fields.DateTimeField()
if isinstance(self.value, datetime.date):
return fields.DateField()
if isinstance(self.value, datetime.time):
return fields.TimeField()
if isinstance(self.value, datetime.timedelta):
return fields.DurationField()
if isinstance(self.value, Decimal):
return fields.DecimalField()
if isinstance(self.value, bytes):
return fields.BinaryField()
if isinstance(self.value, UUID):
return fields.UUIDField()
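# Inference sketch for the mapping above:
#
#     Value("abc").output_field           # fields.CharField()
#     Value(1).output_field               # fields.IntegerField()
#     Value(None)._output_field_or_none   # None; no branch matches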
@property
def empty_result_set_value(self):
return self.value
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return "(%s)" % self.sql, self.params
def get_group_by_cols(self, alias=None):
return [self]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# Resolve parents fields used in raw SQL.
for parent in query.model._meta.get_parent_list():
for parent_field in parent._meta.local_fields:
_, column_name = parent_field.get_attname_column()
if column_name.lower() in self.sql.lower():
query.resolve_ref(parent_field.name, allow_joins, reuse, summarize)
break
return super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return "*", []
class Col(Expression):
contains_column_references = True
possibly_multivalued = False
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
alias, target = self.alias, self.target
identifiers = (alias, str(target)) if alias else (str(target),)
return "{}({})".format(self.__class__.__name__, ", ".join(identifiers))
def as_sql(self, compiler, connection):
alias, column = self.alias, self.target.column
identifiers = (alias, column) if alias else (column,)
sql = ".".join(map(compiler.quote_name_unless_alias, identifiers))
return sql, []
def relabeled_clone(self, relabels):
if self.alias is None:
return self
return self.__class__(
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self, alias=None):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return self.output_field.get_db_converters(
connection
) + self.target.get_db_converters(connection)
class Ref(Expression):
"""
Reference to a column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
(self.source,) = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self, alias=None):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like a partition
clause.
"""
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError(
"%s requires at least one expression." % self.__class__.__name__
)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
class OrderByList(Func):
template = "ORDER BY %(expressions)s"
def __init__(self, *expressions, **extra):
expressions = (
(
OrderBy(F(expr[1:]), descending=True)
if isinstance(expr, str) and expr[0] == "-"
else expr
)
for expr in expressions
)
super().__init__(*expressions, **extra)
def as_sql(self, *args, **kwargs):
if not self.source_expressions:
return "", ()
return super().as_sql(*args, **kwargs)
@deconstructible(path="django.db.models.ExpressionWrapper")
class ExpressionWrapper(SQLiteNumericMixin, Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self, alias=None):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols(alias=alias)
# For non-expressions, e.g. an SQL WHERE clause, the entire
# `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
def as_sql(self, compiler, connection):
return compiler.compile(self.expression)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
@deconstructible(path="django.db.models.When")
class When(Expression):
template = "WHEN %(condition)s THEN %(result)s"
# This isn't a complete conditional expression; it must be used in Case().
conditional = False
def __init__(self, condition=None, then=None, **lookups):
if lookups:
if condition is None:
condition, lookups = Q(**lookups), None
elif getattr(condition, "conditional", False):
condition, lookups = Q(condition, **lookups), None
if condition is None or not getattr(condition, "conditional", False) or lookups:
raise TypeError(
"When() supports a Q object, a boolean expression, or lookups "
"as a condition."
)
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, "resolve_expression"):
c.condition = c.condition.resolve_expression(
query, allow_joins, reuse, summarize, False
)
c.result = c.result.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params["condition"] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params["result"] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self, alias=None):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
@deconstructible(path="django.db.models.Case")
class Case(SQLiteNumericMixin, Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = "CASE %(cases)s ELSE %(default)s END"
case_joiner = " "
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (
", ".join(str(c) for c in self.cases),
self.default,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
c.default = c.default.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(
self, compiler, connection, template=None, case_joiner=None, **extra_context
):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params["cases"] = case_joiner.join(case_parts)
template_params["default"] = default_sql
sql_params.extend(default_params)
template = template or template_params.get("template", self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def get_group_by_cols(self, alias=None):
if not self.cases:
return self.default.get_group_by_cols(alias)
return super().get_group_by_cols(alias)
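# Usage sketch matching the docstring above (the "n" field and Model name
# are hypothetical):
#
#     Model.objects.annotate(
#         sign=Case(
#             When(n__gt=0, then=Value("positive")),
#             When(n__lt=0, then=Value("negative")),
#             default=Value("zero"),
#         )
#     )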
class Subquery(BaseExpression, Combinable):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = "(%(subquery)s)"
contains_aggregate = False
empty_result_set_value = None
def __init__(self, queryset, output_field=None, **extra):
# Allow the usage of both QuerySet and sql.Query objects.
self.query = getattr(queryset, "query", queryset).clone()
self.query.subquery = True
self.extra = extra
super().__init__(output_field)
def get_source_expressions(self):
return [self.query]
def set_source_expressions(self, exprs):
self.query = exprs[0]
def _resolve_output_field(self):
return self.query.output_field
def copy(self):
clone = super().copy()
clone.query = clone.query.clone()
return clone
@property
def external_aliases(self):
return self.query.external_aliases
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, query=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
query = query or self.query
subquery_sql, sql_params = query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self, alias=None):
# If this expression is referenced by an alias for an explicit GROUP BY
# through values(), a reference to this expression and not the
# underlying .query must be returned to ensure external column
# references are not grouped against as well.
if alias:
return [Ref(alias, self)]
return self.query.get_group_by_cols()
class Exists(Subquery):
template = "EXISTS(%(subquery)s)"
output_field = fields.BooleanField()
def __init__(self, queryset, negated=False, **kwargs):
self.negated = negated
super().__init__(queryset, **kwargs)
def __invert__(self):
clone = self.copy()
clone.negated = not self.negated
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
query = self.query.exists(using=connection.alias)
try:
sql, params = super().as_sql(
compiler,
connection,
template=template,
query=query,
**extra_context,
)
except EmptyResultSet:
if self.negated:
features = compiler.connection.features
if not features.supports_boolean_expr_in_select_clause:
return "1=1", ()
return compiler.compile(Value(True))
raise
if self.negated:
sql = "NOT {}".format(sql)
return sql, params
def select_format(self, compiler, sql, params):
# Wrap EXISTS() with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql)
return sql, params
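# Usage sketch (hypothetical models): OuterRef defers resolution until the
# subquery is attached to an outer query:
#
#     newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created")
#     Post.objects.annotate(
#         newest_body=Subquery(newest.values("body")[:1]),
#         has_comments=Exists(newest),
#     )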
@deconstructible(path="django.db.models.OrderBy")
class OrderBy(Expression):
template = "%(expression)s %(ordering)s"
conditional = False
def __init__(
self, expression, descending=False, nulls_first=False, nulls_last=False
):
if nulls_first and nulls_last:
raise ValueError("nulls_first and nulls_last are mutually exclusive")
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, "resolve_expression"):
raise ValueError("expression must be an expression type")
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending
)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
template = template or self.template
if connection.features.supports_order_by_nulls_modifier:
if self.nulls_last:
template = "%s NULLS LAST" % template
elif self.nulls_first:
template = "%s NULLS FIRST" % template
else:
if self.nulls_last and not (
self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NULL, %s" % template
elif self.nulls_first and not (
not self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NOT NULL, %s" % template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
"expression": expression_sql,
"ordering": "DESC" if self.descending else "ASC",
**extra_context,
}
params *= template.count("%(expression)s")
return (template % placeholders).rstrip(), params
def as_oracle(self, compiler, connection):
# Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped
# in a CASE WHEN.
if connection.ops.conditional_expression_supported_in_where_clause(
self.expression
):
copy = self.copy()
copy.expression = Case(
When(self.expression, then=True),
default=False,
)
return copy.as_sql(compiler, connection)
return self.as_sql(compiler, connection)
def get_group_by_cols(self, alias=None):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first or self.nulls_last:
self.nulls_first = not self.nulls_first
self.nulls_last = not self.nulls_last
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
class Window(SQLiteNumericMixin, Expression):
template = "%(expression)s OVER (%(window)s)"
# Although the main expression may either be an aggregate or an
# expression with an aggregate function, the GROUP BY that will
# be introduced in the query as a result is not desired.
contains_aggregate = False
contains_over_clause = True
filterable = False
def __init__(
self,
expression,
partition_by=None,
order_by=None,
frame=None,
output_field=None,
):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, "window_compatible", False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses."
% expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = OrderByList(*self.order_by)
elif isinstance(self.order_by, (BaseExpression, str)):
self.order_by = OrderByList(self.order_by)
else:
raise ValueError(
"Window.order_by must be either a string reference to a "
"field, an expression, or a list or tuple of them."
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
if not connection.features.supports_over_clause:
raise NotSupportedError("This backend does not support window expressions.")
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], []
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler,
connection=connection,
template="PARTITION BY %(expressions)s",
)
window_sql.append(sql_expr)
window_params.extend(sql_params)
if self.order_by is not None:
order_sql, order_params = compiler.compile(self.order_by)
window_sql.append(order_sql)
window_params.extend(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.append(frame_sql)
window_params.extend(frame_params)
params.extend(window_params)
template = template or self.template
return (
template % {"expression": expr_sql, "window": " ".join(window_sql).strip()},
params,
)
def as_sqlite(self, compiler, connection):
if isinstance(self.output_field, fields.DecimalField):
# Casting to numeric must be outside of the window expression.
copy = self.copy()
source_expressions = copy.get_source_expressions()
source_expressions[0].output_field = fields.FloatField()
copy.set_source_expressions(source_expressions)
return super(Window, copy).as_sqlite(compiler, connection)
return self.as_sql(compiler, connection)
def __str__(self):
return "{} OVER ({}{}{})".format(
str(self.source_expression),
"PARTITION BY " + str(self.partition_by) if self.partition_by else "",
str(self.order_by or ""),
str(self.frame or ""),
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
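# Usage sketch (hypothetical model and fields): a window-compatible source
# expression combined with partitioning and ordering:
#
#     Employee.objects.annotate(
#         dept_avg=Window(
#             expression=Avg("salary"),
#             partition_by=[F("department")],
#             order_by="hire_date",
#         )
#     )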
class WindowFrame(Expression):
"""
Model the frame clause in window expressions. There are two types of frame
clauses, implemented as subclasses; however, all processing and validation
(by no means intended to be complete) is done here. Thus, providing an end for a
frame is optional (the default is UNBOUNDED FOLLOWING, which is the last
row in the frame).
"""
template = "%(frame_type)s BETWEEN %(start)s AND %(end)s"
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(
connection, self.start.value, self.end.value
)
return (
self.template
% {
"frame_type": self.frame_type,
"start": start,
"end": end,
},
[],
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = "%d %s" % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
"frame_type": self.frame_type,
"start": start,
"end": end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError("Subclasses must implement window_frame_start_end().")
class RowRange(WindowFrame):
frame_type = "ROWS"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = "RANGE"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
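# A minimal usage sketch (illustrative only; the Employee model and its
# salary/department/hire_date fields are assumptions, not part of this
# module). RowRange(start=-1, end=0) renders as
# "ROWS BETWEEN 1 PRECEDING AND CURRENT ROW":
#
#     from django.db.models import F, Sum, Window
#     from django.db.models.expressions import RowRange
#
#     Employee.objects.annotate(
#         running_total=Window(
#             expression=Sum("salary"),
#             partition_by=[F("department")],
#             order_by="hire_date",
#             frame=RowRange(start=-1, end=0),
#         )
#     )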
|
6a3d2100a20878c2eeb1a2c6f35b81bbaa729dc52a3ee2e60af6423304439979 | from collections import Counter, defaultdict
from functools import partial
from itertools import chain
from operator import attrgetter
from django.db import IntegrityError, connections, transaction
from django.db.models import query_utils, signals, sql
class ProtectedError(IntegrityError):
def __init__(self, msg, protected_objects):
self.protected_objects = protected_objects
super().__init__(msg, protected_objects)
class RestrictedError(IntegrityError):
def __init__(self, msg, restricted_objects):
self.restricted_objects = restricted_objects
super().__init__(msg, restricted_objects)
def CASCADE(collector, field, sub_objs, using):
collector.collect(
sub_objs,
source=field.remote_field.model,
source_attr=field.name,
nullable=field.null,
fail_on_restricted=False,
)
if field.null and not connections[using].features.can_defer_constraint_checks:
collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
raise ProtectedError(
"Cannot delete some instances of model '%s' because they are "
"referenced through a protected foreign key: '%s.%s'"
% (
field.remote_field.model.__name__,
sub_objs[0].__class__.__name__,
field.name,
),
sub_objs,
)
def RESTRICT(collector, field, sub_objs, using):
collector.add_restricted_objects(field, sub_objs)
collector.add_dependency(field.remote_field.model, field.model)
def SET(value):
if callable(value):
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value(), sub_objs)
else:
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value, sub_objs)
set_on_delete.deconstruct = lambda: ("django.db.models.SET", (value,), {})
return set_on_delete
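# A brief usage sketch (the User/Comment models and the get_sentinel_user
# helper are illustrative assumptions): passing a callable defers computing
# the replacement value until the cascade actually fires.
#
#     def get_sentinel_user():
#         return User.objects.get_or_create(username="deleted")[0]
#
#     class Comment(models.Model):
#         author = models.ForeignKey(
#             User, on_delete=models.SET(get_sentinel_user)
#         )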
def SET_NULL(collector, field, sub_objs, using):
collector.add_field_update(field, None, sub_objs)
def SET_DEFAULT(collector, field, sub_objs, using):
collector.add_field_update(field, field.get_default(), sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
pass
def get_candidate_relations_to_delete(opts):
# The candidate relations are the ones that come from N-1 and 1-1 relations.
# N-N (i.e., many-to-many) relations aren't candidates for deletion.
return (
f
for f in opts.get_fields(include_hidden=True)
if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many)
)
class Collector:
def __init__(self, using, origin=None):
self.using = using
# A Model or QuerySet object.
self.origin = origin
# Initially, {model: {instances}}, later values become lists.
self.data = defaultdict(set)
# {model: {(field, value): {instances}}}
self.field_updates = defaultdict(partial(defaultdict, set))
# {model: {field: {instances}}}
self.restricted_objects = defaultdict(partial(defaultdict, set))
# fast_deletes is a list of queryset-likes that can be deleted without
# fetching the objects into memory.
self.fast_deletes = []
# Tracks deletion-order dependency for databases without transactions
# or ability to defer constraint checks. Only concrete model classes
# should be included, as the dependencies exist only between actual
# database tables; proxy models are represented here by their concrete
# parent.
self.dependencies = defaultdict(set) # {model: {models}}
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
"""
Add 'objs' to the collection of objects to be deleted. If the call is
the result of a cascade, 'source' should be the model that caused it,
and 'nullable' should be set to True if the relation can be null.
Return a list of all objects that were not already collected.
"""
if not objs:
return []
new_objs = []
model = objs[0].__class__
instances = self.data[model]
for obj in objs:
if obj not in instances:
new_objs.append(obj)
instances.update(new_objs)
# Nullable relationships can be ignored -- they are nulled out before
# deleting, and therefore do not affect the order in which objects have
# to be deleted.
if source is not None and not nullable:
self.add_dependency(source, model, reverse_dependency=reverse_dependency)
return new_objs
def add_dependency(self, model, dependency, reverse_dependency=False):
if reverse_dependency:
model, dependency = dependency, model
self.dependencies[model._meta.concrete_model].add(
dependency._meta.concrete_model
)
self.data.setdefault(dependency, self.data.default_factory())
def add_field_update(self, field, value, objs):
"""
Schedule a field update. 'objs' must be a homogeneous iterable
collection of model instances (e.g. a QuerySet).
"""
if not objs:
return
model = objs[0].__class__
self.field_updates[model][field, value].update(objs)
def add_restricted_objects(self, field, objs):
if objs:
model = objs[0].__class__
self.restricted_objects[model][field].update(objs)
def clear_restricted_objects_from_set(self, model, objs):
if model in self.restricted_objects:
self.restricted_objects[model] = {
field: items - objs
for field, items in self.restricted_objects[model].items()
}
def clear_restricted_objects_from_queryset(self, model, qs):
if model in self.restricted_objects:
objs = set(
qs.filter(
pk__in=[
obj.pk
for objs in self.restricted_objects[model].values()
for obj in objs
]
)
)
self.clear_restricted_objects_from_set(model, objs)
def _has_signal_listeners(self, model):
return signals.pre_delete.has_listeners(
model
) or signals.post_delete.has_listeners(model)
def can_fast_delete(self, objs, from_field=None):
"""
Determine if the objects in the given queryset-like or single object
can be fast-deleted. This can be done if there are no cascades, no
parents and no signal listeners for the object class.
The 'from_field' tells where we are coming from - we need this to
determine if the objects are in fact to be deleted. It also allows
skipping a parent -> child -> parent chain that would otherwise prevent
fast deletion of the child.
"""
if from_field and from_field.remote_field.on_delete is not CASCADE:
return False
if hasattr(objs, "_meta"):
model = objs._meta.model
elif hasattr(objs, "model") and hasattr(objs, "_raw_delete"):
model = objs.model
else:
return False
if self._has_signal_listeners(model):
return False
# The use of from_field comes from the need to avoid cascade back to
# parent when parent delete is cascading to child.
opts = model._meta
return (
all(
link == from_field
for link in opts.concrete_model._meta.parents.values()
)
and
# Foreign keys pointing to this model.
all(
related.field.remote_field.on_delete is DO_NOTHING
for related in get_candidate_relations_to_delete(opts)
)
and (
# Something like generic foreign key.
not any(
hasattr(field, "bulk_related_objects")
for field in opts.private_fields
)
)
)
def get_del_batches(self, objs, fields):
"""
Return the objs in suitably sized batches for the used connection.
"""
field_names = [field.name for field in fields]
conn_batch_size = max(
connections[self.using].ops.bulk_batch_size(field_names, objs), 1
)
if len(objs) > conn_batch_size:
return [
objs[i : i + conn_batch_size]
for i in range(0, len(objs), conn_batch_size)
]
else:
return [objs]
def collect(
self,
objs,
source=None,
nullable=False,
collect_related=True,
source_attr=None,
reverse_dependency=False,
keep_parents=False,
fail_on_restricted=True,
):
"""
Add 'objs' to the collection of objects to be deleted as well as all
parent instances. 'objs' must be a homogeneous iterable collection of
model instances (e.g. a QuerySet). If 'collect_related' is True,
related objects will be handled by their respective on_delete handler.
If the call is the result of a cascade, 'source' should be the model
that caused it and 'nullable' should be set to True, if the relation
can be null.
If 'reverse_dependency' is True, 'source' will be deleted before the
current model, rather than after. (Needed for cascading to parent
models, the one case in which the cascade follows the forwards
direction of an FK rather than the reverse direction.)
If 'keep_parents' is True, data of parent models will not be deleted.
If 'fail_on_restricted' is False, no error is raised even if deleting
such objects is prohibited by RESTRICT; this defers restricted-object
checking to recursive calls, where the top-level call may need to
collect more objects to determine whether restricted ones can be
deleted.
"""
if self.can_fast_delete(objs):
self.fast_deletes.append(objs)
return
new_objs = self.add(
objs, source, nullable, reverse_dependency=reverse_dependency
)
if not new_objs:
return
model = new_objs[0].__class__
if not keep_parents:
# Recursively collect concrete model's parent models, but not their
# related objects. These will be found by meta.get_fields()
concrete_model = model._meta.concrete_model
for ptr in concrete_model._meta.parents.values():
if ptr:
parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
self.collect(
parent_objs,
source=model,
source_attr=ptr.remote_field.related_name,
collect_related=False,
reverse_dependency=True,
fail_on_restricted=False,
)
if not collect_related:
return
if keep_parents:
parents = set(model._meta.get_parent_list())
model_fast_deletes = defaultdict(list)
protected_objects = defaultdict(list)
for related in get_candidate_relations_to_delete(model._meta):
# Preserve parent reverse relationships if keep_parents=True.
if keep_parents and related.model in parents:
continue
field = related.field
if field.remote_field.on_delete == DO_NOTHING:
continue
related_model = related.related_model
if self.can_fast_delete(related_model, from_field=field):
model_fast_deletes[related_model].append(field)
continue
batches = self.get_del_batches(new_objs, [field])
for batch in batches:
sub_objs = self.related_objects(related_model, [field], batch)
# Non-referenced fields can be deferred if no signal receivers
# are connected for the related model as they'll never be
# exposed to the user. Skip field deferring when some
# relationships are select_related as interactions between both
# features are hard to get right. This should only happen in
# the rare cases where .related_objects is overridden anyway.
if not (
sub_objs.query.select_related
or self._has_signal_listeners(related_model)
):
referenced_fields = set(
chain.from_iterable(
(rf.attname for rf in rel.field.foreign_related_fields)
for rel in get_candidate_relations_to_delete(
related_model._meta
)
)
)
sub_objs = sub_objs.only(*tuple(referenced_fields))
if sub_objs:
try:
field.remote_field.on_delete(self, field, sub_objs, self.using)
except ProtectedError as error:
key = "'%s.%s'" % (field.model.__name__, field.name)
protected_objects[key] += error.protected_objects
if protected_objects:
raise ProtectedError(
"Cannot delete some instances of model %r because they are "
"referenced through protected foreign keys: %s."
% (
model.__name__,
", ".join(protected_objects),
),
set(chain.from_iterable(protected_objects.values())),
)
for related_model, related_fields in model_fast_deletes.items():
batches = self.get_del_batches(new_objs, related_fields)
for batch in batches:
sub_objs = self.related_objects(related_model, related_fields, batch)
self.fast_deletes.append(sub_objs)
for field in model._meta.private_fields:
if hasattr(field, "bulk_related_objects"):
# It's something like generic foreign key.
sub_objs = field.bulk_related_objects(new_objs, self.using)
self.collect(
sub_objs, source=model, nullable=True, fail_on_restricted=False
)
if fail_on_restricted:
# Raise an error if collected restricted objects (RESTRICT) aren't
# candidates for deletion also collected via CASCADE.
for related_model, instances in self.data.items():
self.clear_restricted_objects_from_set(related_model, instances)
for qs in self.fast_deletes:
self.clear_restricted_objects_from_queryset(qs.model, qs)
if self.restricted_objects.values():
restricted_objects = defaultdict(list)
for related_model, fields in self.restricted_objects.items():
for field, objs in fields.items():
if objs:
key = "'%s.%s'" % (related_model.__name__, field.name)
restricted_objects[key] += objs
if restricted_objects:
raise RestrictedError(
"Cannot delete some instances of model %r because "
"they are referenced through restricted foreign keys: "
"%s."
% (
model.__name__,
", ".join(restricted_objects),
),
set(chain.from_iterable(restricted_objects.values())),
)
def related_objects(self, related_model, related_fields, objs):
"""
Get a QuerySet of the related model to objs via related fields.
"""
predicate = query_utils.Q(
*((f"{related_field.name}__in", objs) for related_field in related_fields),
_connector=query_utils.Q.OR,
)
return related_model._base_manager.using(self.using).filter(predicate)
def instances_with_model(self):
for model, instances in self.data.items():
for obj in instances:
yield model, obj
def sort(self):
sorted_models = []
concrete_models = set()
models = list(self.data)
while len(sorted_models) < len(models):
found = False
for model in models:
if model in sorted_models:
continue
dependencies = self.dependencies.get(model._meta.concrete_model)
if not (dependencies and dependencies.difference(concrete_models)):
sorted_models.append(model)
concrete_models.add(model._meta.concrete_model)
found = True
if not found:
return
self.data = {model: self.data[model] for model in sorted_models}
def delete(self):
# sort instance collections
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
# if possible, bring the models in an order suitable for databases that
# don't support transactions or cannot defer constraint checks until the
# end of a transaction.
self.sort()
# number of objects deleted for each model label
deleted_counter = Counter()
# Optimize for the case with a single obj and no dependencies
if len(self.data) == 1 and len(instances) == 1:
instance = list(instances)[0]
if self.can_fast_delete(instance):
with transaction.mark_for_rollback_on_error(self.using):
count = sql.DeleteQuery(model).delete_batch(
[instance.pk], self.using
)
setattr(instance, model._meta.pk.attname, None)
return count, {model._meta.label: count}
with transaction.atomic(using=self.using, savepoint=False):
# send pre_delete signals
for model, obj in self.instances_with_model():
if not model._meta.auto_created:
signals.pre_delete.send(
sender=model,
instance=obj,
using=self.using,
origin=self.origin,
)
# fast deletes
for qs in self.fast_deletes:
count = qs._raw_delete(using=self.using)
if count:
deleted_counter[qs.model._meta.label] += count
# update fields
for model, instances_for_fieldvalues in self.field_updates.items():
for (field, value), instances in instances_for_fieldvalues.items():
query = sql.UpdateQuery(model)
query.update_batch(
[obj.pk for obj in instances], {field.name: value}, self.using
)
# reverse instance collections
for instances in self.data.values():
instances.reverse()
# delete instances
for model, instances in self.data.items():
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
count = query.delete_batch(pk_list, self.using)
if count:
deleted_counter[model._meta.label] += count
if not model._meta.auto_created:
for obj in instances:
signals.post_delete.send(
sender=model,
instance=obj,
using=self.using,
origin=self.origin,
)
# update collected instances
for instances_for_fieldvalues in self.field_updates.values():
for (field, value), instances in instances_for_fieldvalues.items():
for obj in instances:
setattr(obj, field.attname, value)
for model, instances in self.data.items():
for instance in instances:
setattr(instance, model._meta.pk.attname, None)
return sum(deleted_counter.values()), dict(deleted_counter)
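# A minimal sketch of how this collector is typically driven (mirroring what
# Model.delete() does internally; `obj` is an assumed model instance):
#
#     collector = Collector(using="default", origin=obj)
#     collector.collect([obj])
#     total, per_model = collector.delete()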
|
f28255e1febc083b0d6af9c9bda78e62584c8b652a7111739a69948ba4998293 | import itertools
import math
from django.core.exceptions import EmptyResultSet
from django.db.models.expressions import Case, Expression, Func, Value, When
from django.db.models.fields import (
BooleanField,
CharField,
DateTimeField,
Field,
IntegerField,
UUIDField,
)
from django.db.models.query_utils import RegisterLookupMixin
from django.utils.datastructures import OrderedSet
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class Lookup(Expression):
lookup_name = None
prepare_rhs = True
can_use_none_as_rhs = False
def __init__(self, lhs, rhs):
self.lhs, self.rhs = lhs, rhs
self.rhs = self.get_prep_lookup()
self.lhs = self.get_prep_lhs()
if hasattr(self.lhs, "get_bilateral_transforms"):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if bilateral_transforms:
# Warn the user as soon as possible if they are trying to apply
# a bilateral transformation on a nested QuerySet: that won't work.
from django.db.models.sql.query import Query # avoid circular import
if isinstance(rhs, Query):
raise NotImplementedError(
"Bilateral transformations on nested querysets are not implemented."
)
self.bilateral_transforms = bilateral_transforms
def apply_bilateral_transforms(self, value):
for transform in self.bilateral_transforms:
value = transform(value)
return value
def __repr__(self):
return f"{self.__class__.__name__}({self.lhs!r}, {self.rhs!r})"
def batch_process_rhs(self, compiler, connection, rhs=None):
if rhs is None:
rhs = self.rhs
if self.bilateral_transforms:
sqls, sqls_params = [], []
for p in rhs:
value = Value(p, output_field=self.lhs.output_field)
value = self.apply_bilateral_transforms(value)
value = value.resolve_expression(compiler.query)
sql, sql_params = compiler.compile(value)
sqls.append(sql)
sqls_params.extend(sql_params)
else:
_, params = self.get_db_prep_lookup(rhs, connection)
sqls, sqls_params = ["%s"] * len(params), params
return sqls, sqls_params
def get_source_expressions(self):
if self.rhs_is_direct_value():
return [self.lhs]
return [self.lhs, self.rhs]
def set_source_expressions(self, new_exprs):
if len(new_exprs) == 1:
self.lhs = new_exprs[0]
else:
self.lhs, self.rhs = new_exprs
def get_prep_lookup(self):
if not self.prepare_rhs or hasattr(self.rhs, "resolve_expression"):
return self.rhs
if hasattr(self.lhs, "output_field"):
if hasattr(self.lhs.output_field, "get_prep_value"):
return self.lhs.output_field.get_prep_value(self.rhs)
elif self.rhs_is_direct_value():
return Value(self.rhs)
return self.rhs
def get_prep_lhs(self):
if hasattr(self.lhs, "resolve_expression"):
return self.lhs
return Value(self.lhs)
def get_db_prep_lookup(self, value, connection):
return ("%s", [value])
def process_lhs(self, compiler, connection, lhs=None):
lhs = lhs or self.lhs
if hasattr(lhs, "resolve_expression"):
lhs = lhs.resolve_expression(compiler.query)
sql, params = compiler.compile(lhs)
if isinstance(lhs, Lookup):
# Wrapped in parentheses to respect operator precedence.
sql = f"({sql})"
return sql, params
def process_rhs(self, compiler, connection):
value = self.rhs
if self.bilateral_transforms:
if self.rhs_is_direct_value():
# Do not call get_db_prep_lookup here as the value will be
# transformed before being used for lookup
value = Value(value, output_field=self.lhs.output_field)
value = self.apply_bilateral_transforms(value)
value = value.resolve_expression(compiler.query)
if hasattr(value, "as_sql"):
sql, params = compiler.compile(value)
# Ensure expression is wrapped in parentheses to respect operator
# precedence but avoid double wrapping as it can be misinterpreted
# on some backends (e.g. subqueries on SQLite).
if sql and sql[0] != "(":
sql = "(%s)" % sql
return sql, params
else:
return self.get_db_prep_lookup(value, connection)
def rhs_is_direct_value(self):
return not hasattr(self.rhs, "as_sql")
def get_group_by_cols(self, alias=None):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def as_oracle(self, compiler, connection):
# Oracle doesn't allow EXISTS() and filters to be compared to another
# expression unless they're wrapped in a CASE WHEN.
wrapped = False
exprs = []
for expr in (self.lhs, self.rhs):
if connection.ops.conditional_expression_supported_in_where_clause(expr):
expr = Case(When(expr, then=True), default=False)
wrapped = True
exprs.append(expr)
lookup = type(self)(*exprs) if wrapped else self
return lookup.as_sql(compiler, connection)
@cached_property
def output_field(self):
return BooleanField()
@property
def identity(self):
return self.__class__, self.lhs, self.rhs
def __eq__(self, other):
if not isinstance(other, Lookup):
return NotImplemented
return self.identity == other.identity
def __hash__(self):
return hash(make_hashable(self.identity))
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
c.lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
c.rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def select_format(self, compiler, sql, params):
# Wrap filters with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
return sql, params
class Transform(RegisterLookupMixin, Func):
"""
RegisterLookupMixin() is first so that get_lookup() and get_transform()
first examine self and then check output_field.
"""
bilateral = False
arity = 1
@property
def lhs(self):
return self.get_source_expressions()[0]
def get_bilateral_transforms(self):
if hasattr(self.lhs, "get_bilateral_transforms"):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if self.bilateral:
bilateral_transforms.append(self.__class__)
return bilateral_transforms
class BuiltinLookup(Lookup):
def process_lhs(self, compiler, connection, lhs=None):
lhs_sql, params = super().process_lhs(compiler, connection, lhs)
field_internal_type = self.lhs.output_field.get_internal_type()
db_type = self.lhs.output_field.db_type(connection=connection)
lhs_sql = connection.ops.field_cast_sql(db_type, field_internal_type) % lhs_sql
lhs_sql = (
connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
)
return lhs_sql, list(params)
def as_sql(self, compiler, connection):
lhs_sql, params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
rhs_sql = self.get_rhs_op(connection, rhs_sql)
return "%s %s" % (lhs_sql, rhs_sql), params
def get_rhs_op(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
class FieldGetDbPrepValueMixin:
"""
Some lookups require Field.get_db_prep_value() to be called on their
inputs.
"""
get_db_prep_lookup_value_is_iterable = False
def get_db_prep_lookup(self, value, connection):
# For relational fields, use the 'target_field' attribute of the
# output_field.
field = getattr(self.lhs.output_field, "target_field", None)
get_db_prep_value = (
getattr(field, "get_db_prep_value", None)
or self.lhs.output_field.get_db_prep_value
)
return (
"%s",
[get_db_prep_value(v, connection, prepared=True) for v in value]
if self.get_db_prep_lookup_value_is_iterable
else [get_db_prep_value(value, connection, prepared=True)],
)
class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin):
"""
Some lookups require Field.get_db_prep_value() to be called on each value
in an iterable.
"""
get_db_prep_lookup_value_is_iterable = True
def get_prep_lookup(self):
if hasattr(self.rhs, "resolve_expression"):
return self.rhs
prepared_values = []
for rhs_value in self.rhs:
if hasattr(rhs_value, "resolve_expression"):
# An expression will be handled by the database but can coexist
# alongside real values.
pass
elif self.prepare_rhs and hasattr(self.lhs.output_field, "get_prep_value"):
rhs_value = self.lhs.output_field.get_prep_value(rhs_value)
prepared_values.append(rhs_value)
return prepared_values
def process_rhs(self, compiler, connection):
if self.rhs_is_direct_value():
# rhs should be an iterable of values. Use batch_process_rhs()
# to prepare/transform those values.
return self.batch_process_rhs(compiler, connection)
else:
return super().process_rhs(compiler, connection)
def resolve_expression_parameter(self, compiler, connection, sql, param):
params = [param]
if hasattr(param, "resolve_expression"):
param = param.resolve_expression(compiler.query)
if hasattr(param, "as_sql"):
sql, params = compiler.compile(param)
return sql, params
def batch_process_rhs(self, compiler, connection, rhs=None):
pre_processed = super().batch_process_rhs(compiler, connection, rhs)
# The params list may contain expressions which compile to a
# sql/param pair. Zip them to get sql and param pairs that refer to the
# same argument and attempt to replace them with the result of
# compiling the param step.
sql, params = zip(
*(
self.resolve_expression_parameter(compiler, connection, sql, param)
for sql, param in zip(*pre_processed)
)
)
params = itertools.chain.from_iterable(params)
return sql, tuple(params)
class PostgresOperatorLookup(FieldGetDbPrepValueMixin, Lookup):
"""Lookup defined by operators on PostgreSQL."""
postgres_operator = None
def as_postgresql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = tuple(lhs_params) + tuple(rhs_params)
return "%s %s %s" % (lhs, self.postgres_operator, rhs), params
@Field.register_lookup
class Exact(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "exact"
def get_prep_lookup(self):
from django.db.models.sql.query import Query # avoid circular import
if isinstance(self.rhs, Query):
if self.rhs.has_limit_one():
if not self.rhs.has_select_fields:
self.rhs.clear_select_clause()
self.rhs.add_fields(["pk"])
else:
raise ValueError(
"The QuerySet value for an exact lookup must be limited to "
"one result using slicing."
)
return super().get_prep_lookup()
def as_sql(self, compiler, connection):
# Avoid comparison against direct rhs if lhs is a boolean value. That
# turns "boolfield__exact=True" into "WHERE boolean_field" instead of
# "WHERE boolean_field = True" when allowed.
if (
isinstance(self.rhs, bool)
and getattr(self.lhs, "conditional", False)
and connection.ops.conditional_expression_supported_in_where_clause(
self.lhs
)
):
lhs_sql, params = self.process_lhs(compiler, connection)
template = "%s" if self.rhs else "NOT %s"
return template % lhs_sql, params
return super().as_sql(compiler, connection)
@Field.register_lookup
class IExact(BuiltinLookup):
lookup_name = "iexact"
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if params:
params[0] = connection.ops.prep_for_iexact_query(params[0])
return rhs, params
@Field.register_lookup
class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "gt"
@Field.register_lookup
class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "gte"
@Field.register_lookup
class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "lt"
@Field.register_lookup
class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "lte"
class IntegerFieldFloatRounding:
"""
Allow floats to work as query values for IntegerField. Without this, the
decimal portion of the float would always be discarded.
"""
def get_prep_lookup(self):
if isinstance(self.rhs, float):
self.rhs = math.ceil(self.rhs)
return super().get_prep_lookup()
@IntegerField.register_lookup
class IntegerGreaterThanOrEqual(IntegerFieldFloatRounding, GreaterThanOrEqual):
pass
@IntegerField.register_lookup
class IntegerLessThan(IntegerFieldFloatRounding, LessThan):
pass
@Field.register_lookup
class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
lookup_name = "in"
def get_prep_lookup(self):
from django.db.models.sql.query import Query # avoid circular import
if isinstance(self.rhs, Query):
self.rhs.clear_ordering(clear_default=True)
if not self.rhs.has_select_fields:
self.rhs.clear_select_clause()
self.rhs.add_fields(["pk"])
return super().get_prep_lookup()
def process_rhs(self, compiler, connection):
db_rhs = getattr(self.rhs, "_db", None)
if db_rhs is not None and db_rhs != connection.alias:
raise ValueError(
"Subqueries aren't allowed across different databases. Force "
"the inner query to be evaluated using `list(inner_query)`."
)
if self.rhs_is_direct_value():
# Remove None from the list as NULL is never equal to anything.
try:
rhs = OrderedSet(self.rhs)
rhs.discard(None)
except TypeError: # Unhashable items in self.rhs
rhs = [r for r in self.rhs if r is not None]
if not rhs:
raise EmptyResultSet
# rhs should be an iterable; use batch_process_rhs() to
# prepare/transform those values.
sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs)
placeholder = "(" + ", ".join(sqls) + ")"
return (placeholder, sqls_params)
return super().process_rhs(compiler, connection)
def get_rhs_op(self, connection, rhs):
return "IN %s" % rhs
def as_sql(self, compiler, connection):
max_in_list_size = connection.ops.max_in_list_size()
if (
self.rhs_is_direct_value()
and max_in_list_size
and len(self.rhs) > max_in_list_size
):
return self.split_parameter_list_as_sql(compiler, connection)
return super().as_sql(compiler, connection)
def split_parameter_list_as_sql(self, compiler, connection):
# This is a special case for databases which limit the number of
# elements which can appear in an 'IN' clause.
max_in_list_size = connection.ops.max_in_list_size()
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.batch_process_rhs(compiler, connection)
in_clause_elements = ["("]
params = []
for offset in range(0, len(rhs_params), max_in_list_size):
if offset > 0:
in_clause_elements.append(" OR ")
in_clause_elements.append("%s IN (" % lhs)
params.extend(lhs_params)
sqls = rhs[offset : offset + max_in_list_size]
sqls_params = rhs_params[offset : offset + max_in_list_size]
param_group = ", ".join(sqls)
in_clause_elements.append(param_group)
in_clause_elements.append(")")
params.extend(sqls_params)
in_clause_elements.append(")")
return "".join(in_clause_elements), params
class PatternLookup(BuiltinLookup):
param_pattern = "%%%s%%"
prepare_rhs = False
def get_rhs_op(self, connection, rhs):
# Assume we are in startswith. We need to produce SQL like:
# col LIKE %s, ['thevalue%']
# For Python values we can (and should) do that directly in Python,
# but if the value is for example reference to other column, then
# we need to add the % pattern match to the lookup by something like
# col LIKE othercol || '%%'
# So, for Python values we don't need any special pattern, but for
# SQL reference values or SQL transformations we need the correct
# pattern added.
if hasattr(self.rhs, "as_sql") or self.bilateral_transforms:
pattern = connection.pattern_ops[self.lookup_name].format(
connection.pattern_esc
)
return pattern.format(rhs)
else:
return super().get_rhs_op(connection, rhs)
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if self.rhs_is_direct_value() and params and not self.bilateral_transforms:
params[0] = self.param_pattern % connection.ops.prep_for_like_query(
params[0]
)
return rhs, params
@Field.register_lookup
class Contains(PatternLookup):
lookup_name = "contains"
@Field.register_lookup
class IContains(Contains):
lookup_name = "icontains"
@Field.register_lookup
class StartsWith(PatternLookup):
lookup_name = "startswith"
param_pattern = "%s%%"
@Field.register_lookup
class IStartsWith(StartsWith):
lookup_name = "istartswith"
@Field.register_lookup
class EndsWith(PatternLookup):
lookup_name = "endswith"
param_pattern = "%%%s"
@Field.register_lookup
class IEndsWith(EndsWith):
lookup_name = "iendswith"
@Field.register_lookup
class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
lookup_name = "range"
def get_rhs_op(self, connection, rhs):
return "BETWEEN %s AND %s" % (rhs[0], rhs[1])
@Field.register_lookup
class IsNull(BuiltinLookup):
lookup_name = "isnull"
prepare_rhs = False
def as_sql(self, compiler, connection):
if not isinstance(self.rhs, bool):
raise ValueError(
"The QuerySet value for an isnull lookup must be True or False."
)
sql, params = compiler.compile(self.lhs)
if self.rhs:
return "%s IS NULL" % sql, params
else:
return "%s IS NOT NULL" % sql, params
@Field.register_lookup
class Regex(BuiltinLookup):
lookup_name = "regex"
prepare_rhs = False
def as_sql(self, compiler, connection):
if self.lookup_name in connection.operators:
return super().as_sql(compiler, connection)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
sql_template = connection.ops.regex_lookup(self.lookup_name)
return sql_template % (lhs, rhs), lhs_params + rhs_params
@Field.register_lookup
class IRegex(Regex):
lookup_name = "iregex"
class YearLookup(Lookup):
def year_lookup_bounds(self, connection, year):
from django.db.models.functions import ExtractIsoYear
iso_year = isinstance(self.lhs, ExtractIsoYear)
output_field = self.lhs.lhs.output_field
if isinstance(output_field, DateTimeField):
bounds = connection.ops.year_lookup_bounds_for_datetime_field(
year,
iso_year=iso_year,
)
else:
bounds = connection.ops.year_lookup_bounds_for_date_field(
year,
iso_year=iso_year,
)
return bounds
def as_sql(self, compiler, connection):
# Avoid the extract operation if the rhs is a direct value to allow
# indexes to be used.
if self.rhs_is_direct_value():
# Skip the extract part by directly using the originating field,
# that is self.lhs.lhs.
lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, _ = self.process_rhs(compiler, connection)
rhs_sql = self.get_direct_rhs_sql(connection, rhs_sql)
start, finish = self.year_lookup_bounds(connection, self.rhs)
params.extend(self.get_bound_params(start, finish))
return "%s %s" % (lhs_sql, rhs_sql), params
return super().as_sql(compiler, connection)
def get_direct_rhs_sql(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
def get_bound_params(self, start, finish):
raise NotImplementedError(
"subclasses of YearLookup must provide a get_bound_params() method"
)
class YearExact(YearLookup, Exact):
def get_direct_rhs_sql(self, connection, rhs):
return "BETWEEN %s AND %s"
def get_bound_params(self, start, finish):
return (start, finish)
class YearGt(YearLookup, GreaterThan):
def get_bound_params(self, start, finish):
return (finish,)
class YearGte(YearLookup, GreaterThanOrEqual):
def get_bound_params(self, start, finish):
return (start,)
class YearLt(YearLookup, LessThan):
def get_bound_params(self, start, finish):
return (start,)
class YearLte(YearLookup, LessThanOrEqual):
def get_bound_params(self, start, finish):
return (finish,)
class UUIDTextMixin:
"""
Strip hyphens from a value when filtering a UUIDField on backends without
a native datatype for UUID.
"""
def process_rhs(self, qn, connection):
if not connection.features.has_native_uuid_field:
from django.db.models.functions import Replace
if self.rhs_is_direct_value():
self.rhs = Value(self.rhs)
self.rhs = Replace(
self.rhs, Value("-"), Value(""), output_field=CharField()
)
rhs, params = super().process_rhs(qn, connection)
return rhs, params
@UUIDField.register_lookup
class UUIDIExact(UUIDTextMixin, IExact):
pass
@UUIDField.register_lookup
class UUIDContains(UUIDTextMixin, Contains):
pass
@UUIDField.register_lookup
class UUIDIContains(UUIDTextMixin, IContains):
pass
@UUIDField.register_lookup
class UUIDStartsWith(UUIDTextMixin, StartsWith):
pass
@UUIDField.register_lookup
class UUIDIStartsWith(UUIDTextMixin, IStartsWith):
pass
@UUIDField.register_lookup
class UUIDEndsWith(UUIDTextMixin, EndsWith):
pass
@UUIDField.register_lookup
class UUIDIEndsWith(UUIDTextMixin, IEndsWith):
pass
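# A short sketch of how third-party code can hook into the same registration
# mechanism used throughout this module (the NotEqual lookup and its "ne"
# name are illustrative, not part of Django):
#
#     @Field.register_lookup
#     class NotEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
#         lookup_name = "ne"
#
#         def as_sql(self, compiler, connection):
#             lhs, params = self.process_lhs(compiler, connection)
#             rhs, rhs_params = self.process_rhs(compiler, connection)
#             params.extend(rhs_params)
#             return "%s <> %s" % (lhs, rhs), params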
|
2f71baf09ff9eb0ccc4d997efbaf3238b9924a021db72efa6cb2c88f23cea3cc | from enum import Enum
from django.db.models.expressions import ExpressionList, F
from django.db.models.indexes import IndexExpression
from django.db.models.query_utils import Q
from django.db.models.sql.query import Query
__all__ = ["CheckConstraint", "Deferrable", "UniqueConstraint"]
class BaseConstraint:
def __init__(self, name):
self.name = name
@property
def contains_expressions(self):
return False
def constraint_sql(self, model, schema_editor):
raise NotImplementedError("This method must be implemented by a subclass.")
def create_sql(self, model, schema_editor):
raise NotImplementedError("This method must be implemented by a subclass.")
def remove_sql(self, model, schema_editor):
raise NotImplementedError("This method must be implemented by a subclass.")
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
path = path.replace("django.db.models.constraints", "django.db.models")
return (path, (), {"name": self.name})
def clone(self):
_, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
class CheckConstraint(BaseConstraint):
def __init__(self, *, check, name):
self.check = check
if not getattr(check, "conditional", False):
raise TypeError(
"CheckConstraint.check must be a Q instance or boolean expression."
)
super().__init__(name)
def _get_check_sql(self, model, schema_editor):
query = Query(model=model, alias_cols=False)
where = query.build_where(self.check)
compiler = query.get_compiler(connection=schema_editor.connection)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def constraint_sql(self, model, schema_editor):
check = self._get_check_sql(model, schema_editor)
return schema_editor._check_sql(self.name, check)
def create_sql(self, model, schema_editor):
check = self._get_check_sql(model, schema_editor)
return schema_editor._create_check_sql(model, self.name, check)
def remove_sql(self, model, schema_editor):
return schema_editor._delete_check_sql(model, self.name)
def __repr__(self):
return "<%s: check=%s name=%s>" % (
self.__class__.__qualname__,
self.check,
repr(self.name),
)
def __eq__(self, other):
if isinstance(other, CheckConstraint):
return self.name == other.name and self.check == other.check
return super().__eq__(other)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
kwargs["check"] = self.check
return path, args, kwargs
class Deferrable(Enum):
DEFERRED = "deferred"
IMMEDIATE = "immediate"
# A similar format was proposed for Python 3.10.
def __repr__(self):
return f"{self.__class__.__qualname__}.{self._name_}"
class UniqueConstraint(BaseConstraint):
def __init__(
self,
*expressions,
fields=(),
name=None,
condition=None,
deferrable=None,
include=None,
opclasses=(),
):
if not name:
raise ValueError("A unique constraint must be named.")
if not expressions and not fields:
raise ValueError(
"At least one field or expression is required to define a "
"unique constraint."
)
if expressions and fields:
raise ValueError(
"UniqueConstraint.fields and expressions are mutually exclusive."
)
if not isinstance(condition, (type(None), Q)):
raise ValueError("UniqueConstraint.condition must be a Q instance.")
if condition and deferrable:
raise ValueError("UniqueConstraint with conditions cannot be deferred.")
if include and deferrable:
raise ValueError("UniqueConstraint with include fields cannot be deferred.")
if opclasses and deferrable:
raise ValueError("UniqueConstraint with opclasses cannot be deferred.")
if expressions and deferrable:
raise ValueError("UniqueConstraint with expressions cannot be deferred.")
if expressions and opclasses:
raise ValueError(
"UniqueConstraint.opclasses cannot be used with expressions. "
"Use django.contrib.postgres.indexes.OpClass() instead."
)
if not isinstance(deferrable, (type(None), Deferrable)):
raise ValueError(
"UniqueConstraint.deferrable must be a Deferrable instance."
)
if not isinstance(include, (type(None), list, tuple)):
raise ValueError("UniqueConstraint.include must be a list or tuple.")
if not isinstance(opclasses, (list, tuple)):
raise ValueError("UniqueConstraint.opclasses must be a list or tuple.")
if opclasses and len(fields) != len(opclasses):
raise ValueError(
"UniqueConstraint.fields and UniqueConstraint.opclasses must "
"have the same number of elements."
)
self.fields = tuple(fields)
self.condition = condition
self.deferrable = deferrable
self.include = tuple(include) if include else ()
self.opclasses = opclasses
self.expressions = tuple(
F(expression) if isinstance(expression, str) else expression
for expression in expressions
)
super().__init__(name)
@property
def contains_expressions(self):
return bool(self.expressions)
def _get_condition_sql(self, model, schema_editor):
if self.condition is None:
return None
query = Query(model=model, alias_cols=False)
where = query.build_where(self.condition)
compiler = query.get_compiler(connection=schema_editor.connection)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def _get_index_expressions(self, model, schema_editor):
if not self.expressions:
return None
index_expressions = []
for expression in self.expressions:
index_expression = IndexExpression(expression)
index_expression.set_wrapper_classes(schema_editor.connection)
index_expressions.append(index_expression)
return ExpressionList(*index_expressions).resolve_expression(
Query(model, alias_cols=False),
)
def constraint_sql(self, model, schema_editor):
fields = [model._meta.get_field(field_name) for field_name in self.fields]
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
condition = self._get_condition_sql(model, schema_editor)
expressions = self._get_index_expressions(model, schema_editor)
return schema_editor._unique_sql(
model,
fields,
self.name,
condition=condition,
deferrable=self.deferrable,
include=include,
opclasses=self.opclasses,
expressions=expressions,
)
def create_sql(self, model, schema_editor):
fields = [model._meta.get_field(field_name) for field_name in self.fields]
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
condition = self._get_condition_sql(model, schema_editor)
expressions = self._get_index_expressions(model, schema_editor)
return schema_editor._create_unique_sql(
model,
fields,
self.name,
condition=condition,
deferrable=self.deferrable,
include=include,
opclasses=self.opclasses,
expressions=expressions,
)
def remove_sql(self, model, schema_editor):
condition = self._get_condition_sql(model, schema_editor)
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
expressions = self._get_index_expressions(model, schema_editor)
return schema_editor._delete_unique_sql(
model,
self.name,
condition=condition,
deferrable=self.deferrable,
include=include,
opclasses=self.opclasses,
expressions=expressions,
)
def __repr__(self):
return "<%s:%s%s%s%s%s%s%s>" % (
self.__class__.__qualname__,
"" if not self.fields else " fields=%s" % repr(self.fields),
"" if not self.expressions else " expressions=%s" % repr(self.expressions),
" name=%s" % repr(self.name),
"" if self.condition is None else " condition=%s" % self.condition,
"" if self.deferrable is None else " deferrable=%r" % self.deferrable,
"" if not self.include else " include=%s" % repr(self.include),
"" if not self.opclasses else " opclasses=%s" % repr(self.opclasses),
)
def __eq__(self, other):
if isinstance(other, UniqueConstraint):
return (
self.name == other.name
and self.fields == other.fields
and self.condition == other.condition
and self.deferrable == other.deferrable
and self.include == other.include
and self.opclasses == other.opclasses
and self.expressions == other.expressions
)
return super().__eq__(other)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fields:
kwargs["fields"] = self.fields
if self.condition:
kwargs["condition"] = self.condition
if self.deferrable:
kwargs["deferrable"] = self.deferrable
if self.include:
kwargs["include"] = self.include
if self.opclasses:
kwargs["opclasses"] = self.opclasses
return path, self.expressions, kwargs
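# A brief usage sketch (the Room model and its fields are assumptions):
# constraints are declared in a model's Meta.constraints list.
#
#     class Room(models.Model):
#         number = models.IntegerField()
#         building = models.CharField(max_length=100)
#         deleted = models.BooleanField(default=False)
#
#         class Meta:
#             constraints = [
#                 UniqueConstraint(
#                     fields=["building", "number"],
#                     condition=Q(deleted=False),
#                     name="unique_active_room",
#                 ),
#                 CheckConstraint(check=Q(number__gte=0), name="room_number_gte_0"),
#             ]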
|
7810e7c6887bfcf6361fc006b99e45528c6c12c7299e632e60432acdf3d116a9 | """
Helpers to manipulate deferred DDL statements that might need to be adjusted or
discarded when executing a migration.
"""
from copy import deepcopy
class Reference:
"""Base class that defines the reference interface."""
def references_table(self, table):
"""
Return whether or not this instance references the specified table.
"""
return False
def references_column(self, table, column):
"""
Return whether or not this instance references the specified column.
"""
return False
def rename_table_references(self, old_table, new_table):
"""
Rename all references from the old_table to the new_table.
"""
pass
def rename_column_references(self, table, old_column, new_column):
"""
Rename all references to the old_column to the new_column.
"""
pass
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, str(self))
def __str__(self):
raise NotImplementedError(
"Subclasses must define how they should be converted to string."
)
class Table(Reference):
"""Hold a reference to a table."""
def __init__(self, table, quote_name):
self.table = table
self.quote_name = quote_name
def references_table(self, table):
return self.table == table
def rename_table_references(self, old_table, new_table):
if self.table == old_table:
self.table = new_table
def __str__(self):
return self.quote_name(self.table)
class TableColumns(Table):
"""Base class for references to multiple columns of a table."""
def __init__(self, table, columns):
self.table = table
self.columns = columns
def references_column(self, table, column):
return self.table == table and column in self.columns
def rename_column_references(self, table, old_column, new_column):
if self.table == table:
for index, column in enumerate(self.columns):
if column == old_column:
self.columns[index] = new_column
class Columns(TableColumns):
"""Hold a reference to one or many columns."""
def __init__(self, table, columns, quote_name, col_suffixes=()):
self.quote_name = quote_name
self.col_suffixes = col_suffixes
super().__init__(table, columns)
def __str__(self):
def col_str(column, idx):
col = self.quote_name(column)
try:
suffix = self.col_suffixes[idx]
if suffix:
col = "{} {}".format(col, suffix)
except IndexError:
pass
return col
return ", ".join(
col_str(column, idx) for idx, column in enumerate(self.columns)
)
class IndexName(TableColumns):
"""Hold a reference to an index name."""
def __init__(self, table, columns, suffix, create_index_name):
self.suffix = suffix
self.create_index_name = create_index_name
super().__init__(table, columns)
def __str__(self):
return self.create_index_name(self.table, self.columns, self.suffix)
class IndexColumns(Columns):
def __init__(self, table, columns, quote_name, col_suffixes=(), opclasses=()):
self.opclasses = opclasses
super().__init__(table, columns, quote_name, col_suffixes)
def __str__(self):
def col_str(column, idx):
# Index.__init__() guarantees that self.opclasses is the same
# length as self.columns.
col = "{} {}".format(self.quote_name(column), self.opclasses[idx])
try:
suffix = self.col_suffixes[idx]
if suffix:
col = "{} {}".format(col, suffix)
except IndexError:
pass
return col
return ", ".join(
col_str(column, idx) for idx, column in enumerate(self.columns)
)
class ForeignKeyName(TableColumns):
"""Hold a reference to a foreign key name."""
def __init__(
self,
from_table,
from_columns,
to_table,
to_columns,
suffix_template,
create_fk_name,
):
self.to_reference = TableColumns(to_table, to_columns)
self.suffix_template = suffix_template
self.create_fk_name = create_fk_name
super().__init__(
from_table,
from_columns,
)
def references_table(self, table):
return super().references_table(table) or self.to_reference.references_table(
table
)
def references_column(self, table, column):
return super().references_column(
table, column
) or self.to_reference.references_column(table, column)
def rename_table_references(self, old_table, new_table):
super().rename_table_references(old_table, new_table)
self.to_reference.rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
super().rename_column_references(table, old_column, new_column)
self.to_reference.rename_column_references(table, old_column, new_column)
def __str__(self):
suffix = self.suffix_template % {
"to_table": self.to_reference.table,
"to_column": self.to_reference.columns[0],
}
return self.create_fk_name(self.table, self.columns, suffix)
class Statement(Reference):
"""
Statement template and formatting parameters container.
Allows keeping a reference to a statement without interpolating identifiers
that might have to be adjusted if they're referencing a table or column
that is removed.
"""
def __init__(self, template, **parts):
self.template = template
self.parts = parts
def references_table(self, table):
return any(
hasattr(part, "references_table") and part.references_table(table)
for part in self.parts.values()
)
def references_column(self, table, column):
return any(
hasattr(part, "references_column") and part.references_column(table, column)
for part in self.parts.values()
)
def rename_table_references(self, old_table, new_table):
for part in self.parts.values():
if hasattr(part, "rename_table_references"):
part.rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
for part in self.parts.values():
if hasattr(part, "rename_column_references"):
part.rename_column_references(table, old_column, new_column)
def __str__(self):
return self.template % self.parts
class Expressions(TableColumns):
def __init__(self, table, expressions, compiler, quote_value):
self.compiler = compiler
self.expressions = expressions
self.quote_value = quote_value
columns = [
col.target.column
for col in self.compiler.query._gen_cols([self.expressions])
]
super().__init__(table, columns)
def rename_table_references(self, old_table, new_table):
if self.table != old_table:
return
self.expressions = self.expressions.relabeled_clone({old_table: new_table})
super().rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
if self.table != table:
return
expressions = deepcopy(self.expressions)
self.columns = []
for col in self.compiler.query._gen_cols([expressions]):
if col.target.column == old_column:
col.target.column = new_column
self.columns.append(col.target.column)
self.expressions = expressions
def __str__(self):
sql, params = self.compiler.compile(self.expressions)
params = map(self.quote_value, params)
return sql % tuple(params)
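# A minimal sketch of how these reference objects compose (quote_name here
# is an assumed helper, for illustration only):
#
#     quote_name = lambda name: '"%s"' % name
#     stmt = Statement(
#         "ALTER TABLE %(table)s RENAME TO %(new_table)s",
#         table=Table("app_book", quote_name),
#         new_table=Table("app_novel", quote_name),
#     )
#     stmt.references_table("app_book")   # True
#     stmt.rename_table_references("app_book", "app_library_book")
#     str(stmt)  # the deferred SQL now names "app_library_book"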
|
c6035b105d5a34ee5eff6c5a690c2da1c31402242fbb7c5fef4ceb1d508309f1 | import datetime
import decimal
import functools
import logging
import time
from contextlib import contextmanager
from django.db import NotSupportedError
from django.utils.crypto import md5
from django.utils.dateparse import parse_time
logger = logging.getLogger("django.db.backends")
class CursorWrapper:
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(["fetchone", "fetchmany", "fetchall", "nextset"])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
yield from self.cursor
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Close instead of passing through to avoid backend-specific behavior
# (#17671). Catch errors liberally because errors in cleanup code
# aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None, kparams=None):
# Keyword parameters for callproc aren't supported in PEP 249, but the
# database driver may support them (e.g. cx_Oracle).
if kparams is not None and not self.db.features.supports_callproc_kwargs:
raise NotSupportedError(
"Keyword parameters for callproc are not supported on this "
"database backend."
)
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None and kparams is None:
return self.cursor.callproc(procname)
elif kparams is None:
return self.cursor.callproc(procname, params)
else:
params = params or ()
return self.cursor.callproc(procname, params, kparams)
def execute(self, sql, params=None):
return self._execute_with_wrappers(
sql, params, many=False, executor=self._execute
)
def executemany(self, sql, param_list):
return self._execute_with_wrappers(
sql, param_list, many=True, executor=self._executemany
)
def _execute_with_wrappers(self, sql, params, many, executor):
context = {"connection": self.db, "cursor": self}
for wrapper in reversed(self.db.execute_wrappers):
executor = functools.partial(wrapper, executor)
return executor(sql, params, many, context)
def _execute(self, sql, params, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
# params default might be backend specific.
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def _executemany(self, sql, param_list, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
with self.debug_sql(sql, params, use_last_executed_query=True):
return super().execute(sql, params)
def executemany(self, sql, param_list):
with self.debug_sql(sql, param_list, many=True):
return super().executemany(sql, param_list)
@contextmanager
def debug_sql(
self, sql=None, params=None, use_last_executed_query=False, many=False
):
start = time.monotonic()
try:
yield
finally:
stop = time.monotonic()
duration = stop - start
if use_last_executed_query:
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
try:
times = len(params) if many else ""
except TypeError:
# params could be an iterator.
times = "?"
self.db.queries_log.append(
{
"sql": "%s times: %s" % (times, sql) if many else sql,
"time": "%.3f" % duration,
}
)
logger.debug(
"(%.3f) %s; args=%s; alias=%s",
duration,
sql,
params,
self.db.alias,
extra={
"duration": duration,
"sql": sql,
"params": params,
"alias": self.db.alias,
},
)
def split_tzname_delta(tzname):
"""
Split a time zone name into a 3-tuple of (name, sign, offset).
"""
for sign in ["+", "-"]:
if sign in tzname:
name, offset = tzname.rsplit(sign, 1)
if offset and parse_time(offset):
return name, sign, offset
return tzname, None, None
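# For example, given the parsing above:
#
#     split_tzname_delta("Asia/Ulan_Bator")         -> ("Asia/Ulan_Bator", None, None)
#     split_tzname_delta("America/Sao_Paulo-03:00") -> ("America/Sao_Paulo", "-", "03:00")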
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return (
datetime.date(*map(int, s.split("-"))) if s else None
) # return None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(":")
if "." in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split(".")
else:
microseconds = "0"
return datetime.time(
int(hour), int(minutes), int(seconds), int((microseconds + "000000")[:6])
)
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if " " not in s:
return typecast_date(s)
d, t = s.split()
# Remove timezone information.
if "-" in t:
t, _ = t.split("-", 1)
elif "+" in t:
t, _ = t.split("+", 1)
dates = d.split("-")
times = t.split(":")
seconds = times[2]
if "." in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split(".")
else:
microseconds = "0"
return datetime.datetime(
int(dates[0]),
int(dates[1]),
int(dates[2]),
int(times[0]),
int(times[1]),
int(seconds),
int((microseconds + "000000")[:6]),
)
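# Illustrative sketch (added for exposition): the string-to-Python converters
# above in action. Note that a trailing UTC offset is stripped, not applied.
def _example_typecasts():
    assert typecast_date("2005-07-29") == datetime.date(2005, 7, 29)
    assert typecast_time("15:48:00.590358") == datetime.time(15, 48, 0, 590358)
    assert typecast_timestamp(
        "2005-07-29 15:48:00.590358-05"
    ) == datetime.datetime(2005, 7, 29, 15, 48, 0, 590358)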
###############################################
# Converters from Python to database (string) #
###############################################
def split_identifier(identifier):
"""
Split an SQL identifier into a two element tuple of (namespace, name).
    The identifier could be a table, column, or sequence name, possibly
    prefixed by a namespace.
"""
try:
namespace, name = identifier.split('"."')
except ValueError:
namespace, name = "", identifier
return namespace.strip('"'), name.strip('"')
def truncate_name(identifier, length=None, hash_len=4):
"""
Shorten an SQL identifier to a repeatable mangled version with the given
length.
    If a quote-stripped name contains a namespace, e.g. USERNAME"."TABLE,
truncate the table portion only.
"""
namespace, name = split_identifier(identifier)
if length is None or len(name) <= length:
return identifier
digest = names_digest(name, length=hash_len)
return "%s%s%s" % (
'%s"."' % namespace if namespace else "",
name[: length - hash_len],
digest,
)
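# Illustrative sketch (added for exposition): truncate_name() leaves short
# identifiers alone and shortens long ones to a repeatable, hash-suffixed
# form of exactly the requested length.
def _example_truncate_name():
    assert truncate_name("some_table", length=30) == "some_table"
    shortened = truncate_name("a_very_long_table_name_indeed", length=10)
    assert len(shortened) == 10 and shortened.startswith("a_very")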
def names_digest(*args, length):
"""
Generate a 32-bit digest of a set of arguments that can be used to shorten
identifying names.
"""
h = md5(usedforsecurity=False)
for arg in args:
h.update(arg.encode())
return h.hexdigest()[:length]
def format_number(value, max_digits, decimal_places):
"""
Format a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(
decimal.Decimal(1).scaleb(-decimal_places), context=context
)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
def strip_quotes(table_name):
"""
Strip quotes off of quoted table names to make them safe for use in index
names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
scheme) becomes 'USER"."TABLE'.
"""
has_quotes = table_name.startswith('"') and table_name.endswith('"')
return table_name[1:-1] if has_quotes else table_name
|
aba444bda7e6a1fe476fa7be3f1b1f7290956acc9d05f2276b63debff6f973e5 | from .fields import AddField, AlterField, RemoveField, RenameField
from .models import (
AddConstraint,
AddIndex,
AlterIndexTogether,
AlterModelManagers,
AlterModelOptions,
AlterModelTable,
AlterOrderWithRespectTo,
AlterUniqueTogether,
CreateModel,
DeleteModel,
RemoveConstraint,
RemoveIndex,
RenameModel,
)
from .special import RunPython, RunSQL, SeparateDatabaseAndState
__all__ = [
"CreateModel",
"DeleteModel",
"AlterModelTable",
"AlterUniqueTogether",
"RenameModel",
"AlterIndexTogether",
"AlterModelOptions",
"AddIndex",
"RemoveIndex",
"AddField",
"RemoveField",
"AlterField",
"RenameField",
"AddConstraint",
"RemoveConstraint",
"SeparateDatabaseAndState",
"RunSQL",
"RunPython",
"AlterOrderWithRespectTo",
"AlterModelManagers",
]
|
d6c663c049dc44442ee636e8441427fdf70e1ac8f3badd167455b36be131e94d | from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.migrations.utils import field_references, resolve_relation
from django.db.models.options import normalize_together
from django.utils.functional import cached_property
from .fields import AddField, AlterField, FieldOperation, RemoveField, RenameField
def _check_for_duplicates(arg_name, objs):
used_vals = set()
for val in objs:
if val in used_vals:
raise ValueError(
"Found duplicate value %s in CreateModel %s argument." % (val, arg_name)
)
used_vals.add(val)
class ModelOperation(Operation):
def __init__(self, name):
self.name = name
@cached_property
def name_lower(self):
return self.name.lower()
def references_model(self, name, app_label):
return name.lower() == self.name_lower
def reduce(self, operation, app_label):
return super().reduce(operation, app_label) or self.can_reduce_through(
operation, app_label
)
def can_reduce_through(self, operation, app_label):
return not operation.references_model(self.name, app_label)
class CreateModel(ModelOperation):
"""Create a model's table."""
serialization_expand_args = ["fields", "options", "managers"]
def __init__(self, name, fields, options=None, bases=None, managers=None):
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
self.managers = managers or []
super().__init__(name)
# Sanity-check that there are no duplicated field names, bases, or
# manager names
_check_for_duplicates("fields", (name for name, _ in self.fields))
_check_for_duplicates(
"bases",
(
base._meta.label_lower
if hasattr(base, "_meta")
else base.lower()
if isinstance(base, str)
else base
for base in self.bases
),
)
_check_for_duplicates("managers", (name for name, _ in self.managers))
def deconstruct(self):
kwargs = {
"name": self.name,
"fields": self.fields,
}
if self.options:
kwargs["options"] = self.options
if self.bases and self.bases != (models.Model,):
kwargs["bases"] = self.bases
if self.managers and self.managers != [("objects", models.Manager())]:
kwargs["managers"] = self.managers
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.add_model(
ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
list(self.managers),
)
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % (
"proxy " if self.options.get("proxy", False) else "",
self.name,
)
@property
def migration_name_fragment(self):
return self.name_lower
def references_model(self, name, app_label):
name_lower = name.lower()
if name_lower == self.name_lower:
return True
# Check we didn't inherit from the model
reference_model_tuple = (app_label, name_lower)
for base in self.bases:
if (
base is not models.Model
and isinstance(base, (models.base.ModelBase, str))
and resolve_relation(base, app_label) == reference_model_tuple
):
return True
# Check we have no FKs/M2Ms with it
for _name, field in self.fields:
if field_references(
(app_label, self.name_lower), field, reference_model_tuple
):
return True
return False
def reduce(self, operation, app_label):
if (
isinstance(operation, DeleteModel)
and self.name_lower == operation.name_lower
and not self.options.get("proxy", False)
):
return []
elif (
isinstance(operation, RenameModel)
and self.name_lower == operation.old_name_lower
):
return [
CreateModel(
operation.new_name,
fields=self.fields,
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif (
isinstance(operation, AlterModelOptions)
and self.name_lower == operation.name_lower
):
options = {**self.options, **operation.options}
for key in operation.ALTER_OPTION_KEYS:
if key not in operation.options:
options.pop(key, None)
return [
CreateModel(
self.name,
fields=self.fields,
options=options,
bases=self.bases,
managers=self.managers,
),
]
elif (
isinstance(operation, AlterTogetherOptionOperation)
and self.name_lower == operation.name_lower
):
return [
CreateModel(
self.name,
fields=self.fields,
options={
**self.options,
**{operation.option_name: operation.option_value},
},
bases=self.bases,
managers=self.managers,
),
]
elif (
isinstance(operation, AlterOrderWithRespectTo)
and self.name_lower == operation.name_lower
):
return [
CreateModel(
self.name,
fields=self.fields,
options={
**self.options,
"order_with_respect_to": operation.order_with_respect_to,
},
bases=self.bases,
managers=self.managers,
),
]
elif (
isinstance(operation, FieldOperation)
and self.name_lower == operation.model_name_lower
):
if isinstance(operation, AddField):
return [
CreateModel(
self.name,
fields=self.fields + [(operation.name, operation.field)],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, AlterField):
return [
CreateModel(
self.name,
fields=[
(n, operation.field if n == operation.name else v)
for n, v in self.fields
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RemoveField):
options = self.options.copy()
for option_name in ("unique_together", "index_together"):
option = options.pop(option_name, None)
if option:
option = set(
filter(
bool,
(
tuple(
f for f in fields if f != operation.name_lower
)
for fields in option
),
)
)
if option:
options[option_name] = option
order_with_respect_to = options.get("order_with_respect_to")
if order_with_respect_to == operation.name_lower:
del options["order_with_respect_to"]
return [
CreateModel(
self.name,
fields=[
(n, v)
for n, v in self.fields
if n.lower() != operation.name_lower
],
options=options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RenameField):
options = self.options.copy()
for option_name in ("unique_together", "index_together"):
option = options.get(option_name)
if option:
options[option_name] = {
tuple(
operation.new_name if f == operation.old_name else f
for f in fields
)
for fields in option
}
order_with_respect_to = options.get("order_with_respect_to")
if order_with_respect_to == operation.old_name:
options["order_with_respect_to"] = operation.new_name
return [
CreateModel(
self.name,
fields=[
(operation.new_name if n == operation.old_name else n, v)
for n, v in self.fields
],
options=options,
bases=self.bases,
managers=self.managers,
),
]
return super().reduce(operation, app_label)
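# Illustrative sketch (added for exposition; model/field names are
# hypothetical): the reduce() machinery above is what lets the migration
# optimizer fold a later AddField into the original CreateModel.
def _example_create_model_reduce():
    from django.db import migrations, models
    from django.db.migrations.optimizer import MigrationOptimizer
    operations = [
        migrations.CreateModel(
            "Author", fields=[("id", models.AutoField(primary_key=True))]
        ),
        migrations.AddField("Author", "name", models.CharField(max_length=100)),
    ]
    optimized = MigrationOptimizer().optimize(operations, app_label="library")
    assert len(optimized) == 1  # a single CreateModel carrying both fields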
class DeleteModel(ModelOperation):
"""Drop a model's table."""
def deconstruct(self):
kwargs = {
"name": self.name,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.remove_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def references_model(self, name, app_label):
# The deleted model could be referencing the specified model through
# related fields.
return True
def describe(self):
return "Delete model %s" % self.name
@property
def migration_name_fragment(self):
return "delete_%s" % self.name_lower
class RenameModel(ModelOperation):
"""Rename a model."""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
super().__init__(old_name)
@cached_property
def old_name_lower(self):
return self.old_name.lower()
@cached_property
def new_name_lower(self):
return self.new_name.lower()
def deconstruct(self):
kwargs = {
"old_name": self.old_name,
"new_name": self.new_name,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.rename_model(app_label, self.old_name, self.new_name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.new_name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.old_name)
old_db_table = old_model._meta.db_table
new_db_table = new_model._meta.db_table
# Don't alter when a table name is not changed.
if old_db_table == new_db_table:
return
# Move the main table
schema_editor.alter_db_table(new_model, old_db_table, new_db_table)
# Alter the fields pointing to us
for related_object in old_model._meta.related_objects:
if related_object.related_model == old_model:
model = new_model
related_key = (app_label, self.new_name_lower)
else:
model = related_object.related_model
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
to_field = to_state.apps.get_model(*related_key)._meta.get_field(
related_object.field.name
)
schema_editor.alter_field(
model,
related_object.field,
to_field,
)
# Rename M2M fields whose name is based on this model's name.
fields = zip(
old_model._meta.local_many_to_many, new_model._meta.local_many_to_many
)
for (old_field, new_field) in fields:
# Skip self-referential fields as these are renamed above.
if (
new_field.model == new_field.related_model
or not new_field.remote_field.through._meta.auto_created
):
continue
# Rename the M2M table that's based on this model's name.
old_m2m_model = old_field.remote_field.through
new_m2m_model = new_field.remote_field.through
schema_editor.alter_db_table(
new_m2m_model,
old_m2m_model._meta.db_table,
new_m2m_model._meta.db_table,
)
# Rename the column in the M2M table that's based on this
# model's name.
schema_editor.alter_field(
new_m2m_model,
old_m2m_model._meta.get_field(old_model._meta.model_name),
new_m2m_model._meta.get_field(new_model._meta.model_name),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.new_name_lower, self.old_name_lower = (
self.old_name_lower,
self.new_name_lower,
)
self.new_name, self.old_name = self.old_name, self.new_name
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.new_name_lower, self.old_name_lower = (
self.old_name_lower,
self.new_name_lower,
)
self.new_name, self.old_name = self.old_name, self.new_name
def references_model(self, name, app_label):
return (
name.lower() == self.old_name_lower or name.lower() == self.new_name_lower
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
@property
def migration_name_fragment(self):
return "rename_%s_%s" % (self.old_name_lower, self.new_name_lower)
def reduce(self, operation, app_label):
if (
isinstance(operation, RenameModel)
and self.new_name_lower == operation.old_name_lower
):
return [
RenameModel(
self.old_name,
operation.new_name,
),
]
# Skip `ModelOperation.reduce` as we want to run `references_model`
# against self.new_name.
return super(ModelOperation, self).reduce(
operation, app_label
) or not operation.references_model(self.new_name, app_label)
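# Illustrative sketch (added for exposition; model names are hypothetical):
# chained renames collapse into a single operation via RenameModel.reduce()
# above.
def _example_rename_model_reduce():
    from django.db import migrations
    from django.db.migrations.optimizer import MigrationOptimizer
    operations = [
        migrations.RenameModel("Author", "Writer"),
        migrations.RenameModel("Writer", "Scribe"),
    ]
    result = MigrationOptimizer().optimize(operations, app_label="library")
    assert len(result) == 1 and result[0].new_name == "Scribe"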
class ModelOptionOperation(ModelOperation):
def reduce(self, operation, app_label):
if (
isinstance(operation, (self.__class__, DeleteModel))
and self.name_lower == operation.name_lower
):
return [operation]
return super().reduce(operation, app_label)
class AlterModelTable(ModelOptionOperation):
"""Rename a model's table."""
def __init__(self, name, table):
self.table = table
super().__init__(name)
def deconstruct(self):
kwargs = {
"name": self.name,
"table": self.table,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(app_label, self.name_lower, {"db_table": self.table})
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Rename M2M fields whose name is based on this model's db_table
for (old_field, new_field) in zip(
old_model._meta.local_many_to_many, new_model._meta.local_many_to_many
):
if new_field.remote_field.through._meta.auto_created:
schema_editor.alter_db_table(
new_field.remote_field.through,
old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Rename table for %s to %s" % (
self.name,
self.table if self.table is not None else "(default)",
)
@property
def migration_name_fragment(self):
return "alter_%s_table" % self.name_lower
class AlterTogetherOptionOperation(ModelOptionOperation):
option_name = None
def __init__(self, name, option_value):
if option_value:
option_value = set(normalize_together(option_value))
setattr(self, self.option_name, option_value)
super().__init__(name)
@cached_property
def option_value(self):
return getattr(self, self.option_name)
def deconstruct(self):
kwargs = {
"name": self.name,
self.option_name: self.option_value,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(
app_label,
self.name_lower,
{self.option_name: self.option_value},
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
alter_together = getattr(schema_editor, "alter_%s" % self.option_name)
alter_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label):
return self.references_model(model_name, app_label) and (
not self.option_value
or any((name in fields) for fields in self.option_value)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (
self.option_name,
self.name,
len(self.option_value or ""),
)
@property
def migration_name_fragment(self):
return "alter_%s_%s" % (self.name_lower, self.option_name)
def can_reduce_through(self, operation, app_label):
return super().can_reduce_through(operation, app_label) or (
isinstance(operation, AlterTogetherOptionOperation)
and type(operation) is not type(self)
)
class AlterUniqueTogether(AlterTogetherOptionOperation):
"""
Change the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
option_name = "unique_together"
def __init__(self, name, unique_together):
super().__init__(name, unique_together)
class AlterIndexTogether(AlterTogetherOptionOperation):
"""
Change the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
option_name = "index_together"
def __init__(self, name, index_together):
super().__init__(name, index_together)
class AlterOrderWithRespectTo(ModelOptionOperation):
"""Represent a change with the order_with_respect_to option."""
option_name = "order_with_respect_to"
def __init__(self, name, order_with_respect_to):
self.order_with_respect_to = order_with_respect_to
super().__init__(name)
def deconstruct(self):
kwargs = {
"name": self.name,
"order_with_respect_to": self.order_with_respect_to,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(
app_label,
self.name_lower,
{self.option_name: self.order_with_respect_to},
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.name)
# Remove a field if we need to
if (
from_model._meta.order_with_respect_to
and not to_model._meta.order_with_respect_to
):
schema_editor.remove_field(
from_model, from_model._meta.get_field("_order")
)
# Add a field if we need to (altering the column is untouched as
# it's likely a rename)
elif (
to_model._meta.order_with_respect_to
and not from_model._meta.order_with_respect_to
):
field = to_model._meta.get_field("_order")
if not field.has_default():
field.default = 0
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label):
return self.references_model(model_name, app_label) and (
self.order_with_respect_to is None or name == self.order_with_respect_to
)
def describe(self):
return "Set order_with_respect_to on %s to %s" % (
self.name,
self.order_with_respect_to,
)
@property
def migration_name_fragment(self):
return "alter_%s_order_with_respect_to" % self.name_lower
class AlterModelOptions(ModelOptionOperation):
"""
Set new model options that don't directly affect the database schema
(like verbose_name, permissions, ordering). Python code in migrations
may still need them.
"""
# Model options we want to compare and preserve in an AlterModelOptions op
ALTER_OPTION_KEYS = [
"base_manager_name",
"default_manager_name",
"default_related_name",
"get_latest_by",
"managed",
"ordering",
"permissions",
"default_permissions",
"select_on_save",
"verbose_name",
"verbose_name_plural",
]
def __init__(self, name, options):
self.options = options
super().__init__(name)
def deconstruct(self):
kwargs = {
"name": self.name,
"options": self.options,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(
app_label,
self.name_lower,
self.options,
self.ALTER_OPTION_KEYS,
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def describe(self):
return "Change Meta options on %s" % self.name
@property
def migration_name_fragment(self):
return "alter_%s_options" % self.name_lower
class AlterModelManagers(ModelOptionOperation):
"""Alter the model's managers."""
serialization_expand_args = ["managers"]
def __init__(self, name, managers):
self.managers = managers
super().__init__(name)
def deconstruct(self):
return (self.__class__.__qualname__, [self.name, self.managers], {})
def state_forwards(self, app_label, state):
state.alter_model_managers(app_label, self.name_lower, self.managers)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def describe(self):
return "Change managers on %s" % self.name
@property
def migration_name_fragment(self):
return "alter_%s_managers" % self.name_lower
class IndexOperation(Operation):
option_name = "indexes"
@cached_property
def model_name_lower(self):
return self.model_name.lower()
class AddIndex(IndexOperation):
"""Add an index on a model."""
def __init__(self, model_name, index):
self.model_name = model_name
if not index.name:
raise ValueError(
"Indexes passed to AddIndex operations require a name "
"argument. %r doesn't have one." % index
)
self.index = index
def state_forwards(self, app_label, state):
state.add_index(app_label, self.model_name_lower, self.index)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_index(model, self.index)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.remove_index(model, self.index)
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"index": self.index,
}
return (
self.__class__.__qualname__,
[],
kwargs,
)
def describe(self):
if self.index.expressions:
return "Create index %s on %s on model %s" % (
self.index.name,
", ".join([str(expression) for expression in self.index.expressions]),
self.model_name,
)
return "Create index %s on field(s) %s of model %s" % (
self.index.name,
", ".join(self.index.fields),
self.model_name,
)
@property
def migration_name_fragment(self):
return "%s_%s" % (self.model_name_lower, self.index.name.lower())
class RemoveIndex(IndexOperation):
"""Remove an index from a model."""
def __init__(self, model_name, name):
self.model_name = model_name
self.name = name
def state_forwards(self, app_label, state):
state.remove_index(app_label, self.model_name_lower, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
from_model_state = from_state.models[app_label, self.model_name_lower]
index = from_model_state.get_index_by_name(self.name)
schema_editor.remove_index(model, index)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
to_model_state = to_state.models[app_label, self.model_name_lower]
index = to_model_state.get_index_by_name(self.name)
schema_editor.add_index(model, index)
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"name": self.name,
}
return (
self.__class__.__qualname__,
[],
kwargs,
)
def describe(self):
return "Remove index %s from %s" % (self.name, self.model_name)
@property
def migration_name_fragment(self):
return "remove_%s_%s" % (self.model_name_lower, self.name.lower())
class AddConstraint(IndexOperation):
option_name = "constraints"
def __init__(self, model_name, constraint):
self.model_name = model_name
self.constraint = constraint
def state_forwards(self, app_label, state):
state.add_constraint(app_label, self.model_name_lower, self.constraint)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_constraint(model, self.constraint)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.remove_constraint(model, self.constraint)
def deconstruct(self):
return (
self.__class__.__name__,
[],
{
"model_name": self.model_name,
"constraint": self.constraint,
},
)
def describe(self):
return "Create constraint %s on model %s" % (
self.constraint.name,
self.model_name,
)
@property
def migration_name_fragment(self):
return "%s_%s" % (self.model_name_lower, self.constraint.name.lower())
class RemoveConstraint(IndexOperation):
option_name = "constraints"
def __init__(self, model_name, name):
self.model_name = model_name
self.name = name
def state_forwards(self, app_label, state):
state.remove_constraint(app_label, self.model_name_lower, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
from_model_state = from_state.models[app_label, self.model_name_lower]
constraint = from_model_state.get_constraint_by_name(self.name)
schema_editor.remove_constraint(model, constraint)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
to_model_state = to_state.models[app_label, self.model_name_lower]
constraint = to_model_state.get_constraint_by_name(self.name)
schema_editor.add_constraint(model, constraint)
def deconstruct(self):
return (
self.__class__.__name__,
[],
{
"model_name": self.model_name,
"name": self.name,
},
)
def describe(self):
return "Remove constraint %s from model %s" % (self.name, self.model_name)
@property
def migration_name_fragment(self):
return "remove_%s_%s" % (self.model_name_lower, self.name.lower())
|
fb075695b54bb5419e39e58acae43aed1dc70b1fe37746aeb2cb5b25c053e104 | from django.db import router
class Operation:
"""
Base class for migration operations.
It's responsible for both mutating the in-memory model state
(see db/migrations/state.py) to represent what it performs, as well
as actually performing it against a live database.
Note that some operations won't modify memory state at all (e.g. data
copying operations), and some will need their modifications to be
    optionally specified by the user (e.g. custom Python code snippets).
Due to the way this class deals with deconstruction, it should be
considered immutable.
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
# Can this migration be represented as SQL? (things like RunPython cannot)
reduces_to_sql = True
# Should this operation be forced as atomic even on backends with no
# DDL transaction support (i.e., does it have no DDL, like RunPython)
atomic = False
# Should this operation be considered safe to elide and optimize across?
elidable = False
serialization_expand_args = []
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Return a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
def state_forwards(self, app_label, state):
"""
Take the state from the previous migration, and mutate it
so that it matches what this migration would perform.
"""
raise NotImplementedError(
"subclasses of Operation must provide a state_forwards() method"
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Perform the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError(
"subclasses of Operation must provide a database_forwards() method"
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Perform the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError(
"subclasses of Operation must provide a database_backwards() method"
)
def describe(self):
"""
Output a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
@property
def migration_name_fragment(self):
"""
A filename part suitable for automatically naming a migration
containing this operation, or None if not applicable.
"""
return None
def references_model(self, name, app_label):
"""
Return True if there is a chance this operation references the given
model name (as a string), with an app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True
def references_field(self, model_name, name, app_label):
"""
Return True if there is a chance this operation references the given
field name, with an app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label)
def allow_migrate_model(self, connection_alias, model):
"""
Return whether or not a model may be migrated.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model)
def reduce(self, operation, app_label):
"""
Return either a list of operations the actual operation should be
replaced with or a boolean that indicates whether or not the specified
operation can be optimized across.
"""
if self.elidable:
return [operation]
elif operation.elidable:
return [self]
return False
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
", ".join(map(repr, self._constructor_args[0])),
",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
)
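# Illustrative sketch (added for exposition; not Django source): the minimal
# surface a custom Operation subclass must implement, modeled on the base
# class above. The PostgreSQL extension name is a hypothetical example.
def _example_custom_operation():
    from django.db.migrations.operations.base import Operation
    class LoadExtension(Operation):
        reversible = True
        def __init__(self, name):
            self.name = name
        def state_forwards(self, app_label, state):
            pass  # no in-memory model-state effect
        def database_forwards(self, app_label, schema_editor, from_state, to_state):
            schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % self.name)
        def database_backwards(self, app_label, schema_editor, from_state, to_state):
            schema_editor.execute("DROP EXTENSION IF EXISTS %s" % self.name)
        def describe(self):
            return "Creates extension %s" % self.name
    return LoadExtension("hstore")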
|
dd96f26ba0759c48ef2308502e8151f2418165495cdba9200715fb5d2dda7854 | from django.db import router
from .base import Operation
class SeparateDatabaseAndState(Operation):
"""
Take two lists of operations - ones that will be used for the database,
and ones that will be used for the state change. This allows operations
    that don't support state change to have it applied separately, and makes
    it possible to express operations that affect only the state or only the
    database.
"""
serialization_expand_args = ["database_operations", "state_operations"]
def __init__(self, database_operations=None, state_operations=None):
self.database_operations = database_operations or []
self.state_operations = state_operations or []
def deconstruct(self):
kwargs = {}
if self.database_operations:
kwargs["database_operations"] = self.database_operations
if self.state_operations:
kwargs["state_operations"] = self.state_operations
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
for database_operation in self.database_operations:
to_state = from_state.clone()
database_operation.state_forwards(app_label, to_state)
database_operation.database_forwards(
app_label, schema_editor, from_state, to_state
)
from_state = to_state
def database_backwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
to_states = {}
for dbop in self.database_operations:
to_states[dbop] = to_state
to_state = to_state.clone()
dbop.state_forwards(app_label, to_state)
# to_state now has the states of all the database_operations applied
# which is the from_state for the backwards migration of the last
# operation.
for database_operation in reversed(self.database_operations):
from_state = to_state
to_state = to_states[database_operation]
database_operation.database_backwards(
app_label, schema_editor, from_state, to_state
)
def describe(self):
return "Custom state/database change combination"
class RunSQL(Operation):
"""
Run some raw SQL. A reverse SQL statement may be provided.
Also accept a list of operations that represent the state change effected
by this SQL change, in case it's custom column/table creation/deletion.
"""
noop = ""
def __init__(
self, sql, reverse_sql=None, state_operations=None, hints=None, elidable=False
):
self.sql = sql
self.reverse_sql = reverse_sql
self.state_operations = state_operations or []
self.hints = hints or {}
self.elidable = elidable
def deconstruct(self):
kwargs = {
"sql": self.sql,
}
if self.reverse_sql is not None:
kwargs["reverse_sql"] = self.reverse_sql
if self.state_operations:
kwargs["state_operations"] = self.state_operations
if self.hints:
kwargs["hints"] = self.hints
return (self.__class__.__qualname__, [], kwargs)
@property
def reversible(self):
return self.reverse_sql is not None
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if router.allow_migrate(
schema_editor.connection.alias, app_label, **self.hints
):
self._run_sql(schema_editor, self.sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_sql is None:
raise NotImplementedError("You cannot reverse this operation")
if router.allow_migrate(
schema_editor.connection.alias, app_label, **self.hints
):
self._run_sql(schema_editor, self.reverse_sql)
def describe(self):
return "Raw SQL operation"
def _run_sql(self, schema_editor, sqls):
if isinstance(sqls, (list, tuple)):
for sql in sqls:
params = None
if isinstance(sql, (list, tuple)):
elements = len(sql)
if elements == 2:
sql, params = sql
else:
raise ValueError("Expected a 2-tuple but got %d" % elements)
schema_editor.execute(sql, params=params)
elif sqls != RunSQL.noop:
statements = schema_editor.connection.ops.prepare_sql_script(sqls)
for statement in statements:
schema_editor.execute(statement, params=None)
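# Illustrative sketch (added for exposition; table/column names are
# hypothetical): RunSQL with the parametrized 2-tuple form handled by
# _run_sql() above, and a no-op reverse so the migration stays reversible.
def _example_run_sql():
    from django.db import migrations
    return migrations.RunSQL(
        sql=[("UPDATE app_book SET rating = %s WHERE rating IS NULL", [0])],
        reverse_sql=migrations.RunSQL.noop,
    )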
class RunPython(Operation):
"""
Run Python code in a context suitable for doing versioned ORM operations.
"""
reduces_to_sql = False
def __init__(
self, code, reverse_code=None, atomic=None, hints=None, elidable=False
):
self.atomic = atomic
# Forwards code
if not callable(code):
raise ValueError("RunPython must be supplied with a callable")
self.code = code
# Reverse code
if reverse_code is None:
self.reverse_code = None
else:
if not callable(reverse_code):
raise ValueError("RunPython must be supplied with callable arguments")
self.reverse_code = reverse_code
self.hints = hints or {}
self.elidable = elidable
def deconstruct(self):
kwargs = {
"code": self.code,
}
if self.reverse_code is not None:
kwargs["reverse_code"] = self.reverse_code
if self.atomic is not None:
kwargs["atomic"] = self.atomic
if self.hints:
kwargs["hints"] = self.hints
return (self.__class__.__qualname__, [], kwargs)
@property
def reversible(self):
return self.reverse_code is not None
def state_forwards(self, app_label, state):
# RunPython objects have no state effect. To add some, combine this
# with SeparateDatabaseAndState.
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# RunPython has access to all models. Ensure that all models are
# reloaded in case any are delayed.
from_state.clear_delayed_apps_cache()
if router.allow_migrate(
schema_editor.connection.alias, app_label, **self.hints
):
# We now execute the Python code in a context that contains a 'models'
# object, representing the versioned models as an app registry.
# We could try to override the global cache, but then people will still
# use direct imports, so we go with a documentation approach instead.
self.code(from_state.apps, schema_editor)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_code is None:
raise NotImplementedError("You cannot reverse this operation")
if router.allow_migrate(
schema_editor.connection.alias, app_label, **self.hints
):
self.reverse_code(from_state.apps, schema_editor)
def describe(self):
return "Raw Python operation"
@staticmethod
def noop(apps, schema_editor):
return None
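# Illustrative sketch (added for exposition; app/model names are
# hypothetical): RunPython callables receive the versioned app registry that
# database_forwards() above passes as `apps`.
def _example_run_python():
    from django.db import migrations
    def forwards(apps, schema_editor):
        Book = apps.get_model("library", "Book")
        Book.objects.filter(rating=None).update(rating=0)
    return migrations.RunPython(forwards, migrations.RunPython.noop)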
|
fface7c3a618c00c6cfd5e08d3905b3ff4f9f2b93347676a51c2f2526ba5f997 | from django.db.migrations.utils import field_references
from django.db.models import NOT_PROVIDED
from django.utils.functional import cached_property
from .base import Operation
class FieldOperation(Operation):
def __init__(self, model_name, name, field=None):
self.model_name = model_name
self.name = name
self.field = field
@cached_property
def model_name_lower(self):
return self.model_name.lower()
@cached_property
def name_lower(self):
return self.name.lower()
def is_same_model_operation(self, operation):
return self.model_name_lower == operation.model_name_lower
def is_same_field_operation(self, operation):
return (
self.is_same_model_operation(operation)
and self.name_lower == operation.name_lower
)
def references_model(self, name, app_label):
name_lower = name.lower()
if name_lower == self.model_name_lower:
return True
if self.field:
return bool(
field_references(
(app_label, self.model_name_lower),
self.field,
(app_label, name_lower),
)
)
return False
def references_field(self, model_name, name, app_label):
model_name_lower = model_name.lower()
# Check if this operation locally references the field.
if model_name_lower == self.model_name_lower:
if name == self.name:
return True
elif (
self.field
and hasattr(self.field, "from_fields")
and name in self.field.from_fields
):
return True
# Check if this operation remotely references the field.
if self.field is None:
return False
return bool(
field_references(
(app_label, self.model_name_lower),
self.field,
(app_label, model_name_lower),
name,
)
)
def reduce(self, operation, app_label):
return super().reduce(operation, app_label) or not operation.references_field(
self.model_name, self.name, app_label
)
class AddField(FieldOperation):
"""Add a field to a model."""
def __init__(self, model_name, name, field, preserve_default=True):
self.preserve_default = preserve_default
super().__init__(model_name, name, field)
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"name": self.name,
"field": self.field,
}
if self.preserve_default is not True:
kwargs["preserve_default"] = self.preserve_default
return (self.__class__.__name__, [], kwargs)
def state_forwards(self, app_label, state):
state.add_field(
app_label,
self.model_name_lower,
self.name,
self.field,
self.preserve_default,
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
field = to_model._meta.get_field(self.name)
if not self.preserve_default:
field.default = self.field.default
schema_editor.add_field(
from_model,
field,
)
if not self.preserve_default:
field.default = NOT_PROVIDED
def database_backwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, from_model):
schema_editor.remove_field(
from_model, from_model._meta.get_field(self.name)
)
def describe(self):
return "Add field %s to %s" % (self.name, self.model_name)
@property
def migration_name_fragment(self):
return "%s_%s" % (self.model_name_lower, self.name_lower)
def reduce(self, operation, app_label):
if isinstance(operation, FieldOperation) and self.is_same_field_operation(
operation
):
if isinstance(operation, AlterField):
return [
AddField(
model_name=self.model_name,
name=operation.name,
field=operation.field,
),
]
elif isinstance(operation, RemoveField):
return []
elif isinstance(operation, RenameField):
return [
AddField(
model_name=self.model_name,
name=operation.new_name,
field=self.field,
),
]
return super().reduce(operation, app_label)
class RemoveField(FieldOperation):
"""Remove a field from a model."""
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"name": self.name,
}
return (self.__class__.__name__, [], kwargs)
def state_forwards(self, app_label, state):
state.remove_field(app_label, self.model_name_lower, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, from_model):
schema_editor.remove_field(
from_model, from_model._meta.get_field(self.name)
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
schema_editor.add_field(from_model, to_model._meta.get_field(self.name))
def describe(self):
return "Remove field %s from %s" % (self.name, self.model_name)
@property
def migration_name_fragment(self):
return "remove_%s_%s" % (self.model_name_lower, self.name_lower)
def reduce(self, operation, app_label):
from .models import DeleteModel
if (
isinstance(operation, DeleteModel)
and operation.name_lower == self.model_name_lower
):
return [operation]
return super().reduce(operation, app_label)
class AlterField(FieldOperation):
"""
Alter a field's database column (e.g. null, max_length) to the provided
new field.
"""
def __init__(self, model_name, name, field, preserve_default=True):
self.preserve_default = preserve_default
super().__init__(model_name, name, field)
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"name": self.name,
"field": self.field,
}
if self.preserve_default is not True:
kwargs["preserve_default"] = self.preserve_default
return (self.__class__.__name__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_field(
app_label,
self.model_name_lower,
self.name,
self.field,
self.preserve_default,
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
from_field = from_model._meta.get_field(self.name)
to_field = to_model._meta.get_field(self.name)
if not self.preserve_default:
to_field.default = self.field.default
schema_editor.alter_field(from_model, from_field, to_field)
if not self.preserve_default:
to_field.default = NOT_PROVIDED
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Alter field %s on %s" % (self.name, self.model_name)
@property
def migration_name_fragment(self):
return "alter_%s_%s" % (self.model_name_lower, self.name_lower)
def reduce(self, operation, app_label):
if isinstance(operation, RemoveField) and self.is_same_field_operation(
operation
):
return [operation]
elif (
isinstance(operation, RenameField)
and self.is_same_field_operation(operation)
and self.field.db_column is None
):
return [
operation,
AlterField(
model_name=self.model_name,
name=operation.new_name,
field=self.field,
),
]
return super().reduce(operation, app_label)
class RenameField(FieldOperation):
"""Rename a field on the model. Might affect db_column too."""
def __init__(self, model_name, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
super().__init__(model_name, old_name)
@cached_property
def old_name_lower(self):
return self.old_name.lower()
@cached_property
def new_name_lower(self):
return self.new_name.lower()
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"old_name": self.old_name,
"new_name": self.new_name,
}
return (self.__class__.__name__, [], kwargs)
def state_forwards(self, app_label, state):
state.rename_field(
app_label, self.model_name_lower, self.old_name, self.new_name
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
schema_editor.alter_field(
from_model,
from_model._meta.get_field(self.old_name),
to_model._meta.get_field(self.new_name),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
schema_editor.alter_field(
from_model,
from_model._meta.get_field(self.new_name),
to_model._meta.get_field(self.old_name),
)
def describe(self):
return "Rename field %s on %s to %s" % (
self.old_name,
self.model_name,
self.new_name,
)
@property
def migration_name_fragment(self):
return "rename_%s_%s_%s" % (
self.old_name_lower,
self.model_name_lower,
self.new_name_lower,
)
def references_field(self, model_name, name, app_label):
return self.references_model(model_name, app_label) and (
name.lower() == self.old_name_lower or name.lower() == self.new_name_lower
)
def reduce(self, operation, app_label):
if (
isinstance(operation, RenameField)
and self.is_same_model_operation(operation)
and self.new_name_lower == operation.old_name_lower
):
return [
RenameField(
self.model_name,
self.old_name,
operation.new_name,
),
]
# Skip `FieldOperation.reduce` as we want to run `references_field`
# against self.old_name and self.new_name.
return super(FieldOperation, self).reduce(operation, app_label) or not (
operation.references_field(self.model_name, self.old_name, app_label)
or operation.references_field(self.model_name, self.new_name, app_label)
)
|
6839d13177fa2c52720804a14b7982b205eac42ba42a3b299da6b06d0a380243 | import warnings
from django.db.models.lookups import (
Exact,
GreaterThan,
GreaterThanOrEqual,
In,
IsNull,
LessThan,
LessThanOrEqual,
)
from django.utils.deprecation import RemovedInDjango50Warning
class MultiColSource:
contains_aggregate = False
def __init__(self, alias, targets, sources, field):
self.targets, self.sources, self.field, self.alias = (
targets,
sources,
field,
alias,
)
self.output_field = self.field
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.alias, self.field)
def relabeled_clone(self, relabels):
return self.__class__(
relabels.get(self.alias, self.alias), self.targets, self.sources, self.field
)
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def resolve_expression(self, *args, **kwargs):
return self
def get_normalized_value(value, lhs):
from django.db.models import Model
if isinstance(value, Model):
if value.pk is None:
# When the deprecation ends, replace with:
# raise ValueError(
# "Model instances passed to related filters must be saved."
# )
warnings.warn(
"Passing unsaved model instances to related filters is deprecated.",
RemovedInDjango50Warning,
)
value_list = []
sources = lhs.output_field.path_infos[-1].target_fields
for source in sources:
while not isinstance(value, source.model) and source.remote_field:
source = source.remote_field.model._meta.get_field(
source.remote_field.field_name
)
try:
value_list.append(getattr(value, source.attname))
except AttributeError:
# A case like Restaurant.objects.filter(place=restaurant_instance),
# where place is a OneToOneField and the primary key of Restaurant.
return (value.pk,)
return tuple(value_list)
if not isinstance(value, tuple):
return (value,)
return value
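# Illustrative sketch (added for exposition): plain values are wrapped in a
# 1-tuple by get_normalized_value() and tuples pass through unchanged; model
# instances take the attribute-resolving branch above instead. The lhs
# argument is unused on these paths, so None stands in for it here.
def _example_get_normalized_value():
    assert get_normalized_value(42, None) == (42,)
    assert get_normalized_value((1, 2), None) == (1, 2)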
class RelatedIn(In):
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource):
if self.rhs_is_direct_value():
# If we get here, we are dealing with single-column relations.
self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs]
# We need to run the related field's get_prep_value(). Consider
# case ForeignKey to IntegerField given value 'abc'. The
# ForeignKey itself doesn't have validation for non-integers,
# so we must run validation using the target field.
if hasattr(self.lhs.output_field, "path_infos"):
# Run the target field's get_prep_value. We can safely
# assume there is only one as we don't get to the direct
# value branch otherwise.
target_field = self.lhs.output_field.path_infos[-1].target_fields[
-1
]
self.rhs = [target_field.get_prep_value(v) for v in self.rhs]
elif not getattr(self.rhs, "has_select_fields", True) and not getattr(
self.lhs.field.target_field, "primary_key", False
):
self.rhs.clear_select_clause()
if (
getattr(self.lhs.output_field, "primary_key", False)
and self.lhs.output_field.model == self.rhs.model
):
# A case like
# Restaurant.objects.filter(place__in=restaurant_qs), where
# place is a OneToOneField and the primary key of
# Restaurant.
target_field = self.lhs.field.name
else:
target_field = self.lhs.field.target_field.name
self.rhs.add_fields([target_field], True)
return super().get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
# For multicolumn lookups we need to build a multicolumn where clause.
# This clause is either a SubqueryConstraint (for values that need
# to be compiled to SQL) or an OR-combined list of
# (col1 = val1 AND col2 = val2 AND ...) clauses.
from django.db.models.sql.where import (
AND,
OR,
SubqueryConstraint,
WhereNode,
)
root_constraint = WhereNode(connector=OR)
if self.rhs_is_direct_value():
values = [get_normalized_value(value, self.lhs) for value in self.rhs]
for value in values:
value_constraint = WhereNode()
for source, target, val in zip(
self.lhs.sources, self.lhs.targets, value
):
lookup_class = target.get_lookup("exact")
lookup = lookup_class(
target.get_col(self.lhs.alias, source), val
)
value_constraint.add(lookup, AND)
root_constraint.add(value_constraint, OR)
else:
root_constraint.add(
SubqueryConstraint(
self.lhs.alias,
[target.column for target in self.lhs.targets],
[source.name for source in self.lhs.sources],
self.rhs,
),
AND,
)
return root_constraint.as_sql(compiler, connection)
return super().as_sql(compiler, connection)
class RelatedLookupMixin:
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource) and not hasattr(
self.rhs, "resolve_expression"
):
# If we get here, we are dealing with single-column relations.
self.rhs = get_normalized_value(self.rhs, self.lhs)[0]
# We need to run the related field's get_prep_value(). Consider case
# ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
# doesn't have validation for non-integers, so we must run validation
# using the target field.
if self.prepare_rhs and hasattr(self.lhs.output_field, "path_infos"):
# Get the target field. We can safely assume there is only one
# as we don't get to the direct value branch otherwise.
target_field = self.lhs.output_field.path_infos[-1].target_fields[-1]
self.rhs = target_field.get_prep_value(self.rhs)
return super().get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
assert self.rhs_is_direct_value()
self.rhs = get_normalized_value(self.rhs, self.lhs)
from django.db.models.sql.where import AND, WhereNode
root_constraint = WhereNode()
for target, source, val in zip(
self.lhs.targets, self.lhs.sources, self.rhs
):
lookup_class = target.get_lookup(self.lookup_name)
root_constraint.add(
lookup_class(target.get_col(self.lhs.alias, source), val), AND
)
return root_constraint.as_sql(compiler, connection)
return super().as_sql(compiler, connection)
class RelatedExact(RelatedLookupMixin, Exact):
pass
class RelatedLessThan(RelatedLookupMixin, LessThan):
pass
class RelatedGreaterThan(RelatedLookupMixin, GreaterThan):
pass
class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual):
pass
class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual):
pass
class RelatedIsNull(RelatedLookupMixin, IsNull):
pass
|
7851f29788114eaa1c8e077a9c80fd52542e388a69040e7b55cafbd5445e4c0b | """
Field-like classes that aren't really fields. Sometimes it's easier to use
objects that have the same attributes as fields (it avoids a lot of special
casing).
"""
from django.db.models import fields
class OrderWrt(fields.IntegerField):
"""
A proxy for the _order database field that is used when
Meta.order_with_respect_to is specified.
"""
def __init__(self, *args, **kwargs):
kwargs["name"] = "_order"
kwargs["editable"] = False
super().__init__(*args, **kwargs)
|
cb2448cb27385c08b55b0f883cb587317ade10152b943c8b9e06aa0265f60c20 | import collections.abc
import copy
import datetime
import decimal
import math
import operator
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import partialmethod, total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
from django.db import connection, connections, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date,
parse_datetime,
parse_duration,
parse_time,
)
from django.utils.duration import duration_microseconds, duration_string
from django.utils.functional import Promise, cached_property
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
__all__ = [
"AutoField",
"BLANK_CHOICE_DASH",
"BigAutoField",
"BigIntegerField",
"BinaryField",
"BooleanField",
"CharField",
"CommaSeparatedIntegerField",
"DateField",
"DateTimeField",
"DecimalField",
"DurationField",
"EmailField",
"Empty",
"Field",
"FilePathField",
"FloatField",
"GenericIPAddressField",
"IPAddressField",
"IntegerField",
"NOT_PROVIDED",
"NullBooleanField",
"PositiveBigIntegerField",
"PositiveIntegerField",
"PositiveSmallIntegerField",
"SlugField",
"SmallAutoField",
"SmallIntegerField",
"TextField",
"TimeField",
"URLField",
"UUIDField",
]
class Empty:
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be prepended to most
# "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
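#
# Illustrative sketch, not part of Django itself: for a hypothetical model
# Book with author = ForeignKey(Author, ...), the attributes differ like so:
#
#     field = Book._meta.get_field("author")
#     field.name     # "author"    - the name declared on the model
#     field.attname  # "author_id" - the attribute holding the raw FK value
#     field.column   # "author_id" - unless db_column overrides it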
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
def return_None():
return None
@total_ordering
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings are fundamentally allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
creates; creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
"invalid_choice": _("Value %(value)r is not a valid choice."),
"null": _("This field cannot be null."),
"blank": _("This field cannot be blank."),
"unique": _("%(model_name)s with this %(field_label)s already exists."),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
"unique_for_date": _(
"%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."
),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
descriptor_class = DeferredAttribute
# Generic field type description, usually overridden by subclasses
def _description(self):
return _("Field of type: %(field_type)s") % {
"field_type": self.__class__.__name__
}
description = property(_description)
def __init__(
self,
verbose_name=None,
name=None,
primary_key=False,
max_length=None,
unique=False,
blank=False,
null=False,
db_index=False,
rel=None,
default=NOT_PROVIDED,
editable=True,
serialize=True,
unique_for_date=None,
unique_for_month=None,
unique_for_year=None,
choices=None,
help_text="",
db_column=None,
db_tablespace=None,
auto_created=False,
validators=(),
error_messages=None,
):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.abc.Iterator):
choices = list(choices)
self.choices = choices
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self._db_tablespace = db_tablespace
self.auto_created = auto_created
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = list(validators) # Store for deconstruction later
self._error_messages = error_messages # Store for deconstruction later
def __str__(self):
"""
Return "app_label.model_label.field_name" for fields attached to
models.
"""
if not hasattr(self, "model"):
return super().__str__()
model = self.model
return "%s.%s" % (model._meta.label, self.name)
def __repr__(self):
"""Display the module, class, and name of the field."""
path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__)
name = getattr(self, "name", None)
if name is not None:
return "<%s: %s>" % (path, name)
return "<%s>" % path
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_choices(),
*self._check_db_index(),
*self._check_null_allowed_for_primary_keys(),
*self._check_backend_specific_checks(**kwargs),
*self._check_validators(),
*self._check_deprecation_details(),
]
def _check_field_name(self):
"""
Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk".
"""
if self.name.endswith("_"):
return [
checks.Error(
"Field names must not end with an underscore.",
obj=self,
id="fields.E001",
)
]
elif LOOKUP_SEP in self.name:
return [
checks.Error(
'Field names must not contain "%s".' % LOOKUP_SEP,
obj=self,
id="fields.E002",
)
]
elif self.name == "pk":
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=self,
id="fields.E003",
)
]
else:
return []
@classmethod
def _choices_is_value(cls, value):
return isinstance(value, (str, Promise)) or not is_iterable(value)
def _check_choices(self):
if not self.choices:
return []
if not is_iterable(self.choices) or isinstance(self.choices, str):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
obj=self,
id="fields.E004",
)
]
choice_max_length = 0
# Expect [group_name, [value, display]]
for choices_group in self.choices:
try:
group_name, group_choices = choices_group
except (TypeError, ValueError):
# Containing non-pairs
break
try:
if not all(
self._choices_is_value(value) and self._choices_is_value(human_name)
for value, human_name in group_choices
):
break
if self.max_length is not None and group_choices:
choice_max_length = max(
[
choice_max_length,
*(
len(value)
for value, _ in group_choices
if isinstance(value, str)
),
]
)
except (TypeError, ValueError):
# No groups, choices in the form [value, display]
value, human_name = group_name, group_choices
if not self._choices_is_value(value) or not self._choices_is_value(
human_name
):
break
if self.max_length is not None and isinstance(value, str):
choice_max_length = max(choice_max_length, len(value))
# Special case: choices=['ab']
if isinstance(choices_group, str):
break
else:
if self.max_length is not None and choice_max_length > self.max_length:
return [
checks.Error(
"'max_length' is too small to fit the longest value "
"in 'choices' (%d characters)." % choice_max_length,
obj=self,
id="fields.E009",
),
]
return []
return [
checks.Error(
"'choices' must be an iterable containing "
"(actual value, human readable name) tuples.",
obj=self,
id="fields.E005",
)
]
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
obj=self,
id="fields.E006",
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (
self.primary_key
and self.null
and not connection.features.interprets_empty_strings_as_nulls
):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
"Primary keys must not have null=True.",
hint=(
"Set null=False on the field, or "
"remove primary_key=True argument."
),
obj=self,
id="fields.E007",
)
]
else:
return []
def _check_backend_specific_checks(self, databases=None, **kwargs):
if databases is None:
return []
app_label = self.model._meta.app_label
errors = []
for alias in databases:
if router.allow_migrate(
alias, app_label, model_name=self.model._meta.model_name
):
errors.extend(connections[alias].validation.check_field(self, **kwargs))
return errors
def _check_validators(self):
errors = []
for i, validator in enumerate(self.validators):
if not callable(validator):
errors.append(
checks.Error(
"All 'validators' must be callable.",
hint=(
"validators[{i}] ({repr}) isn't a function or "
"instance of a validator class.".format(
i=i,
repr=repr(validator),
)
),
obj=self,
id="fields.E008",
)
)
return errors
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
"msg",
"%s has been removed except for support in historical "
"migrations." % self.__class__.__name__,
),
hint=self.system_check_removed_details.get("hint"),
obj=self,
id=self.system_check_removed_details.get("id", "fields.EXXX"),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
"msg", "%s has been deprecated." % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get("hint"),
obj=self,
id=self.system_check_deprecated_details.get("id", "fields.WXXX"),
)
]
return []
def get_col(self, alias, output_field=None):
if alias == self.model._meta.db_table and (
output_field is None or output_field == self
):
return self.cached_col
from django.db.models.expressions import Col
return Col(alias, self, output_field)
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be
used by Django.
"""
return sql, params
def deconstruct(self):
"""
Return enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class() has
been run.
* The import path of the field, including the class, e.g.
django.db.models.IntegerField. This should be the most portable
version, so less specific may be better.
* A list of positional arguments.
* A dict of keyword arguments.
Note that the positional or keyword arguments must contain values of
the following types (including inner values of collection types):
* None, bool, str, int, float, complex, set, frozenset, list, tuple,
dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their
full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this
time, just ensure that the resulting field is the same - prefer keyword
arguments over positional ones, and omit parameters with their default
values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": None,
"help_text": "",
"db_column": None,
"db_tablespace": None,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
"db_tablespace": "_db_tablespace",
}
equals_comparison = {"choices", "validators"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.abc.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
elif path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
elif path.startswith("django.db.models.fields.json"):
path = path.replace("django.db.models.fields.json", "django.db.models")
elif path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
elif path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (self.name, path, [], keywords)
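# A minimal sketch, not part of Django itself: deconstructing an unbound
# field returns the import path and only the non-default kwargs:
#
#     name, path, args, kwargs = CharField(max_length=100).deconstruct()
#     # name is None (contribute_to_class() hasn't run), path is
#     # "django.db.models.CharField", args is [], and kwargs is
#     # {"max_length": 100}.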
def clone(self):
"""
Use deconstruct() to create a new, unbound copy of this Field. Class
attachments and attribute names are not preserved.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter and getattr(
self, "model", None
) == getattr(other, "model", None)
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
# Order by creation_counter first for backward compatibility.
if isinstance(other, Field):
if (
self.creation_counter != other.creation_counter
or not hasattr(self, "model")
and not hasattr(other, "model")
):
return self.creation_counter < other.creation_counter
elif hasattr(self, "model") != hasattr(other, "model"):
return not hasattr(self, "model") # Order no-model fields first
else:
# creation_counter's are equal, compare only models.
return (self.model._meta.app_label, self.model._meta.model_name) < (
other.model._meta.app_label,
other.model._meta.model_name,
)
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.remote_field:
obj.remote_field = copy.copy(self.remote_field)
if hasattr(self.remote_field, "field") and self.remote_field.field is self:
obj.remote_field.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, use the app registry to load the
model and then the field back.
"""
if not hasattr(self, "model"):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
state = self.__dict__.copy()
# The _get_default cached_property can't be pickled due to lambda
# usage.
state.pop("_get_default", None)
return _empty, (self.__class__,), state
return _load_field, (
self.model._meta.app_label,
self.model._meta.object_name,
self.name,
)
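# A minimal sketch, not part of Django itself: because __reduce__ round-trips
# through the app registry, unpickling a bound field yields the very instance
# that lives on the model's _meta (MyModel below is hypothetical):
#
#     field = MyModel._meta.get_field("title")
#     pickle.loads(pickle.dumps(field)) is field  # True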
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
a value other than None, the returned value is used when saving the
new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Convert the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Return the converted value. Subclasses should override this.
"""
return value
@cached_property
def error_messages(self):
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, "default_error_messages", {}))
messages.update(self._error_messages or {})
return messages
@cached_property
def validators(self):
"""
Some validators can't be created at field initialization time.
This method provides a way to delay their creation until required.
"""
return [*self.default_validators, *self._validators]
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, "code") and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validate value and raise ValidationError if necessary. Subclasses
should override this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self.choices is not None and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages["null"], code="null")
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages["blank"], code="blank")
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python() and validate() are propagated. Return the correct
value if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
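# A minimal sketch, not part of Django itself: clean() chains to_python()
# and the validators, so for an IntegerField:
#
#     IntegerField().clean("5", None)    # returns 5
#     IntegerField().clean("abc", None)  # raises ValidationError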
def db_type_parameters(self, connection):
return DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
def db_check(self, connection):
"""
Return the database column check constraint for this field, for the
provided connection. Works the same way as db_type() for the case that
get_internal_type() does not map to a preexisting model field.
"""
data = self.db_type_parameters(connection)
try:
return (
connection.data_type_check_constraints[self.get_internal_type()] % data
)
except KeyError:
return None
def db_type(self, connection):
"""
Return the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = self.db_type_parameters(connection)
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
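# A minimal sketch, not part of Django itself: a custom field that needs a
# column type with no built-in mapping can override db_type() directly
# (HandField is hypothetical):
#
#     class HandField(Field):
#         def db_type(self, connection):
#             return "char(104)"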
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. For example, this method is called by ForeignKey and OneToOneField
to determine its data type.
"""
return self.db_type(connection)
def cast_db_type(self, connection):
"""Return the data type to use in the Cast() function."""
db_type = connection.ops.cast_data_types.get(self.get_internal_type())
if db_type:
return db_type % self.db_type_parameters(connection)
return self.db_type(connection)
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return values
(type, checks). This will look at db_type(), allowing custom model
fields to override it.
"""
type_string = self.db_type(connection)
check_string = self.db_check(connection)
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, "from_db_value"):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
@property
def db_tablespace(self):
return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
@property
def db_returning(self):
"""
Private API intended only to be used by Django itself. Currently only
the PostgreSQL backend supports returning multiple fields on a model.
"""
return False
def set_attributes_from_name(self, name):
self.name = self.name or name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace("_", " ")
def contribute_to_class(self, cls, name, private_only=False):
"""
Register the field with the model class it belongs to.
If private_only is True, create a separate instance of this field
for every subclass of cls, even if cls is not an abstract model.
"""
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self, private=private_only)
if self.column:
setattr(cls, self.attname, self.descriptor_class(self))
if self.choices is not None:
# Don't override a get_FOO_display() method defined explicitly on
# this class, but don't check methods derived from inheritance, to
# allow overriding inherited choices. For more complex inheritance
# structures users should override contribute_to_class().
if "get_%s_display" % self.name not in cls.__dict__:
setattr(
cls,
"get_%s_display" % self.name,
partialmethod(cls._get_FIELD_display, field=self),
)
def get_filter_kwargs_for_object(self, obj):
"""
Return a dict that, when passed as kwargs to self.model.filter(), would
yield all instances having the same value for this field as obj has.
"""
return {self.name: getattr(obj, self.attname)}
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""Return field's value just before saving."""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""Perform preliminary non-db specific value checks and conversions."""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""
Return field's value prepared for interacting with the database backend.
Used by the default implementations of get_db_prep_save().
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""Return field's value prepared for saving into a database."""
return self.get_db_prep_value(value, connection=connection, prepared=False)
def has_default(self):
"""Return a boolean of whether this field has a default value."""
return self.default is not NOT_PROVIDED
def get_default(self):
"""Return the default value for this field."""
return self._get_default()
@cached_property
def _get_default(self):
if self.has_default():
if callable(self.default):
return self.default
return lambda: self.default
if (
not self.empty_strings_allowed
or self.null
and not connection.features.interprets_empty_strings_as_nulls
):
return return_None
return str # return empty string
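# A minimal sketch, not part of Django itself: callable defaults are invoked
# on every call, so each instance gets a fresh value:
#
#     Field(default=list).get_default()  # [] (a new list each time)
#     Field(default=0).get_default()     # 0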
def get_choices(
self,
include_blank=True,
blank_choice=BLANK_CHOICE_DASH,
limit_choices_to=None,
ordering=(),
):
"""
Return choices with a default blank choice included, for use as
<select> choices for this field.
"""
if self.choices is not None:
choices = list(self.choices)
if include_blank:
blank_defined = any(
choice in ("", None) for choice, _ in self.flatchoices
)
if not blank_defined:
choices = blank_choice + choices
return choices
rel_model = self.remote_field.model
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
choice_func = operator.attrgetter(
self.remote_field.get_related_field().attname
if hasattr(self.remote_field, "get_related_field")
else "pk"
)
qs = rel_model._default_manager.complex_filter(limit_choices_to)
if ordering:
qs = qs.order_by(*ordering)
return (blank_choice if include_blank else []) + [
(choice_func(x), str(x)) for x in qs
]
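# A minimal sketch, not part of Django itself: the blank choice is prepended
# unless the declared choices already define one:
#
#     IntegerField(choices=[(1, "One")]).get_choices()
#     # [("", "---------"), (1, "One")]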
def value_to_string(self, obj):
"""
Return a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return str(self.value_from_object(obj))
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
if self.choices is None:
return []
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""Return a django.forms.Field instance for this field."""
defaults = {
"required": not self.blank,
"label": capfirst(self.verbose_name),
"help_text": self.help_text,
}
if self.has_default():
if callable(self.default):
defaults["initial"] = self.default
defaults["show_hidden_initial"] = True
else:
defaults["initial"] = self.get_default()
if self.choices is not None:
# Fields with choices get special treatment.
include_blank = self.blank or not (
self.has_default() or "initial" in kwargs
)
defaults["choices"] = self.get_choices(include_blank=include_blank)
defaults["coerce"] = self.to_python
if self.null:
defaults["empty_value"] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in (
"coerce",
"empty_value",
"choices",
"required",
"widget",
"label",
"initial",
"help_text",
"error_messages",
"show_hidden_initial",
"disabled",
):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""Return the value of this field in the given model instance."""
return getattr(obj, self.attname)
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
"invalid": _("“%(value)s” value must be either True or False."),
"invalid_nullable": _("“%(value)s” value must be either True, False, or None."),
}
description = _("Boolean (Either True or False)")
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if self.null and value in self.empty_values:
return None
if value in (True, False):
# 1/0 are equal to True/False. bool() converts former to latter.
return bool(value)
if value in ("t", "True", "1"):
return True
if value in ("f", "False", "0"):
return False
raise exceptions.ValidationError(
self.error_messages["invalid_nullable" if self.null else "invalid"],
code="invalid",
params={"value": value},
)
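# A minimal sketch, not part of Django itself: only a small set of string
# spellings is accepted:
#
#     BooleanField().to_python("True")  # True
#     BooleanField().to_python("0")     # False
#     BooleanField().to_python("yes")   # raises ValidationError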
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
if self.choices is not None:
include_blank = not (self.has_default() or "initial" in kwargs)
defaults = {"choices": self.get_choices(include_blank=include_blank)}
else:
form_class = forms.NullBooleanField if self.null else forms.BooleanField
# In HTML checkboxes, 'required' means "must be checked", which is
# different from the choices case ("must select some value").
# required=False allows unchecked checkboxes.
defaults = {"form_class": form_class, "required": False}
return super().formfield(**{**defaults, **kwargs})
def select_format(self, compiler, sql, params):
sql, params = super().select_format(compiler, sql, params)
# Filters that match everything are handled as empty strings in the
# WHERE clause, but in SELECT or GROUP BY list they must use a
# predicate that's always True.
if sql == "":
sql = "1"
return sql, params
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, db_collation=None, **kwargs):
super().__init__(*args, **kwargs)
self.db_collation = db_collation
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
databases = kwargs.get("databases") or []
return [
*super().check(**kwargs),
*self._check_db_collation(databases),
*self._check_max_length_attribute(**kwargs),
]
def _check_max_length_attribute(self, **kwargs):
if self.max_length is None:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
obj=self,
id="fields.E120",
)
]
elif (
not isinstance(self.max_length, int)
or isinstance(self.max_length, bool)
or self.max_length <= 0
):
return [
checks.Error(
"'max_length' must be a positive integer.",
obj=self,
id="fields.E121",
)
]
else:
return []
def _check_db_collation(self, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, self.model):
continue
connection = connections[db]
if not (
self.db_collation is None
or "supports_collation_on_charfield"
in self.model._meta.required_db_features
or connection.features.supports_collation_on_charfield
):
errors.append(
checks.Error(
"%s does not support a database collation on "
"CharFields." % connection.display_name,
obj=self,
id="fields.E190",
),
)
return errors
def cast_db_type(self, connection):
if self.max_length is None:
return connection.ops.cast_char_field_without_max_length
return super().cast_db_type(connection)
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into the widget, for example).
defaults = {"max_length": self.max_length}
# TODO: Handle multiple backends with different feature flags.
if self.null and not connection.features.interprets_empty_strings_as_nulls:
defaults["empty_value"] = None
defaults.update(kwargs)
return super().formfield(**defaults)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.db_collation:
kwargs["db_collation"] = self.db_collation
return name, path, args, kwargs
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
system_check_removed_details = {
"msg": (
"CommaSeparatedIntegerField is removed except for support in "
"historical migrations."
),
"hint": (
"Use CharField(validators=[validate_comma_separated_integer_list]) "
"instead."
),
"id": "fields.E901",
}
def _to_naive(value):
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
return value
def _get_naive_now():
return _to_naive(timezone.now())
class DateTimeCheckMixin:
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_mutually_exclusive_options(),
*self._check_fix_default_value(),
]
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [
self.auto_now_add,
self.auto_now,
self.has_default(),
]
enabled_options = [
option not in (None, False) for option in mutually_exclusive_options
].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=self,
id="fields.E160",
)
]
else:
return []
def _check_fix_default_value(self):
return []
# Concrete subclasses use this in their implementations of
# _check_fix_default_value().
def _check_if_value_fixed(self, value, now=None):
"""
Check if the given value appears to have been provided as a "fixed"
time value, and include a warning in the returned list if it does. The
value argument must be a date object or aware/naive datetime object. If
now is provided, it must be a naive datetime object.
"""
if now is None:
now = _get_naive_now()
offset = datetime.timedelta(seconds=10)
lower = now - offset
upper = now + offset
if isinstance(value, datetime.datetime):
value = _to_naive(value)
else:
assert isinstance(value, datetime.date)
lower = lower.date()
upper = upper.date()
if lower <= value <= upper:
return [
checks.Warning(
"Fixed default value provided.",
hint=(
"It seems you set a fixed date / time / datetime "
"value as default for this field. This may not be "
"what you want. If you want to have the current date "
"as default, use `django.utils.timezone.now`"
),
obj=self,
id="fields.W161",
)
]
return []
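# A minimal sketch, not part of Django itself: the classic trigger for
# fields.W161 is calling now()/today() at class definition time instead of
# passing the callable:
#
#     DateField(default=datetime.date.today())  # fixed value -> W161
#     DateField(default=datetime.date.today)    # callable -> OK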
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
"invalid": _(
"“%(value)s” value has an invalid date format. It must be "
"in YYYY-MM-DD format."
),
"invalid_date": _(
"“%(value)s” value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."
),
}
description = _("Date (without time)")
def __init__(
self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs
):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs["editable"] = False
kwargs["blank"] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
value = self.default
if isinstance(value, datetime.datetime):
value = _to_naive(value).date()
elif isinstance(value, datetime.date):
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
# At this point, value is a date object.
return self._check_if_value_fixed(value)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now:
kwargs["auto_now"] = True
if self.auto_now_add:
kwargs["auto_now_add"] = True
if self.auto_now or self.auto_now_add:
del kwargs["editable"]
del kwargs["blank"]
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages["invalid_date"],
code="invalid_date",
params={"value": value},
)
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(
cls,
"get_next_by_%s" % self.name,
partialmethod(
cls._get_next_or_previous_by_FIELD, field=self, is_next=True
),
)
setattr(
cls,
"get_previous_by_%s" % self.name,
partialmethod(
cls._get_next_or_previous_by_FIELD, field=self, is_next=False
),
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return "" if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.DateField,
**kwargs,
}
)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
"invalid": _(
"“%(value)s” value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."
),
"invalid_date": _(
"“%(value)s” value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."
),
"invalid_datetime": _(
"“%(value)s” value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."
),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
value = self.default
if isinstance(value, (datetime.datetime, datetime.date)):
return self._check_if_value_fixed(value)
# No explicit date / datetime value -- no checks necessary.
return []
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn(
"DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active."
% (self.model.__name__, self.name, value),
RuntimeWarning,
)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages["invalid_datetime"],
code="invalid_datetime",
params={"value": value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages["invalid_date"],
code="invalid_date",
params={"value": value},
)
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
def get_prep_value(self, value):
value = super().get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = "%s.%s" % (self.model.__name__, self.name)
except AttributeError:
name = "(unbound)"
warnings.warn(
"DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." % (name, value),
RuntimeWarning,
)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datetimefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return "" if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.DateTimeField,
**kwargs,
}
)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
"invalid": _("“%(value)s” value must be a decimal number."),
}
description = _("Decimal number")
def __init__(
self,
verbose_name=None,
name=None,
max_digits=None,
decimal_places=None,
**kwargs,
):
self.max_digits, self.decimal_places = max_digits, decimal_places
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
digits_errors = [
*self._check_decimal_places(),
*self._check_max_digits(),
]
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=self,
id="fields.E130",
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
obj=self,
id="fields.E131",
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=self,
id="fields.E132",
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
obj=self,
id="fields.E133",
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=self,
id="fields.E134",
)
]
return []
@cached_property
def validators(self):
return super().validators + [
validators.DecimalValidator(self.max_digits, self.decimal_places)
]
@cached_property
def context(self):
return decimal.Context(prec=self.max_digits)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.max_digits is not None:
kwargs["max_digits"] = self.max_digits
if self.decimal_places is not None:
kwargs["decimal_places"] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, float):
if math.isnan(value):
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
return self.context.create_decimal_from_float(value)
try:
return decimal.Decimal(value)
except (decimal.InvalidOperation, TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
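# A minimal sketch, not part of Django itself: strings go through
# decimal.Decimal() directly, while floats are converted via the field's
# context (prec=max_digits) to avoid spurious binary-float digits:
#
#     f = DecimalField(max_digits=5, decimal_places=2)
#     f.to_python("1.50")  # Decimal("1.50")
#     f.to_python(None)    # None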
def get_db_prep_save(self, value, connection):
return connection.ops.adapt_decimalfield_value(
self.to_python(value), self.max_digits, self.decimal_places
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
return super().formfield(
**{
"max_digits": self.max_digits,
"decimal_places": self.decimal_places,
"form_class": forms.DecimalField,
**kwargs,
}
)
class DurationField(Field):
"""
Store timedelta objects.
Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
of microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
"invalid": _(
"“%(value)s” value has an invalid format. It must be in "
"[DD] [[HH:]MM:]ss[.uuuuuu] format."
)
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
return duration_microseconds(value)
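# A minimal sketch, not part of Django itself: on backends without a native
# duration type, the stored value is the whole interval in microseconds:
#
#     duration_microseconds(datetime.timedelta(days=1))  # 86400000000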
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super().get_db_converters(connection)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return "" if val is None else duration_string(val)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.DurationField,
**kwargs,
}
)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs.setdefault("max_length", 254)
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# We don't exclude max_length even when it matches the default, as we
# want to be able to change the default in the future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
return super().formfield(
**{
"form_class": forms.EmailField,
**kwargs,
}
)
class FilePathField(Field):
description = _("File path")
def __init__(
self,
verbose_name=None,
name=None,
path="",
match=None,
recursive=False,
allow_files=True,
allow_folders=False,
**kwargs,
):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs.setdefault("max_length", 100)
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_allowing_files_or_folders(**kwargs),
]
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' "
"set to True.",
obj=self,
id="fields.E140",
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.path != "":
kwargs["path"] = self.path
if self.match is not None:
kwargs["match"] = self.match
if self.recursive is not False:
kwargs["recursive"] = self.recursive
if self.allow_files is not True:
kwargs["allow_files"] = self.allow_files
if self.allow_folders is not False:
kwargs["allow_folders"] = self.allow_folders
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def formfield(self, **kwargs):
return super().formfield(
**{
"path": self.path() if callable(self.path) else self.path,
"match": self.match,
"recursive": self.recursive,
"form_class": forms.FilePathField,
"allow_files": self.allow_files,
"allow_folders": self.allow_folders,
**kwargs,
}
)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
"invalid": _("“%(value)s” value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
try:
return float(value)
except (TypeError, ValueError) as e:
raise e.__class__(
"Field '%s' expected a number but got %r." % (self.name, value),
) from e
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.FloatField,
**kwargs,
}
)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
"invalid": _("“%(value)s” value must be an integer."),
}
description = _("Integer")
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_max_length_warning(),
]
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with %s."
% self.__class__.__name__,
hint="Remove 'max_length' from field",
obj=self,
id="fields.W122",
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
validators_ = super().validators
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None and not any(
(
isinstance(validator, validators.MinValueValidator)
and (
validator.limit_value()
if callable(validator.limit_value)
else validator.limit_value
)
>= min_value
)
for validator in validators_
):
validators_.append(validators.MinValueValidator(min_value))
if max_value is not None and not any(
(
isinstance(validator, validators.MaxValueValidator)
and (
validator.limit_value()
if callable(validator.limit_value)
else validator.limit_value
)
<= max_value
)
for validator in validators_
):
validators_.append(validators.MaxValueValidator(max_value))
return validators_
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
try:
return int(value)
except (TypeError, ValueError) as e:
raise e.__class__(
"Field '%s' expected a number but got %r." % (self.name, value),
) from e
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.IntegerField,
**kwargs,
}
)
class BigIntegerField(IntegerField):
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
return super().formfield(
**{
"min_value": -BigIntegerField.MAX_BIGINT - 1,
"max_value": BigIntegerField.MAX_BIGINT,
**kwargs,
}
)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_removed_details = {
"msg": (
"IPAddressField has been removed except for support in "
"historical migrations."
),
"hint": "Use GenericIPAddressField instead.",
"id": "fields.E900",
}
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 15
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def get_internal_type(self):
return "IPAddressField"
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(
self,
verbose_name=None,
name=None,
protocol="both",
unpack_ipv4=False,
*args,
**kwargs,
):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
(
self.default_validators,
invalid_error_message,
) = validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages["invalid"] = invalid_error_message
kwargs["max_length"] = 39
super().__init__(verbose_name, name, *args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_blank_and_null_values(**kwargs),
]
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, "null", False) and getattr(self, "blank", False):
return [
checks.Error(
"GenericIPAddressFields cannot have blank=True if null=False, "
"as blank values are stored as nulls.",
obj=self,
id="fields.E150",
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.unpack_ipv4 is not False:
kwargs["unpack_ipv4"] = self.unpack_ipv4
if self.protocol != "both":
kwargs["protocol"] = self.protocol
if kwargs.get("max_length") == 39:
del kwargs["max_length"]
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value is None:
return None
if not isinstance(value, str):
value = str(value)
value = value.strip()
if ":" in value:
return clean_ipv6_address(
value, self.unpack_ipv4, self.error_messages["invalid"]
)
return value
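# A minimal sketch, not part of Django itself: with unpack_ipv4=True,
# IPv4-mapped IPv6 addresses are normalized back to dotted-quad form:
#
#     f = GenericIPAddressField(unpack_ipv4=True)
#     f.to_python("::ffff:192.0.2.1")  # "192.0.2.1"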
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_ipaddressfield_value(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
if value and ":" in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return str(value)
def formfield(self, **kwargs):
return super().formfield(
**{
"protocol": self.protocol,
"form_class": forms.GenericIPAddressField,
**kwargs,
}
)
class NullBooleanField(BooleanField):
default_error_messages = {
"invalid": _("“%(value)s” value must be either None, True or False."),
"invalid_nullable": _("“%(value)s” value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
system_check_removed_details = {
"msg": (
"NullBooleanField is removed except for support in historical "
"migrations."
),
"hint": "Use BooleanField(null=True) instead.",
"id": "fields.E903",
}
def __init__(self, *args, **kwargs):
kwargs["null"] = True
kwargs["blank"] = True
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["null"]
del kwargs["blank"]
return name, path, args, kwargs
class PositiveIntegerRelDbTypeMixin:
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
if not hasattr(cls, "integer_field_class"):
cls.integer_field_class = next(
(
parent
for parent in cls.__mro__[1:]
if issubclass(parent, IntegerField)
),
None,
)
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. In most cases, a foreign key pointing to a positive integer
primary key will have an integer column data type, but some databases
(e.g. MySQL) have an unsigned integer type. In that case
(related_fields_match_type=True), the primary key should return its
db_type.
"""
if connection.features.related_fields_match_type:
return self.db_type(connection)
else:
return self.integer_field_class().db_type(connection=connection)
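# A minimal sketch, not part of Django itself: on a backend where
# related_fields_match_type is False, a ForeignKey to a PositiveIntegerField
# gets the plain signed integer column type:
#
#     PositiveIntegerField().rel_db_type(connection)
#     # == IntegerField().db_type(connection) on such backends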
class PositiveBigIntegerField(PositiveIntegerRelDbTypeMixin, BigIntegerField):
description = _("Positive big integer")
def get_internal_type(self):
return "PositiveBigIntegerField"
def formfield(self, **kwargs):
return super().formfield(
**{
"min_value": 0,
**kwargs,
}
)
class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
return super().formfield(
**{
"min_value": 0,
**kwargs,
}
)
class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, SmallIntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
return super().formfield(
**{
"min_value": 0,
**kwargs,
}
)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(
self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs
):
self.allow_unicode = allow_unicode
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 50:
del kwargs["max_length"]
if self.db_index is False:
kwargs["db_index"] = False
else:
del kwargs["db_index"]
if self.allow_unicode is not False:
kwargs["allow_unicode"] = self.allow_unicode
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.SlugField,
"allow_unicode": self.allow_unicode,
**kwargs,
}
)
class TextField(Field):
description = _("Text")
def __init__(self, *args, db_collation=None, **kwargs):
super().__init__(*args, **kwargs)
self.db_collation = db_collation
def check(self, **kwargs):
databases = kwargs.get("databases") or []
return [
*super().check(**kwargs),
*self._check_db_collation(databases),
]
def _check_db_collation(self, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, self.model):
continue
connection = connections[db]
if not (
self.db_collation is None
or "supports_collation_on_textfield"
in self.model._meta.required_db_features
or connection.features.supports_collation_on_textfield
):
errors.append(
checks.Error(
"%s does not support a database collation on "
"TextFields." % connection.display_name,
obj=self,
id="fields.E190",
),
)
return errors
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into the widget, for example).
return super().formfield(
**{
"max_length": self.max_length,
**({} if self.choices is not None else {"widget": forms.Textarea}),
**kwargs,
}
)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.db_collation:
kwargs["db_collation"] = self.db_collation
return name, path, args, kwargs
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
"invalid": _(
"“%(value)s” value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."
),
"invalid_time": _(
"“%(value)s” value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."
),
}
description = _("Time")
def __init__(
self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs
):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs["editable"] = False
kwargs["blank"] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual time or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
value = self.default
if isinstance(value, datetime.datetime):
now = None
elif isinstance(value, datetime.time):
now = _get_naive_now()
# This will not use the right date in the race condition where now
# is just before the date change and value is just past 0:00.
value = datetime.datetime.combine(now.date(), value)
else:
# No explicit time / datetime value -- no checks necessary
return []
# At this point, value is a datetime object.
return self._check_if_value_fixed(value, now=now)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs["blank"]
del kwargs["editable"]
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages["invalid_time"],
code="invalid_time",
params={"value": value},
)
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_timefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return "" if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.TimeField,
**kwargs,
}
)
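# Hedged example of the auto_now machinery above (hypothetical Ping model):
#
#     class Ping(models.Model):
#         seen_at = models.TimeField(auto_now=True)
#
# auto_now forces editable=False and blank=True in __init__(), and pre_save()
# stamps datetime.datetime.now().time() on every save, overriding any value
# assigned on the instance; auto_now_add does the same only on insert.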
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs.setdefault("max_length", 200)
super().__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 200:
del kwargs["max_length"]
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
return super().formfield(
**{
"form_class": forms.URLField,
**kwargs,
}
)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b""]
def __init__(self, *args, **kwargs):
kwargs.setdefault("editable", False)
super().__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
return [*super().check(**kwargs), *self._check_str_default_value()]
def _check_str_default_value(self):
if self.has_default() and isinstance(self.default, str):
return [
checks.Error(
"BinaryField's default cannot be a string. Use bytes "
"content instead.",
obj=self,
id="fields.E170",
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.editable:
kwargs["editable"] = True
else:
del kwargs["editable"]
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_placeholder(self, value, compiler, connection):
return connection.ops.binary_placeholder_sql(value)
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super().get_default()
if default == "":
return b""
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super().get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(self.value_from_object(obj)).decode("ascii")
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, str):
return memoryview(b64decode(value.encode("ascii")))
return value
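# Round-trip sketch for the base64 serialization above (standalone, no model
# required):
#
#     >>> from base64 import b64decode, b64encode
#     >>> encoded = b64encode(b"\x00\xff").decode("ascii")        # value_to_string
#     >>> bytes(memoryview(b64decode(encoded.encode("ascii"))))   # to_python
#     b'\x00\xff'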
class UUIDField(Field):
default_error_messages = {
"invalid": _("“%(value)s” is not a valid UUID."),
}
description = _("Universally unique identifier")
empty_strings_allowed = False
def __init__(self, verbose_name=None, **kwargs):
kwargs["max_length"] = 32
super().__init__(verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["max_length"]
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
if not isinstance(value, uuid.UUID):
value = self.to_python(value)
if connection.features.has_native_uuid_field:
return value
return value.hex
def to_python(self, value):
if value is not None and not isinstance(value, uuid.UUID):
input_form = "int" if isinstance(value, int) else "hex"
try:
return uuid.UUID(**{input_form: value})
except (AttributeError, ValueError):
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
return value
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.UUIDField,
**kwargs,
}
)
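# to_python() accepts several spellings of the same UUID (hedged doctest-style
# sketch):
#
#     >>> f = models.UUIDField()
#     >>> f.to_python("12345678123456781234567812345678")          # hex form
#     UUID('12345678-1234-5678-1234-567812345678')
#     >>> f.to_python(0x12345678123456781234567812345678)         # int form
#     UUID('12345678-1234-5678-1234-567812345678')
#
# On backends without a native uuid type, get_db_prep_value() stores the
# 32-character value.hex string instead.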
class AutoFieldMixin:
db_returning = True
def __init__(self, *args, **kwargs):
kwargs["blank"] = True
super().__init__(*args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_primary_key(),
]
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
"AutoFields must set primary_key=True.",
obj=self,
id="fields.E100",
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["blank"]
kwargs["primary_key"] = True
return name, path, args, kwargs
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def contribute_to_class(self, cls, name, **kwargs):
if cls._meta.auto_field:
raise ValueError(
"Model %s can't have more than one auto-generated field."
% cls._meta.label
)
super().contribute_to_class(cls, name, **kwargs)
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class AutoFieldMeta(type):
"""
Metaclass to maintain backward inheritance compatibility for AutoField.
It is intended that AutoFieldMixin become public API when it is possible to
create a non-integer automatically-generated field using column defaults
stored in the database.
In many areas Django also relies on using isinstance() to check for an
automatically-generated field as a subclass of AutoField. A new flag needs
to be implemented on Field to be used instead.
When these issues have been addressed, this metaclass could be used to
deprecate inheritance from AutoField and use of isinstance() with AutoField
for detecting automatically-generated fields.
"""
@property
def _subclasses(self):
return (BigAutoField, SmallAutoField)
def __instancecheck__(self, instance):
return isinstance(instance, self._subclasses) or super().__instancecheck__(
instance
)
def __subclasscheck__(self, subclass):
return issubclass(subclass, self._subclasses) or super().__subclasscheck__(
subclass
)
class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta):
def get_internal_type(self):
return "AutoField"
def rel_db_type(self, connection):
return IntegerField().db_type(connection=connection)
class BigAutoField(AutoFieldMixin, BigIntegerField):
def get_internal_type(self):
return "BigAutoField"
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
class SmallAutoField(AutoFieldMixin, SmallIntegerField):
def get_internal_type(self):
return "SmallAutoField"
def rel_db_type(self, connection):
return SmallIntegerField().db_type(connection=connection)
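# Consequence of AutoFieldMeta above: the subclass checks hold even though
# BigAutoField and SmallAutoField do not inherit from AutoField directly:
#
#     >>> issubclass(BigAutoField, AutoField)
#     True
#     >>> isinstance(SmallAutoField(primary_key=True), AutoField)
#     True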
|
760524e6bebf7fd881502375c0b1342b3dfda67df87c5962638fd46918ec3108 | import functools
import inspect
from functools import partial
from django import forms
from django.apps import apps
from django.conf import SettingsReference, settings
from django.core import checks, exceptions
from django.db import connection, router
from django.db.backends import utils
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.query_utils import PathInfo
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from . import Field
from .mixins import FieldCacheMixin
from .related_descriptors import (
ForeignKeyDeferredAttribute,
ForwardManyToOneDescriptor,
ForwardOneToOneDescriptor,
ManyToManyDescriptor,
ReverseManyToOneDescriptor,
ReverseOneToOneDescriptor,
)
from .related_lookups import (
RelatedExact,
RelatedGreaterThan,
RelatedGreaterThanOrEqual,
RelatedIn,
RelatedIsNull,
RelatedLessThan,
RelatedLessThanOrEqual,
)
from .reverse_related import ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel
RECURSIVE_RELATIONSHIP_CONSTANT = "self"
def resolve_relation(scope_model, relation):
"""
Transform relation into a model or fully-qualified model string of the form
"app_label.ModelName", relative to scope_model.
The relation argument can be:
* RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case
the model argument will be returned.
* A bare model name without an app_label, in which case scope_model's
app_label will be prepended.
* An "app_label.ModelName" string.
* A model class, which will be returned unchanged.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
relation = scope_model
# Look for an "app.Model" relation
if isinstance(relation, str):
if "." not in relation:
relation = "%s.%s" % (scope_model._meta.app_label, relation)
return relation
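# Hedged examples of the resolution rules above (hypothetical "library" app
# with Book and Author models):
#
#     resolve_relation(Book, "self")            -> Book
#     resolve_relation(Book, "Author")          -> "library.Author"
#     resolve_relation(Book, "shop.Publisher")  -> "shop.Publisher"
#     resolve_relation(Book, Author)            -> Author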
def lazy_related_operation(function, model, *related_models, **kwargs):
"""
Schedule `function` to be called once `model` and all `related_models`
have been imported and registered with the app registry. `function` will
be called with the newly-loaded model classes as its positional arguments,
plus any optional keyword arguments.
The `model` argument must be a model class. Each subsequent positional
argument is another model, or a reference to another model - see
`resolve_relation()` for the various forms these may take. Any relative
references will be resolved relative to `model`.
This is a convenience wrapper for `Apps.lazy_model_operation` - the app
registry model used is the one found in `model._meta.apps`.
"""
models = [model] + [resolve_relation(model, rel) for rel in related_models]
model_keys = (make_model_tuple(m) for m in models)
apps = model._meta.apps
return apps.lazy_model_operation(partial(function, **kwargs), *model_keys)
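# Minimal sketch of a call site (hypothetical names; mirrors the real usage
# in RelatedField.contribute_to_class() below):
#
#     def resolve(model, related, field=None):
#         field.remote_field.model = related   # runs once both models load
#
#     lazy_related_operation(resolve, MyModel, "otherapp.Other", field=fk)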
class RelatedField(FieldCacheMixin, Field):
"""Base class that all relational fields inherit from."""
# Field flags
one_to_many = False
one_to_one = False
many_to_many = False
many_to_one = False
def __init__(
self,
related_name=None,
related_query_name=None,
limit_choices_to=None,
**kwargs,
):
self._related_name = related_name
self._related_query_name = related_query_name
self._limit_choices_to = limit_choices_to
super().__init__(**kwargs)
@cached_property
def related_model(self):
# Can't cache this property until all the models are loaded.
apps.check_models_ready()
return self.remote_field.model
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_related_name_is_valid(),
*self._check_related_query_name_is_valid(),
*self._check_relation_model_exists(),
*self._check_referencing_to_swapped_model(),
*self._check_clashes(),
]
def _check_related_name_is_valid(self):
import keyword
related_name = self.remote_field.related_name
if related_name is None:
return []
is_valid_id = (
not keyword.iskeyword(related_name) and related_name.isidentifier()
)
if not (is_valid_id or related_name.endswith("+")):
return [
checks.Error(
"The name '%s' is invalid related_name for field %s.%s"
% (
self.remote_field.related_name,
self.model._meta.object_name,
self.name,
),
hint=(
"Related name must be a valid Python identifier or end with a "
"'+'"
),
obj=self,
id="fields.E306",
)
]
return []
def _check_related_query_name_is_valid(self):
if self.remote_field.is_hidden():
return []
rel_query_name = self.related_query_name()
errors = []
if rel_query_name.endswith("_"):
errors.append(
checks.Error(
"Reverse query name '%s' must not end with an underscore."
% rel_query_name,
hint=(
"Add or change a related_name or related_query_name "
"argument for this field."
),
obj=self,
id="fields.E308",
)
)
if LOOKUP_SEP in rel_query_name:
errors.append(
checks.Error(
"Reverse query name '%s' must not contain '%s'."
% (rel_query_name, LOOKUP_SEP),
hint=(
"Add or change a related_name or related_query_name "
"argument for this field."
),
obj=self,
id="fields.E309",
)
)
return errors
def _check_relation_model_exists(self):
rel_is_missing = self.remote_field.model not in self.opts.apps.get_models()
rel_is_string = isinstance(self.remote_field.model, str)
model_name = (
self.remote_field.model
if rel_is_string
else self.remote_field.model._meta.object_name
)
if rel_is_missing and (
rel_is_string or not self.remote_field.model._meta.swapped
):
return [
checks.Error(
"Field defines a relation with model '%s', which is either "
"not installed, or is abstract." % model_name,
obj=self,
id="fields.E300",
)
]
return []
def _check_referencing_to_swapped_model(self):
if (
self.remote_field.model not in self.opts.apps.get_models()
and not isinstance(self.remote_field.model, str)
and self.remote_field.model._meta.swapped
):
return [
checks.Error(
"Field defines a relation with the model '%s', which has "
"been swapped out." % self.remote_field.model._meta.label,
hint="Update the relation to point at 'settings.%s'."
% self.remote_field.model._meta.swappable,
obj=self,
id="fields.E301",
)
]
return []
def _check_clashes(self):
"""Check accessor and reverse query name clashes."""
from django.db.models.base import ModelBase
errors = []
opts = self.model._meta
# f.remote_field.model may be a string instead of a model. Skip if
# model name is not resolved.
if not isinstance(self.remote_field.model, ModelBase):
return []
# Consider that we are checking field `Model.foreign` and the models
# are:
#
# class Target(models.Model):
# model = models.IntegerField()
# model_set = models.IntegerField()
#
# class Model(models.Model):
# foreign = models.ForeignKey(Target)
# m2m = models.ManyToManyField(Target)
# rel_opts.object_name == "Target"
rel_opts = self.remote_field.model._meta
# If the field doesn't install a backward relation on the target model
# (so `is_hidden` returns True), then there are no clashes to check
# and we can skip these fields.
rel_is_hidden = self.remote_field.is_hidden()
rel_name = self.remote_field.get_accessor_name()  # i.e. "model_set"
rel_query_name = self.related_query_name()  # i.e. "model"
# i.e. "app_label.Model.field".
field_name = "%s.%s" % (opts.label, self.name)
# Check clashes between accessor or reverse query name of `field`
# and any other field name -- i.e. accessor for Model.foreign is
# model_set and it clashes with Target.model_set.
potential_clashes = rel_opts.fields + rel_opts.many_to_many
for clash_field in potential_clashes:
# i.e. "app_label.Target.model_set".
clash_name = "%s.%s" % (rel_opts.label, clash_field.name)
if not rel_is_hidden and clash_field.name == rel_name:
errors.append(
checks.Error(
f"Reverse accessor '{rel_opts.object_name}.{rel_name}' "
f"for '{field_name}' clashes with field name "
f"'{clash_name}'.",
hint=(
"Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'."
)
% (clash_name, field_name),
obj=self,
id="fields.E302",
)
)
if clash_field.name == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with field name '%s'."
% (field_name, clash_name),
hint=(
"Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'."
)
% (clash_name, field_name),
obj=self,
id="fields.E303",
)
)
# Check clashes between accessors/reverse query names of `field` and
# any other field accessor -- i.e. Model.foreign accessor clashes with
# Model.m2m accessor.
potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
for clash_field in potential_clashes:
# i.e. "app_label.Model.m2m".
clash_name = "%s.%s" % (
clash_field.related_model._meta.label,
clash_field.field.name,
)
if not rel_is_hidden and clash_field.get_accessor_name() == rel_name:
errors.append(
checks.Error(
f"Reverse accessor '{rel_opts.object_name}.{rel_name}' "
f"for '{field_name}' clashes with reverse accessor for "
f"'{clash_name}'.",
hint=(
"Add or change a related_name argument "
"to the definition for '%s' or '%s'."
)
% (field_name, clash_name),
obj=self,
id="fields.E304",
)
)
if clash_field.get_accessor_name() == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with reverse query name "
"for '%s'." % (field_name, clash_name),
hint=(
"Add or change a related_name argument "
"to the definition for '%s' or '%s'."
)
% (field_name, clash_name),
obj=self,
id="fields.E305",
)
)
return errors
def db_type(self, connection):
# By default related field will not have a column as it relates to
# columns from another table.
return None
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
super().contribute_to_class(cls, name, private_only=private_only, **kwargs)
self.opts = cls._meta
if not cls._meta.abstract:
if self.remote_field.related_name:
related_name = self.remote_field.related_name
else:
related_name = self.opts.default_related_name
if related_name:
related_name = related_name % {
"class": cls.__name__.lower(),
"model_name": cls._meta.model_name.lower(),
"app_label": cls._meta.app_label.lower(),
}
self.remote_field.related_name = related_name
if self.remote_field.related_query_name:
related_query_name = self.remote_field.related_query_name % {
"class": cls.__name__.lower(),
"app_label": cls._meta.app_label.lower(),
}
self.remote_field.related_query_name = related_query_name
def resolve_related_class(model, related, field):
field.remote_field.model = related
field.do_related_class(related, model)
lazy_related_operation(
resolve_related_class, cls, self.remote_field.model, field=self
)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self._limit_choices_to:
kwargs["limit_choices_to"] = self._limit_choices_to
if self._related_name is not None:
kwargs["related_name"] = self._related_name
if self._related_query_name is not None:
kwargs["related_query_name"] = self._related_query_name
return name, path, args, kwargs
def get_forward_related_filter(self, obj):
"""
Return the keyword arguments that, when supplied to
self.model.objects.filter(), would select all instances related through
this field to the remote obj. This is used to build the querysets
returned by related descriptors. obj is an instance of
self.related_field.model.
"""
return {
"%s__%s" % (self.name, rh_field.name): getattr(obj, rh_field.attname)
for _, rh_field in self.related_fields
}
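# Hedged illustration of the dict built above: for a single-column ForeignKey
# named "author" targeting the default "id" primary key, this returns
#
#     {"author__id": getattr(obj, "id")}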
def get_reverse_related_filter(self, obj):
"""
Complement to get_forward_related_filter(). Return the keyword
arguments that, when passed to self.related_field.model.objects.filter(),
select all instances of self.related_field.model related through
this field to obj. obj is an instance of self.model.
"""
base_filter = (
(rh_field.attname, getattr(obj, lh_field.attname))
for lh_field, rh_field in self.related_fields
)
descriptor_filter = self.get_extra_descriptor_filter(obj)
base_q = Q(*base_filter)
if isinstance(descriptor_filter, dict):
return base_q & Q(**descriptor_filter)
elif descriptor_filter:
return base_q & descriptor_filter
return base_q
@property
def swappable_setting(self):
"""
Get the setting that this is powered from for swapping, or None
if it's not swapped in / marked with swappable=False.
"""
if self.swappable:
# Work out string form of "to"
if isinstance(self.remote_field.model, str):
to_string = self.remote_field.model
else:
to_string = self.remote_field.model._meta.label
return apps.get_swappable_settings_name(to_string)
return None
def set_attributes_from_rel(self):
self.name = self.name or (
self.remote_field.model._meta.model_name
+ "_"
+ self.remote_field.model._meta.pk.name
)
if self.verbose_name is None:
self.verbose_name = self.remote_field.model._meta.verbose_name
self.remote_field.set_field_name()
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.contribute_to_related_class(other, self.remote_field)
def get_limit_choices_to(self):
"""
Return ``limit_choices_to`` for this model field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.remote_field.limit_choices_to):
return self.remote_field.limit_choices_to()
return self.remote_field.limit_choices_to
def formfield(self, **kwargs):
"""
Pass ``limit_choices_to`` to the field being constructed.
Only pass it if the remote field exposes get_related_field(), i.e. the
field type actually supports related lookups. This mirrors the strategy
used to pass the ``queryset`` to the field being constructed.
"""
defaults = {}
if hasattr(self.remote_field, "get_related_field"):
# If this is a callable, do not invoke it here. Just pass
# it in the defaults for when the form class will later be
# instantiated.
limit_choices_to = self.remote_field.limit_choices_to
defaults.update(
{
"limit_choices_to": limit_choices_to,
}
)
defaults.update(kwargs)
return super().formfield(**defaults)
def related_query_name(self):
"""
Define the name that can be used to identify this related object in a
table-spanning query.
"""
return (
self.remote_field.related_query_name
or self.remote_field.related_name
or self.opts.model_name
)
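# The fallback chain above, illustrated (hypothetical models): with
#
#     class Book(models.Model):
#         author = models.ForeignKey(Author, on_delete=models.CASCADE)
#
# reverse filtering defaults to the model name,
# Author.objects.filter(book__title=...); related_name="books" changes it to
# books__title=..., and related_query_name takes precedence over both.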
@property
def target_field(self):
"""
When filtering against this relation, return the field on the remote
model against which the filtering should happen.
"""
target_fields = self.path_infos[-1].target_fields
if len(target_fields) > 1:
raise exceptions.FieldError(
"The relation has multiple target fields, but only single target field "
"was asked for"
)
return target_fields[0]
def get_cache_name(self):
return self.name
class ForeignObject(RelatedField):
"""
Abstraction of the ForeignKey relation to support multi-column relations.
"""
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
requires_unique_target = True
related_accessor_class = ReverseManyToOneDescriptor
forward_related_accessor_class = ForwardManyToOneDescriptor
rel_class = ForeignObjectRel
def __init__(
self,
to,
on_delete,
from_fields,
to_fields,
rel=None,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
swappable=True,
**kwargs,
):
if rel is None:
rel = self.rel_class(
self,
to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
super().__init__(
rel=rel,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
**kwargs,
)
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
def __copy__(self):
obj = super().__copy__()
# Remove any cached PathInfo values.
obj.__dict__.pop("path_infos", None)
obj.__dict__.pop("reverse_path_infos", None)
return obj
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_to_fields_exist(),
*self._check_unique_target(),
]
def _check_to_fields_exist(self):
# Skip nonexistent models.
if isinstance(self.remote_field.model, str):
return []
errors = []
for to_field in self.to_fields:
if to_field:
try:
self.remote_field.model._meta.get_field(to_field)
except exceptions.FieldDoesNotExist:
errors.append(
checks.Error(
"The to_field '%s' doesn't exist on the related "
"model '%s'."
% (to_field, self.remote_field.model._meta.label),
obj=self,
id="fields.E312",
)
)
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.remote_field.model, str)
if rel_is_string or not self.requires_unique_target:
return []
try:
self.foreign_related_fields
except exceptions.FieldDoesNotExist:
return []
if not self.foreign_related_fields:
return []
unique_foreign_fields = {
frozenset([f.name])
for f in self.remote_field.model._meta.get_fields()
if getattr(f, "unique", False)
}
unique_foreign_fields.update(
{frozenset(ut) for ut in self.remote_field.model._meta.unique_together}
)
unique_foreign_fields.update(
{
frozenset(uc.fields)
for uc in self.remote_field.model._meta.total_unique_constraints
}
)
foreign_fields = {f.name for f in self.foreign_related_fields}
has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields)
if not has_unique_constraint and len(self.foreign_related_fields) > 1:
field_combination = ", ".join(
"'%s'" % rel_field.name for rel_field in self.foreign_related_fields
)
model_name = self.remote_field.model.__name__
return [
checks.Error(
"No subset of the fields %s on model '%s' is unique."
% (field_combination, model_name),
hint=(
"Mark a single field as unique=True or add a set of "
"fields to a unique constraint (via unique_together "
"or a UniqueConstraint (without condition) in the "
"model Meta.constraints)."
),
obj=self,
id="fields.E310",
)
]
elif not has_unique_constraint:
field_name = self.foreign_related_fields[0].name
model_name = self.remote_field.model.__name__
return [
checks.Error(
"'%s.%s' must be unique because it is referenced by "
"a foreign key." % (model_name, field_name),
hint=(
"Add unique=True to this field or add a "
"UniqueConstraint (without condition) in the model "
"Meta.constraints."
),
obj=self,
id="fields.E311",
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs["on_delete"] = self.remote_field.on_delete
kwargs["from_fields"] = self.from_fields
kwargs["to_fields"] = self.to_fields
if self.remote_field.parent_link:
kwargs["parent_link"] = self.remote_field.parent_link
if isinstance(self.remote_field.model, str):
if "." in self.remote_field.model:
app_label, model_name = self.remote_field.model.split(".")
kwargs["to"] = "%s.%s" % (app_label, model_name.lower())
else:
kwargs["to"] = self.remote_field.model.lower()
else:
kwargs["to"] = self.remote_field.model._meta.label_lower
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs["to"], "setting_name"):
if kwargs["to"].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ForeignKey pointing to a model "
"that is swapped in place of more than one model (%s and %s)"
% (kwargs["to"].setting_name, swappable_setting)
)
# Set it
kwargs["to"] = SettingsReference(
kwargs["to"],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if not self.from_fields or len(self.from_fields) != len(self.to_fields):
raise ValueError(
"Foreign Object from and to fields must be the same non-zero length"
)
if isinstance(self.remote_field.model, str):
raise ValueError(
"Related model %r cannot be resolved" % self.remote_field.model
)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (
self
if from_field_name == RECURSIVE_RELATIONSHIP_CONSTANT
else self.opts.get_field(from_field_name)
)
to_field = (
self.remote_field.model._meta.pk
if to_field_name is None
else self.remote_field.model._meta.get_field(to_field_name)
)
related_fields.append((from_field, to_field))
return related_fields
@cached_property
def related_fields(self):
return self.resolve_related_fields()
@cached_property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@cached_property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@cached_property
def foreign_related_fields(self):
return tuple(
rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field
)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
opts = instance._meta
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if (
not possible_parent_link
or possible_parent_link.primary_key
or possible_parent_link.model._meta.abstract
):
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super().get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple(
(lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source
)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
Return an extra filter condition for related object fetching when the
user accesses 'instance.fieldname'; that is, the extra filter is applied
in the field's descriptor.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
def get_extra_restriction(self, alias, related_alias):
"""
Return a condition used for joining and subquery pushdown. The
condition is an object that responds to the as_sql(compiler, connection)
method.
Note that currently referring to both the 'alias' and 'related_alias'
will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self, filtered_relation=None):
"""Get path from this field to the related model."""
opts = self.remote_field.model._meta
from_opts = self.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=self.foreign_related_fields,
join_field=self,
m2m=False,
direct=True,
filtered_relation=filtered_relation,
)
]
@cached_property
def path_infos(self):
return self.get_path_info()
def get_reverse_path_info(self, filtered_relation=None):
"""Get path from the related model to this field's model."""
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self.remote_field,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)
]
@cached_property
def reverse_path_infos(self):
return self.get_reverse_path_info()
@classmethod
@functools.lru_cache(maxsize=None)
def get_lookups(cls):
bases = inspect.getmro(cls)
bases = bases[: bases.index(ForeignObject) + 1]
class_lookups = [parent.__dict__.get("class_lookups", {}) for parent in bases]
return cls.merge_dicts(class_lookups)
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
super().contribute_to_class(cls, name, private_only=private_only, **kwargs)
setattr(cls, self.name, self.forward_related_accessor_class(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if (
not self.remote_field.is_hidden()
and not related.related_model._meta.swapped
):
setattr(
cls._meta.concrete_model,
related.get_accessor_name(),
self.related_accessor_class(related),
)
# While 'limit_choices_to' might be a callable, simply pass
# it along for later - this is too early because it's still
# model load time.
if self.remote_field.limit_choices_to:
cls._meta.related_fkey_lookups.append(
self.remote_field.limit_choices_to
)
ForeignObject.register_lookup(RelatedIn)
ForeignObject.register_lookup(RelatedExact)
ForeignObject.register_lookup(RelatedLessThan)
ForeignObject.register_lookup(RelatedGreaterThan)
ForeignObject.register_lookup(RelatedGreaterThanOrEqual)
ForeignObject.register_lookup(RelatedLessThanOrEqual)
ForeignObject.register_lookup(RelatedIsNull)
class ForeignKey(ForeignObject):
"""
Provide a many-to-one relation by adding a column to the local model
to hold the remote value.
By default ForeignKey will target the pk of the remote model but this
behavior can be changed by using the ``to_field`` argument.
"""
descriptor_class = ForeignKeyDeferredAttribute
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
rel_class = ManyToOneRel
empty_strings_allowed = False
default_error_messages = {
"invalid": _("%(model)s instance with %(field)s %(value)r does not exist.")
}
description = _("Foreign Key (type determined by related field)")
def __init__(
self,
to,
on_delete,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
to_field=None,
db_constraint=True,
**kwargs,
):
try:
to._meta.model_name
except AttributeError:
if not isinstance(to, str):
raise TypeError(
"%s(%r) is invalid. First parameter to ForeignKey must be "
"either a model, a model name, or the string %r"
% (
self.__class__.__name__,
to,
RECURSIVE_RELATIONSHIP_CONSTANT,
)
)
else:
# For backwards compatibility purposes, we need to *try* to set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
if not callable(on_delete):
raise TypeError("on_delete must be callable.")
kwargs["rel"] = self.rel_class(
self,
to,
to_field,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
kwargs.setdefault("db_index", True)
super().__init__(
to,
on_delete,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
from_fields=[RECURSIVE_RELATIONSHIP_CONSTANT],
to_fields=[to_field],
**kwargs,
)
self.db_constraint = db_constraint
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_on_delete(),
*self._check_unique(),
]
def _check_on_delete(self):
on_delete = getattr(self.remote_field, "on_delete", None)
if on_delete == SET_NULL and not self.null:
return [
checks.Error(
"Field specifies on_delete=SET_NULL, but cannot be null.",
hint=(
"Set null=True argument on the field, or change the on_delete "
"rule."
),
obj=self,
id="fields.E320",
)
]
elif on_delete == SET_DEFAULT and not self.has_default():
return [
checks.Error(
"Field specifies on_delete=SET_DEFAULT, but has no default value.",
hint="Set a default value, or change the on_delete rule.",
obj=self,
id="fields.E321",
)
]
else:
return []
def _check_unique(self, **kwargs):
return (
[
checks.Warning(
"Setting unique=True on a ForeignKey has the same effect as using "
"a OneToOneField.",
hint=(
"ForeignKey(unique=True) is usually better served by a "
"OneToOneField."
),
obj=self,
id="fields.W342",
)
]
if self.unique
else []
)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["to_fields"]
del kwargs["from_fields"]
# Handle the simpler arguments
if self.db_index:
del kwargs["db_index"]
else:
kwargs["db_index"] = False
if self.db_constraint is not True:
kwargs["db_constraint"] = self.db_constraint
# Rel needs more work.
to_meta = getattr(self.remote_field.model, "_meta", None)
if self.remote_field.field_name and (
not to_meta
or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)
):
kwargs["to_field"] = self.remote_field.field_name
return name, path, args, kwargs
def to_python(self, value):
return self.target_field.to_python(value)
@property
def target_field(self):
return self.foreign_related_fields[0]
def get_reverse_path_info(self, filtered_relation=None):
"""Get path from the related model to this field's model."""
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self.remote_field,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)
]
def validate(self, value, model_instance):
if self.remote_field.parent_link:
return
super().validate(value, model_instance)
if value is None:
return
using = router.db_for_read(self.remote_field.model, instance=model_instance)
qs = self.remote_field.model._base_manager.using(using).filter(
**{self.remote_field.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={
"model": self.remote_field.model._meta.verbose_name,
"pk": value,
"field": self.remote_field.field_name,
"value": value,
}, # 'pk' is included for backwards compatibility
)
def resolve_related_fields(self):
related_fields = super().resolve_related_fields()
for from_field, to_field in related_fields:
if (
to_field
and to_field.model != self.remote_field.model._meta.concrete_model
):
raise exceptions.FieldError(
"'%s.%s' refers to field '%s' which is not local to model "
"'%s'."
% (
self.model._meta.label,
self.name,
to_field.name,
self.remote_field.model._meta.concrete_model._meta.label,
)
)
return related_fields
def get_attname(self):
return "%s_id" % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_default(self):
"""Return the to_field if the default value is an object."""
field_default = super().get_default()
if isinstance(field_default, self.remote_field.model):
return getattr(field_default, self.target_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
if value is None or (
value == ""
and (
not self.target_field.empty_strings_allowed
or connection.features.interprets_empty_strings_as_nulls
)
):
return None
else:
return self.target_field.get_db_prep_save(value, connection=connection)
def get_db_prep_value(self, value, connection, prepared=False):
return self.target_field.get_db_prep_value(value, connection, prepared)
def get_prep_value(self, value):
return self.target_field.get_prep_value(value)
def contribute_to_related_class(self, cls, related):
super().contribute_to_related_class(cls, related)
if self.remote_field.field_name is None:
self.remote_field.field_name = cls._meta.pk.name
def formfield(self, *, using=None, **kwargs):
if isinstance(self.remote_field.model, str):
raise ValueError(
"Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet"
% (self.name, self.remote_field.model)
)
return super().formfield(
**{
"form_class": forms.ModelChoiceField,
"queryset": self.remote_field.model._default_manager.using(using),
"to_field_name": self.remote_field.field_name,
**kwargs,
"blank": self.blank,
}
)
def db_check(self, connection):
return None
def db_type(self, connection):
return self.target_field.rel_db_type(connection=connection)
def db_parameters(self, connection):
return {"type": self.db_type(connection), "check": self.db_check(connection)}
def convert_empty_strings(self, value, expression, connection):
if (not value) and isinstance(value, str):
return None
return value
def get_db_converters(self, connection):
converters = super().get_db_converters(connection)
if connection.features.interprets_empty_strings_as_nulls:
converters += [self.convert_empty_strings]
return converters
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self.target_field
while isinstance(output_field, ForeignKey):
output_field = output_field.target_field
if output_field is self:
raise ValueError("Cannot resolve output_field.")
return super().get_col(alias, output_field)
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
that it always carries a "unique" constraint with it and the reverse
relation always returns the object pointed to (since there will only ever
be one), rather than returning a list.
"""
# Field flags
many_to_many = False
many_to_one = False
one_to_many = False
one_to_one = True
related_accessor_class = ReverseOneToOneDescriptor
forward_related_accessor_class = ForwardOneToOneDescriptor
rel_class = OneToOneRel
description = _("One-to-one relationship")
def __init__(self, to, on_delete, to_field=None, **kwargs):
kwargs["unique"] = True
super().__init__(to, on_delete, to_field=to_field, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if "unique" in kwargs:
del kwargs["unique"]
return name, path, args, kwargs
def formfield(self, **kwargs):
if self.remote_field.parent_link:
return None
return super().formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.remote_field.model):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
# Remote field object must be cleared otherwise Model.save()
# will reassign attname using the related object pk.
if data is None:
setattr(instance, self.name, data)
def _check_unique(self, **kwargs):
# Override ForeignKey since check isn't applicable here.
return []
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
def set_managed(model, related, through):
through._meta.managed = model._meta.managed or related._meta.managed
to_model = resolve_relation(klass, field.remote_field.model)
name = "%s_%s" % (klass._meta.object_name, field.name)
lazy_related_operation(set_managed, klass, to_model, name)
to = make_model_tuple(to_model)[1]
from_ = klass._meta.model_name
if to == from_:
to = "to_%s" % to
from_ = "from_%s" % from_
meta = type(
"Meta",
(),
{
"db_table": field._get_m2m_db_table(klass._meta),
"auto_created": klass,
"app_label": klass._meta.app_label,
"db_tablespace": klass._meta.db_tablespace,
"unique_together": (from_, to),
"verbose_name": _("%(from)s-%(to)s relationship")
% {"from": from_, "to": to},
"verbose_name_plural": _("%(from)s-%(to)s relationships")
% {"from": from_, "to": to},
"apps": field.model._meta.apps,
},
)
# Construct and return the new class.
return type(
name,
(models.Model,),
{
"Meta": meta,
"__module__": klass.__module__,
from_: models.ForeignKey(
klass,
related_name="%s+" % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
),
to: models.ForeignKey(
to_model,
related_name="%s+" % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
),
},
)
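# Hedged example of what this factory generates: for
#
#     class Book(models.Model):
#         authors = models.ManyToManyField("Author")
#
# it builds an auto-created "Book_authors" model holding a "book" and an
# "author" ForeignKey (both on_delete=CASCADE) with
# unique_together = ("book", "author"); for a self-referential m2m the two
# ForeignKeys are renamed "from_<model>" and "to_<model>" as handled above.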
class ManyToManyField(RelatedField):
"""
Provide a many-to-many relation by using an intermediary model that
holds two ForeignKey fields pointed at the two sides of the relation.
Unless a ``through`` model was provided, ManyToManyField will use the
create_many_to_many_intermediary_model factory to automatically generate
the intermediary model.
"""
# Field flags
many_to_many = True
many_to_one = False
one_to_many = False
one_to_one = False
rel_class = ManyToManyRel
description = _("Many-to-many relationship")
def __init__(
self,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
symmetrical=None,
through=None,
through_fields=None,
db_constraint=True,
db_table=None,
swappable=True,
**kwargs,
):
try:
to._meta
except AttributeError:
if not isinstance(to, str):
raise TypeError(
"%s(%r) is invalid. First parameter to ManyToManyField "
"must be either a model, a model name, or the string %r"
% (
self.__class__.__name__,
to,
RECURSIVE_RELATIONSHIP_CONSTANT,
)
)
if symmetrical is None:
symmetrical = to == RECURSIVE_RELATIONSHIP_CONSTANT
if through is not None and db_table is not None:
raise ValueError(
"Cannot specify a db_table if an intermediary model is used."
)
kwargs["rel"] = self.rel_class(
self,
to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
symmetrical=symmetrical,
through=through,
through_fields=through_fields,
db_constraint=db_constraint,
)
self.has_null_arg = "null" in kwargs
super().__init__(
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
**kwargs,
)
self.db_table = db_table
self.swappable = swappable
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_unique(**kwargs),
*self._check_relationship_model(**kwargs),
*self._check_ignored_options(**kwargs),
*self._check_table_uniqueness(**kwargs),
]
def _check_unique(self, **kwargs):
if self.unique:
return [
checks.Error(
"ManyToManyFields cannot be unique.",
obj=self,
id="fields.E330",
)
]
return []
def _check_ignored_options(self, **kwargs):
warnings = []
if self.has_null_arg:
warnings.append(
checks.Warning(
"null has no effect on ManyToManyField.",
obj=self,
id="fields.W340",
)
)
if self._validators:
warnings.append(
checks.Warning(
"ManyToManyField does not support validators.",
obj=self,
id="fields.W341",
)
)
if self.remote_field.symmetrical and self._related_name:
warnings.append(
checks.Warning(
"related_name has no effect on ManyToManyField "
'with a symmetrical relationship, e.g. to "self".',
obj=self,
id="fields.W345",
)
)
return warnings
def _check_relationship_model(self, from_model=None, **kwargs):
if hasattr(self.remote_field.through, "_meta"):
qualified_model_name = "%s.%s" % (
self.remote_field.through._meta.app_label,
self.remote_field.through.__name__,
)
else:
qualified_model_name = self.remote_field.through
errors = []
if self.remote_field.through not in self.opts.apps.get_models(
include_auto_created=True
):
# The relationship model is not installed.
errors.append(
checks.Error(
"Field specifies a many-to-many relation through model "
"'%s', which has not been installed." % qualified_model_name,
obj=self,
id="fields.E331",
)
)
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
# Set some useful local variables
to_model = resolve_relation(from_model, self.remote_field.model)
from_model_name = from_model._meta.object_name
if isinstance(to_model, str):
to_model_name = to_model
else:
to_model_name = to_model._meta.object_name
relationship_model_name = self.remote_field.through._meta.object_name
self_referential = from_model == to_model
# Count foreign keys in intermediate model
if self_referential:
seen_self = sum(
from_model == getattr(field.remote_field, "model", None)
for field in self.remote_field.through._meta.fields
)
if seen_self > 2 and not self.remote_field.through_fields:
errors.append(
checks.Error(
"The model is used as an intermediate model by "
"'%s', but it has more than two foreign keys "
"to '%s', which is ambiguous. You must specify "
"which two foreign keys Django should use via the "
"through_fields keyword argument."
% (self, from_model_name),
hint=(
"Use through_fields to specify which two foreign keys "
"Django should use."
),
obj=self.remote_field.through,
id="fields.E333",
)
)
else:
# Count foreign keys in relationship model
seen_from = sum(
from_model == getattr(field.remote_field, "model", None)
for field in self.remote_field.through._meta.fields
)
seen_to = sum(
to_model == getattr(field.remote_field, "model", None)
for field in self.remote_field.through._meta.fields
)
if seen_from > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
(
"The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"from '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument."
)
% (self, from_model_name),
hint=(
"If you want to create a recursive relationship, "
'use ManyToManyField("%s", through="%s").'
)
% (
RECURSIVE_RELATIONSHIP_CONSTANT,
relationship_model_name,
),
obj=self,
id="fields.E334",
)
)
if seen_to > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
"The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"to '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument." % (self, to_model_name),
hint=(
"If you want to create a recursive relationship, "
'use ManyToManyField("%s", through="%s").'
)
% (
RECURSIVE_RELATIONSHIP_CONSTANT,
relationship_model_name,
),
obj=self,
id="fields.E335",
)
)
if seen_from == 0 or seen_to == 0:
errors.append(
checks.Error(
"The model is used as an intermediate model by "
"'%s', but it does not have a foreign key to '%s' or '%s'."
% (self, from_model_name, to_model_name),
obj=self.remote_field.through,
id="fields.E336",
)
)
# Validate `through_fields`.
if self.remote_field.through_fields is not None:
# Validate that we're given an iterable of at least two items
# and that none of them is "falsy".
if not (
len(self.remote_field.through_fields) >= 2
and self.remote_field.through_fields[0]
and self.remote_field.through_fields[1]
):
errors.append(
checks.Error(
"Field specifies 'through_fields' but does not provide "
"the names of the two link fields that should be used "
"for the relation through model '%s'." % qualified_model_name,
hint=(
"Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"
),
obj=self,
id="fields.E337",
)
)
# Validate the given through fields -- they should be actual
# fields on the through model, and also be foreign keys to the
# expected models.
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
source, through, target = (
from_model,
self.remote_field.through,
self.remote_field.model,
)
source_field_name, target_field_name = self.remote_field.through_fields[
:2
]
for field_name, related_model in (
(source_field_name, source),
(target_field_name, target),
):
possible_field_names = []
for f in through._meta.fields:
if (
hasattr(f, "remote_field")
and getattr(f.remote_field, "model", None) == related_model
):
possible_field_names.append(f.name)
if possible_field_names:
hint = (
"Did you mean one of the following foreign keys to '%s': "
"%s?"
% (
related_model._meta.object_name,
", ".join(possible_field_names),
)
)
else:
hint = None
try:
field = through._meta.get_field(field_name)
except exceptions.FieldDoesNotExist:
errors.append(
checks.Error(
"The intermediary model '%s' has no field '%s'."
% (qualified_model_name, field_name),
hint=hint,
obj=self,
id="fields.E338",
)
)
else:
if not (
hasattr(field, "remote_field")
and getattr(field.remote_field, "model", None)
== related_model
):
errors.append(
checks.Error(
"'%s.%s' is not a foreign key to '%s'."
% (
through._meta.object_name,
field_name,
related_model._meta.object_name,
),
hint=hint,
obj=self,
id="fields.E339",
)
)
return errors
def _check_table_uniqueness(self, **kwargs):
if (
isinstance(self.remote_field.through, str)
or not self.remote_field.through._meta.managed
):
return []
registered_tables = {
model._meta.db_table: model
for model in self.opts.apps.get_models(include_auto_created=True)
if model != self.remote_field.through and model._meta.managed
}
m2m_db_table = self.m2m_db_table()
model = registered_tables.get(m2m_db_table)
# The second condition allows multiple m2m relations on a model if
# some point to a through model that proxies another through model.
if (
model
and model._meta.concrete_model
!= self.remote_field.through._meta.concrete_model
):
if model._meta.auto_created:
def _get_field_name(model):
for field in model._meta.auto_created._meta.many_to_many:
if field.remote_field.through is model:
return field.name
opts = model._meta.auto_created._meta
clashing_obj = "%s.%s" % (opts.label, _get_field_name(model))
else:
clashing_obj = model._meta.label
if settings.DATABASE_ROUTERS:
error_class, error_id = checks.Warning, "fields.W344"
error_hint = (
"You have configured settings.DATABASE_ROUTERS. Verify "
"that the table of %r is correctly routed to a separate "
"database." % clashing_obj
)
else:
error_class, error_id = checks.Error, "fields.E340"
error_hint = None
return [
error_class(
"The field's intermediary table '%s' clashes with the "
"table name of '%s'." % (m2m_db_table, clashing_obj),
obj=self,
hint=error_hint,
id=error_id,
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# Handle the simpler arguments.
if self.db_table is not None:
kwargs["db_table"] = self.db_table
if self.remote_field.db_constraint is not True:
kwargs["db_constraint"] = self.remote_field.db_constraint
# Lowercase model names as they should be treated as case-insensitive.
if isinstance(self.remote_field.model, str):
if "." in self.remote_field.model:
app_label, model_name = self.remote_field.model.split(".")
kwargs["to"] = "%s.%s" % (app_label, model_name.lower())
else:
kwargs["to"] = self.remote_field.model.lower()
else:
kwargs["to"] = self.remote_field.model._meta.label_lower
if getattr(self.remote_field, "through", None) is not None:
if isinstance(self.remote_field.through, str):
kwargs["through"] = self.remote_field.through
elif not self.remote_field.through._meta.auto_created:
kwargs["through"] = self.remote_field.through._meta.label
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error.
if hasattr(kwargs["to"], "setting_name"):
if kwargs["to"].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ManyToManyField pointing to a "
"model that is swapped in place of more than one model "
"(%s and %s)" % (kwargs["to"].setting_name, swappable_setting)
)
kwargs["to"] = SettingsReference(
kwargs["to"],
swappable_setting,
)
return name, path, args, kwargs
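    # Illustrative note (editor's addition, hypothetical names): for
    # ``authors = models.ManyToManyField("library.Author", db_table="book_authors")``
    # this returns kwargs such as {"to": "library.author",
    # "db_table": "book_authors"} -- the string reference is lowercased. For
    # a swappable target such as the user model, "to" is wrapped in a
    # SettingsReference so migrations serialize the setting name instead.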
def _get_path_info(self, direct=False, filtered_relation=None):
"""Called by both direct and indirect m2m traversal."""
int_model = self.remote_field.through
linkfield1 = int_model._meta.get_field(self.m2m_field_name())
linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
if direct:
join1infos = linkfield1.reverse_path_infos
if filtered_relation:
join2infos = linkfield2.get_path_info(filtered_relation)
else:
join2infos = linkfield2.path_infos
else:
join1infos = linkfield2.reverse_path_infos
if filtered_relation:
join2infos = linkfield1.get_path_info(filtered_relation)
else:
join2infos = linkfield1.path_infos
# Get join infos between the last model of join 1 and the first model
# of join 2. Assume the only reason these may differ is due to model
# inheritance.
join1_final = join1infos[-1].to_opts
join2_initial = join2infos[0].from_opts
if join1_final is join2_initial:
intermediate_infos = []
elif issubclass(join1_final.model, join2_initial.model):
intermediate_infos = join1_final.get_path_to_parent(join2_initial.model)
else:
intermediate_infos = join2_initial.get_path_from_parent(join1_final.model)
return [*join1infos, *intermediate_infos, *join2infos]
def get_path_info(self, filtered_relation=None):
return self._get_path_info(direct=True, filtered_relation=filtered_relation)
@cached_property
def path_infos(self):
return self.get_path_info()
def get_reverse_path_info(self, filtered_relation=None):
return self._get_path_info(direct=False, filtered_relation=filtered_relation)
@cached_property
def reverse_path_infos(self):
return self.get_reverse_path_info()
def _get_m2m_db_table(self, opts):
"""
Function that can be curried to provide the m2m table name for this
relation.
"""
if self.remote_field.through is not None:
return self.remote_field.through._meta.db_table
elif self.db_table:
return self.db_table
else:
m2m_table_name = "%s_%s" % (utils.strip_quotes(opts.db_table), self.name)
return utils.truncate_name(m2m_table_name, connection.ops.max_name_length())
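    # Illustrative note (editor's addition, hypothetical names): a field
    # ``toppings`` on ``pizzas.Pizza`` with neither ``through`` nor
    # ``db_table`` set yields "pizzas_pizza_toppings" (the owning model's
    # table plus the field name), truncated to the backend's maximum
    # identifier length.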
def _get_m2m_attr(self, related, attr):
"""
Function that can be curried to provide the source accessor or DB
column name for the m2m table.
"""
cache_attr = "_m2m_%s_cache" % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[0]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if (
f.is_relation
and f.remote_field.model == related.related_model
and (link_field_name is None or link_field_name == f.name)
):
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"""
Function that can be curried to provide the related accessor or DB
column name for the m2m table.
"""
cache_attr = "_m2m_reverse_%s_cache" % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[1]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if f.is_relation and f.remote_field.model == related.model:
if link_field_name is None and related.related_model == related.model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
elif link_field_name is None or link_field_name == f.name:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
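    # Illustrative note (editor's addition, hypothetical names): for a
    # self-referential ``friends = models.ManyToManyField("self")`` on a
    # ``Person`` model, the auto-created intermediary has ``from_person``
    # and ``to_person`` foreign keys; ``_get_m2m_attr`` resolves to the
    # first FK (the source), while the ``found`` flag above makes this
    # method skip it and resolve to the second FK (the target).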
def contribute_to_class(self, cls, name, **kwargs):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.remote_field.symmetrical and (
self.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT
or self.remote_field.model == cls._meta.object_name
):
self.remote_field.related_name = "%s_rel_+" % name
elif self.remote_field.is_hidden():
# If the backwards relation is disabled, replace the original
# related_name with one generated from the m2m field name. Django
# still uses backwards relations internally and we need to avoid
# clashes between multiple m2m fields with related_name == '+'.
self.remote_field.related_name = "_%s_%s_%s_+" % (
cls._meta.app_label,
cls.__name__.lower(),
name,
)
super().contribute_to_class(cls, name, **kwargs)
        # The intermediate m2m model is not auto-created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract, or
        #  3) The class owning the m2m field has been swapped out.
if not cls._meta.abstract:
if self.remote_field.through:
def resolve_through_model(_, model, field):
field.remote_field.through = model
lazy_related_operation(
resolve_through_model, cls, self.remote_field.through, field=self
)
elif not cls._meta.swapped:
self.remote_field.through = create_many_to_many_intermediary_model(
self, cls
)
# Add the descriptor for the m2m relation.
setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False))
# Set up the accessor for the m2m table name for the relation.
self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta)
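    # Illustrative note (editor's addition, hypothetical names): given
    #
    #     class Pizza(models.Model):
    #         toppings = models.ManyToManyField("Topping")
    #
    # this method auto-creates a hidden ``Pizza_toppings`` intermediary
    # model with ``pizza`` and ``topping`` foreign keys, and the descriptor
    # makes ``pizza_instance.toppings`` return a related manager.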
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# and swapped models don't get a related descriptor.
if (
not self.remote_field.is_hidden()
and not related.related_model._meta.swapped
):
setattr(
cls,
related.get_accessor_name(),
ManyToManyDescriptor(self.remote_field, reverse=True),
)
# Set up the accessors for the column names on the m2m table.
self.m2m_column_name = partial(self._get_m2m_attr, related, "column")
self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, "column")
self.m2m_field_name = partial(self._get_m2m_attr, related, "name")
self.m2m_reverse_field_name = partial(
self._get_m2m_reverse_attr, related, "name"
)
get_m2m_rel = partial(self._get_m2m_attr, related, "remote_field")
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = partial(
self._get_m2m_reverse_attr, related, "remote_field"
)
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
return [] if obj.pk is None else list(getattr(obj, self.attname).all())
def save_form_data(self, instance, data):
getattr(instance, self.attname).set(data)
def formfield(self, *, using=None, **kwargs):
defaults = {
"form_class": forms.ModelMultipleChoiceField,
"queryset": self.remote_field.model._default_manager.using(using),
**kwargs,
}
        # If initial is passed in, it's a list of related objects, but the
        # ModelMultipleChoiceField takes a list of primary keys.
if defaults.get("initial") is not None:
initial = defaults["initial"]
if callable(initial):
initial = initial()
defaults["initial"] = [i.pk for i in initial]
return super().formfield(**defaults)
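    # Illustrative note (editor's addition, hypothetical names): passing
    # ``initial=[article1, article2]`` ends up as ``[article1.pk,
    # article2.pk]`` on the resulting ModelMultipleChoiceField, since the
    # form field works with primary keys rather than model instances.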
def db_check(self, connection):
return None
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}