hash stringlengths 64..64 | content stringlengths 0..1.51M
---|---
bef3e3f5ca977935d38bae403cfabb2e78d3554fc950484c2395b8b37582a444 | from django.db import connection
from django.db.models import CharField
from django.db.models.functions import MD5
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import Author
class MD5Tests(TestCase):
@classmethod
def setUpTestData(cls):
Author.objects.bulk_create([
Author(alias='John Smith'),
Author(alias='Jordan Élena'),
Author(alias='皇帝'),
Author(alias=''),
Author(alias=None),
])
def test_basic(self):
authors = Author.objects.annotate(
md5_alias=MD5('alias'),
).values_list('md5_alias', flat=True).order_by('pk')
self.assertSequenceEqual(
authors,
[
'6117323d2cabbc17d44c2b44587f682c',
'ca6d48f6772000141e66591aee49d56c',
'bf2c13bc1154e3d2e7df848cbc8be73d',
'd41d8cd98f00b204e9800998ecf8427e',
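# Oracle stores empty strings as NULL, so the None alias yields the empty-string digest there and NULL elsewhere.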
'd41d8cd98f00b204e9800998ecf8427e' if connection.features.interprets_empty_strings_as_nulls else None,
],
)
def test_transform(self):
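# register_lookup() makes MD5 usable as a transform, enabling the alias__md5 lookup below.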
with register_lookup(CharField, MD5):
authors = Author.objects.filter(
alias__md5='6117323d2cabbc17d44c2b44587f682c',
).values_list('alias', flat=True)
self.assertSequenceEqual(authors, ['John Smith'])
|
a353a061c3b66e8000cc392ac75409422b624a54b9887c777f0899f30f3584f0 | from django.db import connection
from django.db.models import CharField, Value
from django.db.models.functions import Length, LPad, RPad
from django.test import TestCase
from ..models import Author
class PadTests(TestCase):
def test_pad(self):
Author.objects.create(name='John', alias='j')
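# Backends that interpret empty strings as NULL (e.g. Oracle) return '' where others return NULL.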
none_value = '' if connection.features.interprets_empty_strings_as_nulls else None
tests = (
(LPad('name', 7, Value('xy')), 'xyxJohn'),
(RPad('name', 7, Value('xy')), 'Johnxyx'),
(LPad('name', 6, Value('x')), 'xxJohn'),
(RPad('name', 6, Value('x')), 'Johnxx'),
# The default pad string is a space.
(LPad('name', 6), ' John'),
(RPad('name', 6), 'John '),
# If the string is longer than length, it is truncated.
(LPad('name', 2), 'Jo'),
(RPad('name', 2), 'Jo'),
(LPad('name', 0), ''),
(RPad('name', 0), ''),
(LPad('name', None), none_value),
(RPad('name', None), none_value),
(LPad('goes_by', 1), none_value),
(RPad('goes_by', 1), none_value),
)
for function, padded_name in tests:
with self.subTest(function=function):
authors = Author.objects.annotate(padded_name=function)
self.assertQuerysetEqual(authors, [padded_name], lambda a: a.padded_name, ordered=False)
def test_pad_negative_length(self):
for function in (LPad, RPad):
with self.subTest(function=function):
with self.assertRaisesMessage(ValueError, "'length' must be greater or equal to 0."):
function('name', -1)
def test_combined_with_length(self):
Author.objects.create(name='Rhonda', alias='john_smith')
Author.objects.create(name='♥♣♠', alias='bytes')
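# Length() counts characters, not bytes: '♥♣♠' (3 chars) is left-padded to len('bytes') == 5.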
authors = Author.objects.annotate(filled=LPad('name', Length('alias'), output_field=CharField()))
self.assertQuerysetEqual(
authors.order_by('alias'),
[' ♥♣♠', ' Rhonda'],
lambda a: a.filled,
)
|
cfc48ae50103532ed21091f0db7c14584325928fcd14ec05b68b0e6a5f938387 | from datetime import datetime, timedelta
from django.db.models.functions import Now
from django.test import TestCase
from django.utils import timezone
from ..models import Article
lorem_ipsum = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua."""
class NowTests(TestCase):
def test_basic(self):
a1 = Article.objects.create(
title='How to Django',
text=lorem_ipsum,
written=timezone.now(),
)
a2 = Article.objects.create(
title='How to Time Travel',
text=lorem_ipsum,
written=timezone.now(),
)
num_updated = Article.objects.filter(id=a1.id, published=None).update(published=Now())
self.assertEqual(num_updated, 1)
num_updated = Article.objects.filter(id=a1.id, published=None).update(published=Now())
self.assertEqual(num_updated, 0)
a1.refresh_from_db()
self.assertIsInstance(a1.published, datetime)
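# Adding a timedelta to Now() builds a combined expression that the database evaluates on save.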
a2.published = Now() + timedelta(days=2)
a2.save()
a2.refresh_from_db()
self.assertIsInstance(a2.published, datetime)
self.assertQuerysetEqual(
Article.objects.filter(published__lte=Now()),
['How to Django'],
lambda a: a.title
)
self.assertQuerysetEqual(
Article.objects.filter(published__gt=Now()),
['How to Time Travel'],
lambda a: a.title
)
|
d13338f8166ef43653495c8df090caba594c66a2ccab2efffaa1172a177f7064 | from datetime import datetime, timedelta, timezone as datetime_timezone
import pytz
from django.conf import settings
from django.db.models import (
DateField, DateTimeField, F, IntegerField, Max, OuterRef, Subquery,
TimeField,
)
from django.db.models.functions import (
Extract, ExtractDay, ExtractHour, ExtractIsoYear, ExtractMinute,
ExtractMonth, ExtractQuarter, ExtractSecond, ExtractWeek, ExtractWeekDay,
ExtractYear, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute,
TruncMonth, TruncQuarter, TruncSecond, TruncTime, TruncWeek, TruncYear,
)
from django.test import (
TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import timezone
from ..models import Author, DTModel, Fan
def truncate_to(value, kind, tzinfo=None):
# Convert to the target timezone before truncation.
if tzinfo is not None:
value = value.astimezone(tzinfo)
def truncate(value, kind):
if kind == 'second':
return value.replace(microsecond=0)
if kind == 'minute':
return value.replace(second=0, microsecond=0)
if kind == 'hour':
return value.replace(minute=0, second=0, microsecond=0)
if kind == 'day':
if isinstance(value, datetime):
return value.replace(hour=0, minute=0, second=0, microsecond=0)
return value
if kind == 'week':
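# weekday() is 0 for Monday, so subtracting it snaps the value to the start of the ISO week.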
if isinstance(value, datetime):
return (value - timedelta(days=value.weekday())).replace(hour=0, minute=0, second=0, microsecond=0)
return value - timedelta(days=value.weekday())
if kind == 'month':
if isinstance(value, datetime):
return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(day=1)
if kind == 'quarter':
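# Map the month to the first month of its quarter: 1, 4, 7, or 10.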
month_in_quarter = value.month - (value.month - 1) % 3
if isinstance(value, datetime):
return value.replace(month=month_in_quarter, day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(month=month_in_quarter, day=1)
# Otherwise, truncate to year.
if isinstance(value, datetime):
return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(month=1, day=1)
value = truncate(value, kind)
if tzinfo is not None:
# If there was a daylight saving transition, then reset the timezone.
value = timezone.make_aware(value.replace(tzinfo=None), tzinfo)
return value
@override_settings(USE_TZ=False)
class DateFunctionTests(TestCase):
def create_model(self, start_datetime, end_datetime):
return DTModel.objects.create(
name=start_datetime.isoformat() if start_datetime else 'None',
start_datetime=start_datetime,
end_datetime=end_datetime,
start_date=start_datetime.date() if start_datetime else None,
end_date=end_datetime.date() if end_datetime else None,
start_time=start_datetime.time() if start_datetime else None,
end_time=end_datetime.time() if end_datetime else None,
duration=(end_datetime - start_datetime) if start_datetime and end_datetime else None,
)
def test_extract_year_exact_lookup(self):
"""
Extract year uses a BETWEEN filter to compare the year to allow indexes
to be used.
"""
start_datetime = datetime(2015, 6, 15, 14, 10)
end_datetime = datetime(2016, 6, 15, 14, 10)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
for lookup in ('year', 'iso_year'):
with self.subTest(lookup):
qs = DTModel.objects.filter(**{'start_datetime__%s__exact' % lookup: 2015})
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 1)
self.assertEqual(query_string.count('extract'), 0)
# The __exact lookup is implied and should behave the same.
qs = DTModel.objects.filter(**{'start_datetime__%s' % lookup: 2015})
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 1)
self.assertEqual(query_string.count('extract'), 0)
# Date and datetime fields should behave the same.
qs = DTModel.objects.filter(**{'start_date__%s' % lookup: 2015})
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 1)
self.assertEqual(query_string.count('extract'), 0)
# An expression rhs cannot use the BETWEEN optimization.
qs = DTModel.objects.annotate(
start_year=ExtractYear('start_datetime'),
).filter(end_datetime__year=F('start_year') + 1)
self.assertEqual(qs.count(), 1)
query_string = str(qs.query).lower()
self.assertEqual(query_string.count(' between '), 0)
self.assertEqual(query_string.count('extract'), 3)
def test_extract_year_greaterthan_lookup(self):
start_datetime = datetime(2015, 6, 15, 14, 10)
end_datetime = datetime(2016, 6, 15, 14, 10)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
for lookup in ('year', 'iso_year'):
with self.subTest(lookup):
qs = DTModel.objects.filter(**{'start_datetime__%s__gt' % lookup: 2015})
self.assertEqual(qs.count(), 1)
self.assertEqual(str(qs.query).lower().count('extract'), 0)
qs = DTModel.objects.filter(**{'start_datetime__%s__gte' % lookup: 2015})
self.assertEqual(qs.count(), 2)
self.assertEqual(str(qs.query).lower().count('extract'), 0)
qs = DTModel.objects.annotate(
start_year=ExtractYear('start_datetime'),
).filter(**{'end_datetime__%s__gte' % lookup: F('start_year')})
self.assertEqual(qs.count(), 1)
self.assertGreaterEqual(str(qs.query).lower().count('extract'), 2)
def test_extract_year_lessthan_lookup(self):
start_datetime = datetime(2015, 6, 15, 14, 10)
end_datetime = datetime(2016, 6, 15, 14, 10)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
for lookup in ('year', 'iso_year'):
with self.subTest(lookup):
qs = DTModel.objects.filter(**{'start_datetime__%s__lt' % lookup: 2016})
self.assertEqual(qs.count(), 1)
self.assertEqual(str(qs.query).lower().count('extract'), 0)
qs = DTModel.objects.filter(**{'start_datetime__%s__lte' % lookup: 2016})
self.assertEqual(qs.count(), 2)
self.assertEqual(str(qs.query).lower().count('extract'), 0)
qs = DTModel.objects.annotate(
end_year=ExtractYear('end_datetime'),
).filter(**{'start_datetime__%s__lte' % lookup: F('end_year')})
self.assertEqual(qs.count(), 1)
self.assertGreaterEqual(str(qs.query).lower().count('extract'), 2)
def test_extract_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
with self.assertRaisesMessage(ValueError, 'lookup_name must be provided'):
Extract('start_datetime')
msg = 'Extract input expression must be DateField, DateTimeField, TimeField, or DurationField.'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(extracted=Extract('name', 'hour')))
with self.assertRaisesMessage(
ValueError, "Cannot extract time component 'second' from DateField 'start_date'."):
list(DTModel.objects.annotate(extracted=Extract('start_date', 'second')))
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'year')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'quarter')).order_by('start_datetime'),
[(start_datetime, 2), (end_datetime, 2)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'month')).order_by('start_datetime'),
[(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'day')).order_by('start_datetime'),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'week')).order_by('start_datetime'),
[(start_datetime, 25), (end_datetime, 24)],
lambda m: (m.start_datetime, m.extracted)
)
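# The week_day lookup runs from 1 (Sunday) to 7 (Saturday), hence isoweekday() % 7 + 1.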
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'week_day')).order_by('start_datetime'),
[
(start_datetime, (start_datetime.isoweekday() % 7) + 1),
(end_datetime, (end_datetime.isoweekday() % 7) + 1)
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'hour')).order_by('start_datetime'),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'minute')).order_by('start_datetime'),
[(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('start_datetime', 'second')).order_by('start_datetime'),
[(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__year=Extract('start_datetime', 'year')).count(), 2)
self.assertEqual(DTModel.objects.filter(start_datetime__hour=Extract('start_datetime', 'hour')).count(), 2)
self.assertEqual(DTModel.objects.filter(start_date__month=Extract('start_date', 'month')).count(), 2)
self.assertEqual(DTModel.objects.filter(start_time__hour=Extract('start_time', 'hour')).count(), 2)
def test_extract_none(self):
self.create_model(None, None)
for t in (Extract('start_datetime', 'year'), Extract('start_date', 'year'), Extract('start_time', 'hour')):
with self.subTest(t):
self.assertIsNone(DTModel.objects.annotate(extracted=t).first().extracted)
@skipUnlessDBFeature('has_native_duration_field')
def test_extract_duration(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=Extract('duration', 'second')).order_by('start_datetime'),
[
(start_datetime, (end_datetime - start_datetime).seconds % 60),
(end_datetime, (start_datetime - end_datetime).seconds % 60)
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(
DTModel.objects.annotate(
duration_days=Extract('duration', 'day'),
).filter(duration_days__gt=200).count(),
1
)
@skipIfDBFeature('has_native_duration_field')
def test_extract_duration_without_native_duration_field(self):
msg = 'Extract requires native DurationField database support.'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(extracted=Extract('duration', 'second')))
def test_extract_year_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractYear('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractYear('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__year=ExtractYear('start_datetime')).count(), 2)
def test_extract_iso_year_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractIsoYear('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractIsoYear('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted)
)
# Both dates are from the same week year.
self.assertEqual(DTModel.objects.filter(start_datetime__iso_year=ExtractIsoYear('start_datetime')).count(), 2)
def test_extract_iso_year_func_boundaries(self):
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
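# Around the new year, the ISO week-numbering year can differ from the calendar year.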
week_52_day_2014 = datetime(2014, 12, 27, 13, 0) # Sunday
week_1_day_2014_2015 = datetime(2014, 12, 31, 13, 0) # Wednesday
week_53_day_2015 = datetime(2015, 12, 31, 13, 0) # Thursday
if settings.USE_TZ:
week_1_day_2014_2015 = timezone.make_aware(week_1_day_2014_2015, is_dst=False)
week_52_day_2014 = timezone.make_aware(week_52_day_2014, is_dst=False)
week_53_day_2015 = timezone.make_aware(week_53_day_2015, is_dst=False)
days = [week_52_day_2014, week_1_day_2014_2015, week_53_day_2015]
self.create_model(week_53_day_2015, end_datetime)
self.create_model(week_52_day_2014, end_datetime)
self.create_model(week_1_day_2014_2015, end_datetime)
qs = DTModel.objects.filter(start_datetime__in=days).annotate(
extracted=ExtractIsoYear('start_datetime'),
).order_by('start_datetime')
self.assertQuerysetEqual(qs, [
(week_52_day_2014, 2014),
(week_1_day_2014_2015, 2015),
(week_53_day_2015, 2015),
], lambda m: (m.start_datetime, m.extracted))
def test_extract_month_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMonth('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMonth('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__month=ExtractMonth('start_datetime')).count(), 2)
def test_extract_day_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractDay('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractDay('start_date')).order_by('start_datetime'),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__day=ExtractDay('start_datetime')).count(), 2)
def test_extract_week_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeek('start_datetime')).order_by('start_datetime'),
[(start_datetime, 25), (end_datetime, 24)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeek('start_date')).order_by('start_datetime'),
[(start_datetime, 25), (end_datetime, 24)],
lambda m: (m.start_datetime, m.extracted)
)
# Both dates are from the same week.
self.assertEqual(DTModel.objects.filter(start_datetime__week=ExtractWeek('start_datetime')).count(), 2)
def test_extract_quarter_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 8, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractQuarter('start_datetime')).order_by('start_datetime'),
[(start_datetime, 2), (end_datetime, 3)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractQuarter('start_date')).order_by('start_datetime'),
[(start_datetime, 2), (end_datetime, 3)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__quarter=ExtractQuarter('start_datetime')).count(), 2)
def test_extract_quarter_func_boundaries(self):
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
last_quarter_2014 = datetime(2014, 12, 31, 13, 0)
first_quarter_2015 = datetime(2015, 1, 1, 13, 0)
if settings.USE_TZ:
last_quarter_2014 = timezone.make_aware(last_quarter_2014, is_dst=False)
first_quarter_2015 = timezone.make_aware(first_quarter_2015, is_dst=False)
dates = [last_quarter_2014, first_quarter_2015]
self.create_model(last_quarter_2014, end_datetime)
self.create_model(first_quarter_2015, end_datetime)
qs = DTModel.objects.filter(start_datetime__in=dates).annotate(
extracted=ExtractQuarter('start_datetime'),
).order_by('start_datetime')
self.assertQuerysetEqual(qs, [
(last_quarter_2014, 4),
(first_quarter_2015, 1),
], lambda m: (m.start_datetime, m.extracted))
def test_extract_week_func_boundaries(self):
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
week_52_day_2014 = datetime(2014, 12, 27, 13, 0) # Sunday
week_1_day_2014_2015 = datetime(2014, 12, 31, 13, 0) # Wednesday
week_53_day_2015 = datetime(2015, 12, 31, 13, 0) # Thursday
if settings.USE_TZ:
week_1_day_2014_2015 = timezone.make_aware(week_1_day_2014_2015, is_dst=False)
week_52_day_2014 = timezone.make_aware(week_52_day_2014, is_dst=False)
week_53_day_2015 = timezone.make_aware(week_53_day_2015, is_dst=False)
days = [week_52_day_2014, week_1_day_2014_2015, week_53_day_2015]
self.create_model(week_53_day_2015, end_datetime)
self.create_model(week_52_day_2014, end_datetime)
self.create_model(week_1_day_2014_2015, end_datetime)
qs = DTModel.objects.filter(start_datetime__in=days).annotate(
extracted=ExtractWeek('start_datetime'),
).order_by('start_datetime')
self.assertQuerysetEqual(qs, [
(week_52_day_2014, 52),
(week_1_day_2014_2015, 1),
(week_53_day_2015, 53),
], lambda m: (m.start_datetime, m.extracted))
def test_extract_weekday_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeekDay('start_datetime')).order_by('start_datetime'),
[
(start_datetime, (start_datetime.isoweekday() % 7) + 1),
(end_datetime, (end_datetime.isoweekday() % 7) + 1),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractWeekDay('start_date')).order_by('start_datetime'),
[
(start_datetime, (start_datetime.isoweekday() % 7) + 1),
(end_datetime, (end_datetime.isoweekday() % 7) + 1),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__week_day=ExtractWeekDay('start_datetime')).count(), 2)
def test_extract_hour_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractHour('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractHour('start_time')).order_by('start_datetime'),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__hour=ExtractHour('start_datetime')).count(), 2)
def test_extract_minute_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMinute('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractMinute('start_time')).order_by('start_datetime'),
[(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__minute=ExtractMinute('start_datetime')).count(), 2)
def test_extract_second_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractSecond('start_datetime')).order_by('start_datetime'),
[(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=ExtractSecond('start_time')).order_by('start_datetime'),
[(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__second=ExtractSecond('start_datetime')).count(), 2)
def test_trunc_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
msg = 'output_field must be either DateField, TimeField, or DateTimeField'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(truncated=Trunc('start_datetime', 'year', output_field=IntegerField())))
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
list(DTModel.objects.annotate(truncated=Trunc('name', 'year', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'second')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'month')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'month', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'second', output_field=DateTimeField())))
def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField())
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, kind)),
(end_datetime, truncate_to(end_datetime, kind))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField())
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), kind)),
(end_datetime, truncate_to(end_datetime.date(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField())
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), kind)),
(end_datetime, truncate_to(end_datetime.time(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
test_date_kind('year')
test_date_kind('quarter')
test_date_kind('month')
test_date_kind('week')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('quarter')
test_datetime_kind('month')
test_datetime_kind('week')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
def test_trunc_none(self):
self.create_model(None, None)
for t in (Trunc('start_datetime', 'year'), Trunc('start_date', 'year'), Trunc('start_time', 'hour')):
with self.subTest(t):
self.assertIsNone(DTModel.objects.annotate(truncated=t).first().truncated)
def test_trunc_year_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'year')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncYear('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'year')),
(end_datetime, truncate_to(end_datetime, 'year')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncYear('start_date')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), 'year')),
(end_datetime, truncate_to(end_datetime.date(), 'year')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncYear('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncYear('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncYear('start_time', output_field=TimeField())))
def test_trunc_quarter_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = truncate_to(datetime(2016, 10, 15, 14, 10, 50, 123), 'quarter')
last_quarter_2015 = truncate_to(datetime(2015, 12, 31, 14, 10, 50, 123), 'quarter')
first_quarter_2016 = truncate_to(datetime(2016, 1, 1, 14, 10, 50, 123), 'quarter')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
last_quarter_2015 = timezone.make_aware(last_quarter_2015, is_dst=False)
first_quarter_2016 = timezone.make_aware(first_quarter_2016, is_dst=False)
self.create_model(start_datetime=start_datetime, end_datetime=end_datetime)
self.create_model(start_datetime=end_datetime, end_datetime=start_datetime)
self.create_model(start_datetime=last_quarter_2015, end_datetime=end_datetime)
self.create_model(start_datetime=first_quarter_2016, end_datetime=end_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncQuarter('start_date')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), 'quarter')),
(last_quarter_2015, truncate_to(last_quarter_2015.date(), 'quarter')),
(first_quarter_2016, truncate_to(first_quarter_2016.date(), 'quarter')),
(end_datetime, truncate_to(end_datetime.date(), 'quarter')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncQuarter('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'quarter')),
(last_quarter_2015, truncate_to(last_quarter_2015, 'quarter')),
(first_quarter_2016, truncate_to(first_quarter_2016, 'quarter')),
(end_datetime, truncate_to(end_datetime, 'quarter')),
],
lambda m: (m.start_datetime, m.extracted)
)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncQuarter('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncQuarter('start_time', output_field=TimeField())))
def test_trunc_month_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'month')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMonth('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'month')),
(end_datetime, truncate_to(end_datetime, 'month')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMonth('start_date')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), 'month')),
(end_datetime, truncate_to(end_datetime.date(), 'month')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncMonth('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMonth('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMonth('start_time', output_field=TimeField())))
def test_trunc_week_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'week')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncWeek('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'week')),
(end_datetime, truncate_to(end_datetime, 'week')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncWeek('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncWeek('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncWeek('start_time', output_field=TimeField())))
def test_trunc_date_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncDate('start_datetime')).order_by('start_datetime'),
[
(start_datetime, start_datetime.date()),
(end_datetime, end_datetime.date()),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__date=TruncDate('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"):
list(DTModel.objects.annotate(truncated=TruncDate('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"):
list(DTModel.objects.annotate(truncated=TruncDate('start_time', output_field=TimeField())))
def test_trunc_date_none(self):
self.create_model(None, None)
self.assertIsNone(DTModel.objects.annotate(truncated=TruncDate('start_datetime')).first().truncated)
def test_trunc_time_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncTime('start_datetime')).order_by('start_datetime'),
[
(start_datetime, start_datetime.time()),
(end_datetime, end_datetime.time()),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__time=TruncTime('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date', output_field=DateField())))
def test_trunc_time_none(self):
self.create_model(None, None)
self.assertIsNone(DTModel.objects.annotate(truncated=TruncTime('start_datetime')).first().truncated)
def test_trunc_day_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'day')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncDay('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'day')),
(end_datetime, truncate_to(end_datetime, 'day')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncDay('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncDay('start_time')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncDay('start_time', output_field=TimeField())))
def test_trunc_hour_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'hour')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncHour('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'hour')),
(end_datetime, truncate_to(end_datetime, 'hour')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncHour('start_time')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), 'hour')),
(end_datetime, truncate_to(end_datetime.time(), 'hour')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncHour('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncHour('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncHour('start_date', output_field=DateField())))
def test_trunc_minute_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'minute')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMinute('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'minute')),
(end_datetime, truncate_to(end_datetime, 'minute')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncMinute('start_time')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), 'minute')),
(end_datetime, truncate_to(end_datetime.time(), 'minute')),
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncMinute('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMinute('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncMinute('start_date', output_field=DateField())))
def test_trunc_second_func(self):
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'second')
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncSecond('start_datetime')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime, 'second')),
(end_datetime, truncate_to(end_datetime, 'second'))
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncSecond('start_time')).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), 'second')),
(end_datetime, truncate_to(end_datetime.time(), 'second'))
],
lambda m: (m.start_datetime, m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime=TruncSecond('start_datetime')).count(), 1)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncSecond('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=TruncSecond('start_date', output_field=DateField())))
def test_trunc_subquery_with_parameters(self):
author_1 = Author.objects.create(name='J. R. R. Tolkien')
author_2 = Author.objects.create(name='G. R. R. Martin')
fan_since_1 = datetime(2016, 2, 3, 15, 0, 0)
fan_since_2 = datetime(2015, 2, 3, 15, 0, 0)
fan_since_3 = datetime(2017, 2, 3, 15, 0, 0)
if settings.USE_TZ:
fan_since_1 = timezone.make_aware(fan_since_1, is_dst=False)
fan_since_2 = timezone.make_aware(fan_since_2, is_dst=False)
fan_since_3 = timezone.make_aware(fan_since_3, is_dst=False)
Fan.objects.create(author=author_1, name='Tom', fan_since=fan_since_1)
Fan.objects.create(author=author_1, name='Emma', fan_since=fan_since_2)
Fan.objects.create(author=author_2, name='Isabella', fan_since=fan_since_3)
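# Correlate the subquery with the outer author via OuterRef and keep only the latest fan_since.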
inner = Fan.objects.filter(
author=OuterRef('pk'),
name__in=('Emma', 'Isabella', 'Tom')
).values('author').annotate(newest_fan=Max('fan_since')).values('newest_fan')
outer = Author.objects.annotate(
newest_fan_year=TruncYear(Subquery(inner, output_field=DateTimeField()))
)
tz = pytz.UTC if settings.USE_TZ else None
self.assertSequenceEqual(
outer.order_by('name').values('name', 'newest_fan_year'),
[
{'name': 'G. R. R. Martin', 'newest_fan_year': datetime(2017, 1, 1, 0, 0, tzinfo=tz)},
{'name': 'J. R. R. Tolkien', 'newest_fan_year': datetime(2016, 1, 1, 0, 0, tzinfo=tz)},
]
)
@override_settings(USE_TZ=True, TIME_ZONE='UTC')
class DateFunctionWithTimeZoneTests(DateFunctionTests):
def test_extract_func_with_timezone(self):
start_datetime = datetime(2015, 6, 15, 23, 30, 1, 321)
end_datetime = datetime(2015, 6, 16, 13, 11, 27, 123)
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
melb = pytz.timezone('Australia/Melbourne')
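# Melbourne is UTC+10 in June (no DST), so 23:30 UTC falls on the next day there.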
delta_tzinfo_pos = datetime_timezone(timedelta(hours=5))
delta_tzinfo_neg = datetime_timezone(timedelta(hours=-5, minutes=17))
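# timedelta(hours=-5, minutes=17) is a net offset of -4:43, so 23:30:01 UTC reads as 18:47:01.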
qs = DTModel.objects.annotate(
day=Extract('start_datetime', 'day'),
day_melb=Extract('start_datetime', 'day', tzinfo=melb),
week=Extract('start_datetime', 'week', tzinfo=melb),
isoyear=ExtractIsoYear('start_datetime', tzinfo=melb),
weekday=ExtractWeekDay('start_datetime'),
weekday_melb=ExtractWeekDay('start_datetime', tzinfo=melb),
quarter=ExtractQuarter('start_datetime', tzinfo=melb),
hour=ExtractHour('start_datetime'),
hour_melb=ExtractHour('start_datetime', tzinfo=melb),
hour_with_delta_pos=ExtractHour('start_datetime', tzinfo=delta_tzinfo_pos),
hour_with_delta_neg=ExtractHour('start_datetime', tzinfo=delta_tzinfo_neg),
minute_with_delta_neg=ExtractMinute('start_datetime', tzinfo=delta_tzinfo_neg),
).order_by('start_datetime')
utc_model = qs.get()
self.assertEqual(utc_model.day, 15)
self.assertEqual(utc_model.day_melb, 16)
self.assertEqual(utc_model.week, 25)
self.assertEqual(utc_model.isoyear, 2015)
self.assertEqual(utc_model.weekday, 2)
self.assertEqual(utc_model.weekday_melb, 3)
self.assertEqual(utc_model.quarter, 2)
self.assertEqual(utc_model.hour, 23)
self.assertEqual(utc_model.hour_melb, 9)
self.assertEqual(utc_model.hour_with_delta_pos, 4)
self.assertEqual(utc_model.hour_with_delta_neg, 18)
self.assertEqual(utc_model.minute_with_delta_neg, 47)
with timezone.override(melb):
melb_model = qs.get()
self.assertEqual(melb_model.day, 16)
self.assertEqual(melb_model.day_melb, 16)
self.assertEqual(melb_model.week, 25)
self.assertEqual(melb_model.isoyear, 2015)
self.assertEqual(melb_model.weekday, 3)
self.assertEqual(melb_model.quarter, 2)
self.assertEqual(melb_model.weekday_melb, 3)
self.assertEqual(melb_model.hour, 9)
self.assertEqual(melb_model.hour_melb, 9)
def test_extract_func_explicit_timezone_priority(self):
start_datetime = datetime(2015, 6, 15, 23, 30, 1, 321)
end_datetime = datetime(2015, 6, 16, 13, 11, 27, 123)
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
melb = pytz.timezone('Australia/Melbourne')
with timezone.override(melb):
model = DTModel.objects.annotate(
day_melb=Extract('start_datetime', 'day'),
day_utc=Extract('start_datetime', 'day', tzinfo=timezone.utc),
).order_by('start_datetime').get()
self.assertEqual(model.day_melb, 16)
self.assertEqual(model.day_utc, 15)
def test_trunc_timezone_applied_before_truncation(self):
start_datetime = datetime(2016, 1, 1, 1, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
melb = pytz.timezone('Australia/Melbourne')
pacific = pytz.timezone('US/Pacific')
model = DTModel.objects.annotate(
melb_year=TruncYear('start_datetime', tzinfo=melb),
pacific_year=TruncYear('start_datetime', tzinfo=pacific),
).order_by('start_datetime').get()
self.assertEqual(model.start_datetime, start_datetime)
self.assertEqual(model.melb_year, truncate_to(start_datetime, 'year', melb))
self.assertEqual(model.pacific_year, truncate_to(start_datetime, 'year', pacific))
self.assertEqual(model.start_datetime.year, 2016)
self.assertEqual(model.melb_year.year, 2016)
self.assertEqual(model.pacific_year.year, 2015)
def test_trunc_ambiguous_and_invalid_times(self):
sao = pytz.timezone('America/Sao_Paulo')
utc = pytz.timezone('UTC')
start_datetime = utc.localize(datetime(2016, 10, 16, 13))
end_datetime = utc.localize(datetime(2016, 2, 21, 1))
self.create_model(start_datetime, end_datetime)
with timezone.override(sao):
with self.assertRaisesMessage(pytz.NonExistentTimeError, '2016-10-16 00:00:00'):
model = DTModel.objects.annotate(truncated_start=TruncDay('start_datetime')).get()
with self.assertRaisesMessage(pytz.AmbiguousTimeError, '2016-02-20 23:00:00'):
model = DTModel.objects.annotate(truncated_end=TruncHour('end_datetime')).get()
model = DTModel.objects.annotate(
truncated_start=TruncDay('start_datetime', is_dst=False),
truncated_end=TruncHour('end_datetime', is_dst=False),
).get()
self.assertEqual(model.truncated_start.dst(), timedelta(0))
self.assertEqual(model.truncated_end.dst(), timedelta(0))
model = DTModel.objects.annotate(
truncated_start=TruncDay('start_datetime', is_dst=True),
truncated_end=TruncHour('end_datetime', is_dst=True),
).get()
self.assertEqual(model.truncated_start.dst(), timedelta(0, 3600))
self.assertEqual(model.truncated_end.dst(), timedelta(0, 3600))
def test_trunc_func_with_timezone(self):
"""
If the truncated datetime transitions to a different offset (daylight
saving) then the returned value will have that new timezone/offset.
"""
start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321)
end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123)
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
melb = pytz.timezone('Australia/Melbourne')
def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField(), tzinfo=melb)
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.astimezone(melb), kind, melb)),
(end_datetime, truncate_to(end_datetime.astimezone(melb), kind, melb))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField(), tzinfo=melb)
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.date(), kind)),
(end_datetime, truncate_to(end_datetime.date(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField(), tzinfo=melb)
).order_by('start_datetime'),
[
(start_datetime, truncate_to(start_datetime.time(), kind)),
(end_datetime, truncate_to(end_datetime.time(), kind))
],
lambda m: (m.start_datetime, m.truncated)
)
test_date_kind('year')
test_date_kind('quarter')
test_date_kind('month')
test_date_kind('week')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('quarter')
test_datetime_kind('month')
test_datetime_kind('week')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
|
079f12b313de243809e5af4fb5ba0c426ec0a2e37bfbe59b372d9768fa64e685 | from django.db.models import TextField
from django.db.models.functions import Coalesce, Lower
from django.test import TestCase
from django.utils import timezone
from ..models import Article, Author
lorem_ipsum = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua."""
class CoalesceTests(TestCase):
def test_basic(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.annotate(display_name=Coalesce('alias', 'name'))
self.assertQuerysetEqual(
authors.order_by('name'), ['smithj', 'Rhonda'],
lambda a: a.display_name
)
def test_gt_two_expressions(self):
with self.assertRaisesMessage(ValueError, 'Coalesce must take at least two expressions'):
Author.objects.annotate(display_name=Coalesce('alias'))
def test_mixed_values(self):
a1 = Author.objects.create(name='John Smith', alias='smithj')
a2 = Author.objects.create(name='Rhonda')
ar1 = Article.objects.create(
title='How to Django',
text=lorem_ipsum,
written=timezone.now(),
)
ar1.authors.add(a1)
ar1.authors.add(a2)
# Mixed Text and Char fields.
article = Article.objects.annotate(
headline=Coalesce('summary', 'text', output_field=TextField()),
)
self.assertQuerysetEqual(
article.order_by('title'), [lorem_ipsum],
lambda a: a.headline
)
# Mixed Text and Char fields, wrapped in Lower().
article = Article.objects.annotate(
headline=Coalesce(Lower('summary'), Lower('text'), output_field=TextField()),
)
self.assertQuerysetEqual(
article.order_by('title'), [lorem_ipsum.lower()],
lambda a: a.headline
)
def test_ordering(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.order_by(Coalesce('alias', 'name'))
self.assertQuerysetEqual(
authors, ['Rhonda', 'John Smith'],
lambda a: a.name
)
authors = Author.objects.order_by(Coalesce('alias', 'name').asc())
self.assertQuerysetEqual(
authors, ['Rhonda', 'John Smith'],
lambda a: a.name
)
authors = Author.objects.order_by(Coalesce('alias', 'name').desc())
self.assertQuerysetEqual(
authors, ['John Smith', 'Rhonda'],
lambda a: a.name
)
0dc619420ed6872f3fb14b8ee0846db1d64ecdbceeb5472d4487a29b5ebadcb7
import datetime
import decimal
import unittest
from django.db import connection, models
from django.db.models import Avg
from django.db.models.expressions import Value
from django.db.models.functions import Cast
from django.test import (
TestCase, ignore_warnings, override_settings, skipUnlessDBFeature,
)
from ..models import Author, DTModel, Fan, FloatModel
class CastTests(TestCase):
@classmethod
    def setUpTestData(cls):
Author.objects.create(name='Bob', age=1, alias='1')
def test_cast_from_value(self):
numbers = Author.objects.annotate(cast_integer=Cast(Value('0'), models.IntegerField()))
self.assertEqual(numbers.get().cast_integer, 0)
def test_cast_from_field(self):
        numbers = Author.objects.annotate(cast_string=Cast('age', models.CharField(max_length=255)))
self.assertEqual(numbers.get().cast_string, '1')
def test_cast_to_char_field_without_max_length(self):
numbers = Author.objects.annotate(cast_string=Cast('age', models.CharField()))
self.assertEqual(numbers.get().cast_string, '1')
# Silence "Truncated incorrect CHAR(1) value: 'Bob'".
@ignore_warnings(module='django.db.backends.mysql.base')
@skipUnlessDBFeature('supports_cast_with_precision')
def test_cast_to_char_field_with_max_length(self):
names = Author.objects.annotate(cast_string=Cast('name', models.CharField(max_length=1)))
self.assertEqual(names.get().cast_string, 'B')
@skipUnlessDBFeature('supports_cast_with_precision')
def test_cast_to_decimal_field(self):
FloatModel.objects.create(f1=-1.934, f2=3.467)
float_obj = FloatModel.objects.annotate(
cast_f1_decimal=Cast('f1', models.DecimalField(max_digits=8, decimal_places=2)),
cast_f2_decimal=Cast('f2', models.DecimalField(max_digits=8, decimal_places=1)),
).get()
self.assertEqual(float_obj.cast_f1_decimal, decimal.Decimal('-1.93'))
self.assertEqual(float_obj.cast_f2_decimal, decimal.Decimal('3.5'))
author_obj = Author.objects.annotate(
cast_alias_decimal=Cast('alias', models.DecimalField(max_digits=8, decimal_places=2)),
).get()
self.assertEqual(author_obj.cast_alias_decimal, decimal.Decimal('1'))
def test_cast_to_integer(self):
for field_class in (
models.AutoField,
models.BigAutoField,
models.IntegerField,
models.BigIntegerField,
models.SmallIntegerField,
models.PositiveIntegerField,
models.PositiveSmallIntegerField,
):
with self.subTest(field_class=field_class):
numbers = Author.objects.annotate(cast_int=Cast('alias', field_class()))
self.assertEqual(numbers.get().cast_int, 1)
def test_cast_from_db_datetime_to_date(self):
dt_value = datetime.datetime(2018, 9, 28, 12, 42, 10, 234567)
DTModel.objects.create(start_datetime=dt_value)
dtm = DTModel.objects.annotate(
start_datetime_as_date=Cast('start_datetime', models.DateField())
).first()
self.assertEqual(dtm.start_datetime_as_date, datetime.date(2018, 9, 28))
def test_cast_from_db_datetime_to_time(self):
dt_value = datetime.datetime(2018, 9, 28, 12, 42, 10, 234567)
DTModel.objects.create(start_datetime=dt_value)
dtm = DTModel.objects.annotate(
start_datetime_as_time=Cast('start_datetime', models.TimeField())
).first()
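        # Backends keep differing numbers of fractional-second digits when
        # casting; round the expected microseconds to the backend's precision.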
rounded_ms = int(round(.234567, connection.features.time_cast_precision) * 10**6)
self.assertEqual(dtm.start_datetime_as_time, datetime.time(12, 42, 10, rounded_ms))
def test_cast_from_db_date_to_datetime(self):
dt_value = datetime.date(2018, 9, 28)
DTModel.objects.create(start_date=dt_value)
dtm = DTModel.objects.annotate(start_as_datetime=Cast('start_date', models.DateTimeField())).first()
self.assertEqual(dtm.start_as_datetime, datetime.datetime(2018, 9, 28, 0, 0, 0, 0))
def test_cast_from_db_datetime_to_date_group_by(self):
author = Author.objects.create(name='John Smith', age=45)
dt_value = datetime.datetime(2018, 9, 28, 12, 42, 10, 234567)
Fan.objects.create(name='Margaret', age=50, author=author, fan_since=dt_value)
fans = Fan.objects.values('author').annotate(
fan_for_day=Cast('fan_since', models.DateField()),
fans=models.Count('*')
).values()
self.assertEqual(fans[0]['fan_for_day'], datetime.date(2018, 9, 28))
self.assertEqual(fans[0]['fans'], 1)
def test_cast_from_python_to_date(self):
today = datetime.date.today()
dates = Author.objects.annotate(cast_date=Cast(today, models.DateField()))
self.assertEqual(dates.get().cast_date, today)
def test_cast_from_python_to_datetime(self):
now = datetime.datetime.now()
dates = Author.objects.annotate(cast_datetime=Cast(now, models.DateTimeField()))
time_precision = datetime.timedelta(
microseconds=10**(6 - connection.features.time_cast_precision)
)
self.assertAlmostEqual(dates.get().cast_datetime, now, delta=time_precision)
def test_cast_from_python(self):
numbers = Author.objects.annotate(cast_float=Cast(decimal.Decimal(0.125), models.FloatField()))
cast_float = numbers.get().cast_float
self.assertIsInstance(cast_float, float)
self.assertEqual(cast_float, 0.125)
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL test')
@override_settings(DEBUG=True)
def test_expression_wrapped_with_parentheses_on_postgresql(self):
"""
The SQL for the Cast expression is wrapped with parentheses in case
it's a complex expression.
"""
list(Author.objects.annotate(cast_float=Cast(Avg('age'), models.FloatField())))
self.assertIn('(AVG("db_functions_author"."age"))::double precision', connection.queries[-1]['sql'])
def test_cast_to_text_field(self):
self.assertEqual(Author.objects.values_list(Cast('age', models.TextField()), flat=True).get(), '1')
0e67b572304c17ea90b6a7ab164a17142186f860d68d74142c72cc1f8dd2862e
from datetime import datetime, timedelta
from decimal import Decimal
from unittest import skipIf, skipUnless
from django.db import connection
from django.db.models.expressions import RawSQL
from django.db.models.functions import Coalesce, Greatest
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils import timezone
from ..models import Article, Author, DecimalModel, Fan
class GreatestTests(TestCase):
def test_basic(self):
now = timezone.now()
before = now - timedelta(hours=1)
Article.objects.create(title='Testing with Django', written=before, published=now)
articles = Article.objects.annotate(last_updated=Greatest('written', 'published'))
self.assertEqual(articles.first().last_updated, now)
@skipUnlessDBFeature('greatest_least_ignores_nulls')
def test_ignores_null(self):
now = timezone.now()
Article.objects.create(title='Testing with Django', written=now)
articles = Article.objects.annotate(last_updated=Greatest('written', 'published'))
self.assertEqual(articles.first().last_updated, now)
@skipIfDBFeature('greatest_least_ignores_nulls')
def test_propagates_null(self):
Article.objects.create(title='Testing with Django', written=timezone.now())
articles = Article.objects.annotate(last_updated=Greatest('written', 'published'))
self.assertIsNone(articles.first().last_updated)
@skipIf(connection.vendor == 'mysql', "This doesn't work on MySQL")
def test_coalesce_workaround(self):
past = datetime(1900, 1, 1)
now = timezone.now()
Article.objects.create(title='Testing with Django', written=now)
articles = Article.objects.annotate(
last_updated=Greatest(
Coalesce('written', past),
Coalesce('published', past),
),
)
self.assertEqual(articles.first().last_updated, now)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific workaround")
def test_coalesce_workaround_mysql(self):
past = datetime(1900, 1, 1)
now = timezone.now()
Article.objects.create(title='Testing with Django', written=now)
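        # RawSQL casts the parameter so MySQL compares a real DATETIME value
        # with the datetime columns instead of a string.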
past_sql = RawSQL("cast(%s as datetime)", (past,))
articles = Article.objects.annotate(
last_updated=Greatest(
Coalesce('written', past_sql),
Coalesce('published', past_sql),
),
)
self.assertEqual(articles.first().last_updated, now)
def test_all_null(self):
Article.objects.create(title='Testing with Django', written=timezone.now())
articles = Article.objects.annotate(last_updated=Greatest('published', 'updated'))
self.assertIsNone(articles.first().last_updated)
def test_one_expressions(self):
with self.assertRaisesMessage(ValueError, 'Greatest must take at least two expressions'):
Greatest('written')
def test_related_field(self):
author = Author.objects.create(name='John Smith', age=45)
Fan.objects.create(name='Margaret', age=50, author=author)
authors = Author.objects.annotate(highest_age=Greatest('age', 'fans__age'))
self.assertEqual(authors.first().highest_age, 50)
def test_update(self):
author = Author.objects.create(name='James Smith', goes_by='Jim')
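        # Greatest() compares strings lexicographically: 'Jim' > 'James Smith'.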
Author.objects.update(alias=Greatest('name', 'goes_by'))
author.refresh_from_db()
self.assertEqual(author.alias, 'Jim')
def test_decimal_filter(self):
obj = DecimalModel.objects.create(n1=Decimal('1.1'), n2=Decimal('1.2'))
self.assertCountEqual(
DecimalModel.objects.annotate(
greatest=Greatest('n1', 'n2'),
).filter(greatest=Decimal('1.2')),
[obj],
)
663725137f96e34bf4f4fe76ab3e56029680aaa579295a705bd21b20c310f0ee
from unittest import skipUnless
from django.db import connection
from django.db.models import Value
from django.db.models.functions import NullIf
from django.test import TestCase
from ..models import Author
class NullIfTests(TestCase):
@classmethod
def setUpTestData(cls):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda', alias='Rhonda')
def test_basic(self):
authors = Author.objects.annotate(nullif=NullIf('alias', 'name')).values_list('nullif')
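        # NullIf() yields NULL when both arguments are equal; Rhonda's alias
        # matches her name, so her row comes back as None ('' on Oracle).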
self.assertSequenceEqual(
authors, [
('smithj',),
('' if connection.features.interprets_empty_strings_as_nulls else None,)
]
)
def test_null_argument(self):
authors = Author.objects.annotate(nullif=NullIf('name', Value(None))).values_list('nullif')
self.assertSequenceEqual(authors, [('John Smith',), ('Rhonda',)])
def test_too_few_args(self):
msg = "'NullIf' takes exactly 2 arguments (1 given)"
with self.assertRaisesMessage(TypeError, msg):
NullIf('name')
@skipUnless(connection.vendor == 'oracle', 'Oracle specific test for NULL-literal')
def test_null_literal(self):
msg = 'Oracle does not allow Value(None) for expression1.'
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.annotate(nullif=NullIf(Value(None), 'name')).values_list('nullif'))
37dffde62a4fbcc63482d23e3e20f0684b5daa12b77ba5ac82a93ee99322a7ff
from datetime import datetime, timedelta
from decimal import Decimal
from unittest import skipIf, skipUnless
from django.db import connection
from django.db.models.expressions import RawSQL
from django.db.models.functions import Coalesce, Least
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils import timezone
from ..models import Article, Author, DecimalModel, Fan
class LeastTests(TestCase):
def test_basic(self):
now = timezone.now()
before = now - timedelta(hours=1)
Article.objects.create(title='Testing with Django', written=before, published=now)
articles = Article.objects.annotate(first_updated=Least('written', 'published'))
self.assertEqual(articles.first().first_updated, before)
@skipUnlessDBFeature('greatest_least_ignores_nulls')
def test_ignores_null(self):
now = timezone.now()
Article.objects.create(title='Testing with Django', written=now)
articles = Article.objects.annotate(
first_updated=Least('written', 'published'),
)
self.assertEqual(articles.first().first_updated, now)
@skipIfDBFeature('greatest_least_ignores_nulls')
def test_propagates_null(self):
Article.objects.create(title='Testing with Django', written=timezone.now())
articles = Article.objects.annotate(first_updated=Least('written', 'published'))
self.assertIsNone(articles.first().first_updated)
@skipIf(connection.vendor == 'mysql', "This doesn't work on MySQL")
def test_coalesce_workaround(self):
future = datetime(2100, 1, 1)
now = timezone.now()
Article.objects.create(title='Testing with Django', written=now)
articles = Article.objects.annotate(
last_updated=Least(
Coalesce('written', future),
Coalesce('published', future),
),
)
self.assertEqual(articles.first().last_updated, now)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific workaround")
def test_coalesce_workaround_mysql(self):
future = datetime(2100, 1, 1)
now = timezone.now()
Article.objects.create(title='Testing with Django', written=now)
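        # RawSQL casts the parameter so MySQL compares a real DATETIME value
        # with the datetime columns instead of a string.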
future_sql = RawSQL("cast(%s as datetime)", (future,))
articles = Article.objects.annotate(
last_updated=Least(
Coalesce('written', future_sql),
Coalesce('published', future_sql),
),
)
self.assertEqual(articles.first().last_updated, now)
def test_all_null(self):
Article.objects.create(title='Testing with Django', written=timezone.now())
articles = Article.objects.annotate(first_updated=Least('published', 'updated'))
self.assertIsNone(articles.first().first_updated)
def test_one_expressions(self):
with self.assertRaisesMessage(ValueError, 'Least must take at least two expressions'):
Least('written')
def test_related_field(self):
author = Author.objects.create(name='John Smith', age=45)
Fan.objects.create(name='Margaret', age=50, author=author)
authors = Author.objects.annotate(lowest_age=Least('age', 'fans__age'))
self.assertEqual(authors.first().lowest_age, 45)
def test_update(self):
author = Author.objects.create(name='James Smith', goes_by='Jim')
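        # Least() compares strings lexicographically: 'James Smith' < 'Jim'.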
Author.objects.update(alias=Least('name', 'goes_by'))
author.refresh_from_db()
self.assertEqual(author.alias, 'James Smith')
def test_decimal_filter(self):
obj = DecimalModel.objects.create(n1=Decimal('1.1'), n2=Decimal('1.2'))
self.assertCountEqual(
DecimalModel.objects.annotate(
least=Least('n1', 'n2'),
).filter(least=Decimal('1.1')),
[obj],
)
71590fdf9d74c62e757eeed26c107ecc0fbd5c98b8918b992270eb05d924203d
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Sqrt
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class SqrtTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_sqrt=Sqrt('normal')).first()
self.assertIsNone(obj.null_sqrt)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_sqrt=Sqrt('n1'), n2_sqrt=Sqrt('n2')).first()
self.assertIsInstance(obj.n1_sqrt, Decimal)
self.assertIsInstance(obj.n2_sqrt, Decimal)
self.assertAlmostEqual(obj.n1_sqrt, Decimal(math.sqrt(obj.n1)))
self.assertAlmostEqual(obj.n2_sqrt, Decimal(math.sqrt(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_sqrt=Sqrt('f1'), f2_sqrt=Sqrt('f2')).first()
self.assertIsInstance(obj.f1_sqrt, float)
self.assertIsInstance(obj.f2_sqrt, float)
self.assertAlmostEqual(obj.f1_sqrt, math.sqrt(obj.f1))
self.assertAlmostEqual(obj.f2_sqrt, math.sqrt(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=20, normal=15, big=1)
obj = IntegerModel.objects.annotate(
small_sqrt=Sqrt('small'),
normal_sqrt=Sqrt('normal'),
big_sqrt=Sqrt('big'),
).first()
self.assertIsInstance(obj.small_sqrt, float)
self.assertIsInstance(obj.normal_sqrt, float)
self.assertIsInstance(obj.big_sqrt, float)
self.assertAlmostEqual(obj.small_sqrt, math.sqrt(obj.small))
self.assertAlmostEqual(obj.normal_sqrt, math.sqrt(obj.normal))
self.assertAlmostEqual(obj.big_sqrt, math.sqrt(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Sqrt):
DecimalModel.objects.create(n1=Decimal('6.0'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('1.0'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__sqrt__gt=2).get()
self.assertEqual(obj.n1, Decimal('6.0'))
2584387656580ec211275206fddd20a60c042e0b5b47dc4d8ea5498aca42e932
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Ceil
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class CeilTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_ceil=Ceil('normal')).first()
self.assertIsNone(obj.null_ceil)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_ceil=Ceil('n1'), n2_ceil=Ceil('n2')).first()
self.assertIsInstance(obj.n1_ceil, Decimal)
self.assertIsInstance(obj.n2_ceil, Decimal)
self.assertEqual(obj.n1_ceil, Decimal(math.ceil(obj.n1)))
self.assertEqual(obj.n2_ceil, Decimal(math.ceil(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-12.5, f2=21.33)
obj = FloatModel.objects.annotate(f1_ceil=Ceil('f1'), f2_ceil=Ceil('f2')).first()
self.assertIsInstance(obj.f1_ceil, float)
self.assertIsInstance(obj.f2_ceil, float)
self.assertEqual(obj.f1_ceil, math.ceil(obj.f1))
self.assertEqual(obj.f2_ceil, math.ceil(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-11, normal=0, big=-100)
obj = IntegerModel.objects.annotate(
small_ceil=Ceil('small'),
normal_ceil=Ceil('normal'),
big_ceil=Ceil('big'),
).first()
self.assertIsInstance(obj.small_ceil, int)
self.assertIsInstance(obj.normal_ceil, int)
self.assertIsInstance(obj.big_ceil, int)
self.assertEqual(obj.small_ceil, math.ceil(obj.small))
self.assertEqual(obj.normal_ceil, math.ceil(obj.normal))
self.assertEqual(obj.big_ceil, math.ceil(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Ceil):
DecimalModel.objects.create(n1=Decimal('3.12'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('1.25'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__ceil__gt=3).get()
self.assertEqual(obj.n1, Decimal('3.12'))
5111fcad13390eb26d66fafe0909044da07166970ebc7efc4f4536f4d18a9eb7
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import ATan
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class ATanTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_atan=ATan('normal')).first()
self.assertIsNone(obj.null_atan)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_atan=ATan('n1'), n2_atan=ATan('n2')).first()
self.assertIsInstance(obj.n1_atan, Decimal)
self.assertIsInstance(obj.n2_atan, Decimal)
self.assertAlmostEqual(obj.n1_atan, Decimal(math.atan(obj.n1)))
self.assertAlmostEqual(obj.n2_atan, Decimal(math.atan(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_atan=ATan('f1'), f2_atan=ATan('f2')).first()
self.assertIsInstance(obj.f1_atan, float)
self.assertIsInstance(obj.f2_atan, float)
self.assertAlmostEqual(obj.f1_atan, math.atan(obj.f1))
self.assertAlmostEqual(obj.f2_atan, math.atan(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_atan=ATan('small'),
normal_atan=ATan('normal'),
big_atan=ATan('big'),
).first()
self.assertIsInstance(obj.small_atan, float)
self.assertIsInstance(obj.normal_atan, float)
self.assertIsInstance(obj.big_atan, float)
self.assertAlmostEqual(obj.small_atan, math.atan(obj.small))
self.assertAlmostEqual(obj.normal_atan, math.atan(obj.normal))
self.assertAlmostEqual(obj.big_atan, math.atan(obj.big))
def test_transform(self):
with register_lookup(DecimalField, ATan):
DecimalModel.objects.create(n1=Decimal('3.12'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('-5'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__atan__gt=0).get()
self.assertEqual(obj.n1, Decimal('3.12'))
408e27623c8e50936ab0fa2e0947ae1ba2d5096044007a289423d6debdfcab1e
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Floor
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class FloorTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_floor=Floor('normal')).first()
self.assertIsNone(obj.null_floor)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_floor=Floor('n1'), n2_floor=Floor('n2')).first()
self.assertIsInstance(obj.n1_floor, Decimal)
self.assertIsInstance(obj.n2_floor, Decimal)
self.assertEqual(obj.n1_floor, Decimal(math.floor(obj.n1)))
self.assertEqual(obj.n2_floor, Decimal(math.floor(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_floor=Floor('f1'), f2_floor=Floor('f2')).first()
self.assertIsInstance(obj.f1_floor, float)
self.assertIsInstance(obj.f2_floor, float)
self.assertEqual(obj.f1_floor, math.floor(obj.f1))
self.assertEqual(obj.f2_floor, math.floor(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_floor=Floor('small'),
normal_floor=Floor('normal'),
big_floor=Floor('big'),
).first()
self.assertIsInstance(obj.small_floor, int)
self.assertIsInstance(obj.normal_floor, int)
self.assertIsInstance(obj.big_floor, int)
self.assertEqual(obj.small_floor, math.floor(obj.small))
self.assertEqual(obj.normal_floor, math.floor(obj.normal))
self.assertEqual(obj.big_floor, math.floor(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Floor):
DecimalModel.objects.create(n1=Decimal('5.4'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('3.4'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__floor__gt=4).get()
self.assertEqual(obj.n1, Decimal('5.4'))
032ea185c86781cb4b24811aef296761e459bd9b734e3621fe2cf2cef24edeb1
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import ACos
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class ACosTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_acos=ACos('normal')).first()
self.assertIsNone(obj.null_acos)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-0.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_acos=ACos('n1'), n2_acos=ACos('n2')).first()
self.assertIsInstance(obj.n1_acos, Decimal)
self.assertIsInstance(obj.n2_acos, Decimal)
self.assertAlmostEqual(obj.n1_acos, Decimal(math.acos(obj.n1)))
self.assertAlmostEqual(obj.n2_acos, Decimal(math.acos(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-0.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_acos=ACos('f1'), f2_acos=ACos('f2')).first()
self.assertIsInstance(obj.f1_acos, float)
self.assertIsInstance(obj.f2_acos, float)
self.assertAlmostEqual(obj.f1_acos, math.acos(obj.f1))
self.assertAlmostEqual(obj.f2_acos, math.acos(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=0, normal=1, big=-1)
obj = IntegerModel.objects.annotate(
small_acos=ACos('small'),
normal_acos=ACos('normal'),
big_acos=ACos('big'),
).first()
self.assertIsInstance(obj.small_acos, float)
self.assertIsInstance(obj.normal_acos, float)
self.assertIsInstance(obj.big_acos, float)
self.assertAlmostEqual(obj.small_acos, math.acos(obj.small))
self.assertAlmostEqual(obj.normal_acos, math.acos(obj.normal))
self.assertAlmostEqual(obj.big_acos, math.acos(obj.big))
def test_transform(self):
with register_lookup(DecimalField, ACos):
DecimalModel.objects.create(n1=Decimal('0.5'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('-0.9'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__acos__lt=2).get()
self.assertEqual(obj.n1, Decimal('0.5'))
e976a3c1b29ea823ccc8ed1c59df2de0c1f8804109390f3c19589765b1f9526d
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Cos
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class CosTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_cos=Cos('normal')).first()
self.assertIsNone(obj.null_cos)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_cos=Cos('n1'), n2_cos=Cos('n2')).first()
self.assertIsInstance(obj.n1_cos, Decimal)
self.assertIsInstance(obj.n2_cos, Decimal)
self.assertAlmostEqual(obj.n1_cos, Decimal(math.cos(obj.n1)))
self.assertAlmostEqual(obj.n2_cos, Decimal(math.cos(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_cos=Cos('f1'), f2_cos=Cos('f2')).first()
self.assertIsInstance(obj.f1_cos, float)
self.assertIsInstance(obj.f2_cos, float)
self.assertAlmostEqual(obj.f1_cos, math.cos(obj.f1))
self.assertAlmostEqual(obj.f2_cos, math.cos(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_cos=Cos('small'),
normal_cos=Cos('normal'),
big_cos=Cos('big'),
).first()
self.assertIsInstance(obj.small_cos, float)
self.assertIsInstance(obj.normal_cos, float)
self.assertIsInstance(obj.big_cos, float)
self.assertAlmostEqual(obj.small_cos, math.cos(obj.small))
self.assertAlmostEqual(obj.normal_cos, math.cos(obj.normal))
self.assertAlmostEqual(obj.big_cos, math.cos(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Cos):
DecimalModel.objects.create(n1=Decimal('-8.0'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('3.14'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__cos__gt=-0.2).get()
self.assertEqual(obj.n1, Decimal('-8.0'))
dff6b86d563b259ccf540bacdef74a045d8a9a6d920d7894125655b5c8b4282e
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Round
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class RoundTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_round=Round('normal')).first()
self.assertIsNone(obj.null_round)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_round=Round('n1'), n2_round=Round('n2')).first()
self.assertIsInstance(obj.n1_round, Decimal)
self.assertIsInstance(obj.n2_round, Decimal)
self.assertAlmostEqual(obj.n1_round, round(obj.n1))
self.assertAlmostEqual(obj.n2_round, round(obj.n2))
def test_float(self):
FloatModel.objects.create(f1=-27.55, f2=0.55)
obj = FloatModel.objects.annotate(f1_round=Round('f1'), f2_round=Round('f2')).first()
self.assertIsInstance(obj.f1_round, float)
self.assertIsInstance(obj.f2_round, float)
self.assertAlmostEqual(obj.f1_round, round(obj.f1))
self.assertAlmostEqual(obj.f2_round, round(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_round=Round('small'),
normal_round=Round('normal'),
big_round=Round('big'),
).first()
self.assertIsInstance(obj.small_round, int)
self.assertIsInstance(obj.normal_round, int)
self.assertIsInstance(obj.big_round, int)
self.assertEqual(obj.small_round, round(obj.small))
self.assertEqual(obj.normal_round, round(obj.normal))
self.assertEqual(obj.big_round, round(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Round):
DecimalModel.objects.create(n1=Decimal('2.0'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('-1.0'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__round__gt=0).get()
self.assertEqual(obj.n1, Decimal('2.0'))
f946bab65c15e9b46d0d0424fd6eb2b6603b985c70f5deb4ae41b2784a08ac19
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Abs
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class AbsTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_abs=Abs('normal')).first()
self.assertIsNone(obj.null_abs)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-0.8'), n2=Decimal('1.2'))
obj = DecimalModel.objects.annotate(n1_abs=Abs('n1'), n2_abs=Abs('n2')).first()
self.assertIsInstance(obj.n1_abs, Decimal)
self.assertIsInstance(obj.n2_abs, Decimal)
self.assertEqual(obj.n1, -obj.n1_abs)
self.assertEqual(obj.n2, obj.n2_abs)
def test_float(self):
        FloatModel.objects.create(f1=-0.5, f2=12)
obj = FloatModel.objects.annotate(f1_abs=Abs('f1'), f2_abs=Abs('f2')).first()
self.assertIsInstance(obj.f1_abs, float)
self.assertIsInstance(obj.f2_abs, float)
self.assertEqual(obj.f1, -obj.f1_abs)
self.assertEqual(obj.f2, obj.f2_abs)
def test_integer(self):
IntegerModel.objects.create(small=12, normal=0, big=-45)
obj = IntegerModel.objects.annotate(
small_abs=Abs('small'),
normal_abs=Abs('normal'),
big_abs=Abs('big'),
).first()
self.assertIsInstance(obj.small_abs, int)
self.assertIsInstance(obj.normal_abs, int)
self.assertIsInstance(obj.big_abs, int)
self.assertEqual(obj.small, obj.small_abs)
self.assertEqual(obj.normal, obj.normal_abs)
self.assertEqual(obj.big, -obj.big_abs)
def test_transform(self):
with register_lookup(DecimalField, Abs):
DecimalModel.objects.create(n1=Decimal('-1.5'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('-0.5'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__abs__gt=1).get()
self.assertEqual(obj.n1, Decimal('-1.5'))
f2042ca0b1198739a6d323dee5ab7512a36fade8ecf6a0b3ede510574e12550b
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Sign
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class SignTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_sign=Sign('normal')).first()
self.assertIsNone(obj.null_sign)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_sign=Sign('n1'), n2_sign=Sign('n2')).first()
self.assertIsInstance(obj.n1_sign, Decimal)
self.assertIsInstance(obj.n2_sign, Decimal)
self.assertEqual(obj.n1_sign, Decimal('-1'))
self.assertEqual(obj.n2_sign, Decimal('1'))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_sign=Sign('f1'), f2_sign=Sign('f2')).first()
self.assertIsInstance(obj.f1_sign, float)
self.assertIsInstance(obj.f2_sign, float)
self.assertEqual(obj.f1_sign, -1.0)
self.assertEqual(obj.f2_sign, 1.0)
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=0, big=20)
obj = IntegerModel.objects.annotate(
small_sign=Sign('small'),
normal_sign=Sign('normal'),
big_sign=Sign('big'),
).first()
self.assertIsInstance(obj.small_sign, int)
self.assertIsInstance(obj.normal_sign, int)
self.assertIsInstance(obj.big_sign, int)
self.assertEqual(obj.small_sign, -1)
self.assertEqual(obj.normal_sign, 0)
self.assertEqual(obj.big_sign, 1)
def test_transform(self):
with register_lookup(DecimalField, Sign):
DecimalModel.objects.create(n1=Decimal('5.4'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('-0.1'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__sign__lt=0, n2__sign=0).get()
self.assertEqual(obj.n1, Decimal('-0.1'))
c0c4a110407f1ee64492d9af6d0c8af4a9cda0c3977c882904606a6a056a56fc
import math
from decimal import Decimal
from django.db.models.functions import Log
from django.test import TestCase
from ..models import DecimalModel, FloatModel, IntegerModel
class LogTests(TestCase):
def test_null(self):
IntegerModel.objects.create(big=100)
obj = IntegerModel.objects.annotate(
null_log_small=Log('small', 'normal'),
null_log_normal=Log('normal', 'big'),
).first()
self.assertIsNone(obj.null_log_small)
self.assertIsNone(obj.null_log_normal)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('12.9'), n2=Decimal('3.6'))
obj = DecimalModel.objects.annotate(n_log=Log('n1', 'n2')).first()
self.assertIsInstance(obj.n_log, Decimal)
self.assertAlmostEqual(obj.n_log, Decimal(math.log(obj.n2, obj.n1)))
def test_float(self):
FloatModel.objects.create(f1=2.0, f2=4.0)
obj = FloatModel.objects.annotate(f_log=Log('f1', 'f2')).first()
self.assertIsInstance(obj.f_log, float)
self.assertAlmostEqual(obj.f_log, math.log(obj.f2, obj.f1))
def test_integer(self):
IntegerModel.objects.create(small=4, normal=8, big=2)
obj = IntegerModel.objects.annotate(
small_log=Log('small', 'big'),
normal_log=Log('normal', 'big'),
big_log=Log('big', 'big'),
).first()
self.assertIsInstance(obj.small_log, float)
self.assertIsInstance(obj.normal_log, float)
self.assertIsInstance(obj.big_log, float)
self.assertAlmostEqual(obj.small_log, math.log(obj.big, obj.small))
self.assertAlmostEqual(obj.normal_log, math.log(obj.big, obj.normal))
self.assertAlmostEqual(obj.big_log, math.log(obj.big, obj.big))
510b210649918133f1e7c9b11fdadf4c07d61491578cc8072645b050944255b7
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Cot
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class CotTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_cot=Cot('normal')).first()
self.assertIsNone(obj.null_cot)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_cot=Cot('n1'), n2_cot=Cot('n2')).first()
self.assertIsInstance(obj.n1_cot, Decimal)
self.assertIsInstance(obj.n2_cot, Decimal)
self.assertAlmostEqual(obj.n1_cot, Decimal(1 / math.tan(obj.n1)))
self.assertAlmostEqual(obj.n2_cot, Decimal(1 / math.tan(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_cot=Cot('f1'), f2_cot=Cot('f2')).first()
self.assertIsInstance(obj.f1_cot, float)
self.assertIsInstance(obj.f2_cot, float)
self.assertAlmostEqual(obj.f1_cot, 1 / math.tan(obj.f1))
self.assertAlmostEqual(obj.f2_cot, 1 / math.tan(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-5, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_cot=Cot('small'),
normal_cot=Cot('normal'),
big_cot=Cot('big'),
).first()
self.assertIsInstance(obj.small_cot, float)
self.assertIsInstance(obj.normal_cot, float)
self.assertIsInstance(obj.big_cot, float)
self.assertAlmostEqual(obj.small_cot, 1 / math.tan(obj.small))
self.assertAlmostEqual(obj.normal_cot, 1 / math.tan(obj.normal))
self.assertAlmostEqual(obj.big_cot, 1 / math.tan(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Cot):
DecimalModel.objects.create(n1=Decimal('12.0'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('1.0'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__cot__gt=0).get()
self.assertEqual(obj.n1, Decimal('1.0'))
0f65fa021806bf7e232a1dafdedf6b150326e6e393f0220344631ac0acdd414a
import math
from decimal import Decimal
from django.db.models.functions import ATan2
from django.test import TestCase
from ..models import DecimalModel, FloatModel, IntegerModel
class ATan2Tests(TestCase):
def test_null(self):
IntegerModel.objects.create(big=100)
obj = IntegerModel.objects.annotate(
null_atan2_sn=ATan2('small', 'normal'),
null_atan2_nb=ATan2('normal', 'big'),
).first()
self.assertIsNone(obj.null_atan2_sn)
self.assertIsNone(obj.null_atan2_nb)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-9.9'), n2=Decimal('4.6'))
obj = DecimalModel.objects.annotate(n_atan2=ATan2('n1', 'n2')).first()
self.assertIsInstance(obj.n_atan2, Decimal)
self.assertAlmostEqual(obj.n_atan2, Decimal(math.atan2(obj.n1, obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-25, f2=0.33)
obj = FloatModel.objects.annotate(f_atan2=ATan2('f1', 'f2')).first()
self.assertIsInstance(obj.f_atan2, float)
self.assertAlmostEqual(obj.f_atan2, math.atan2(obj.f1, obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=0, normal=1, big=10)
obj = IntegerModel.objects.annotate(
atan2_sn=ATan2('small', 'normal'),
atan2_nb=ATan2('normal', 'big'),
).first()
self.assertIsInstance(obj.atan2_sn, float)
self.assertIsInstance(obj.atan2_nb, float)
self.assertAlmostEqual(obj.atan2_sn, math.atan2(obj.small, obj.normal))
self.assertAlmostEqual(obj.atan2_nb, math.atan2(obj.normal, obj.big))
d9c89abe82718f6122948ddf48cd306afe6df7725204c1e728ed38590f387df3
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Degrees
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class DegreesTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_degrees=Degrees('normal')).first()
self.assertIsNone(obj.null_degrees)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_degrees=Degrees('n1'), n2_degrees=Degrees('n2')).first()
self.assertIsInstance(obj.n1_degrees, Decimal)
self.assertIsInstance(obj.n2_degrees, Decimal)
self.assertAlmostEqual(obj.n1_degrees, Decimal(math.degrees(obj.n1)))
self.assertAlmostEqual(obj.n2_degrees, Decimal(math.degrees(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_degrees=Degrees('f1'), f2_degrees=Degrees('f2')).first()
self.assertIsInstance(obj.f1_degrees, float)
self.assertIsInstance(obj.f2_degrees, float)
self.assertAlmostEqual(obj.f1_degrees, math.degrees(obj.f1))
self.assertAlmostEqual(obj.f2_degrees, math.degrees(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_degrees=Degrees('small'),
normal_degrees=Degrees('normal'),
big_degrees=Degrees('big'),
).first()
self.assertIsInstance(obj.small_degrees, float)
self.assertIsInstance(obj.normal_degrees, float)
self.assertIsInstance(obj.big_degrees, float)
self.assertAlmostEqual(obj.small_degrees, math.degrees(obj.small))
self.assertAlmostEqual(obj.normal_degrees, math.degrees(obj.normal))
self.assertAlmostEqual(obj.big_degrees, math.degrees(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Degrees):
DecimalModel.objects.create(n1=Decimal('5.4'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('-30'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__degrees__gt=0).get()
self.assertEqual(obj.n1, Decimal('5.4'))
980cc092cab2cb691df4ea8405322d2bb0b2c4f9998b5da78205297006071812
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import ASin
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class ASinTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_asin=ASin('normal')).first()
self.assertIsNone(obj.null_asin)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('0.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_asin=ASin('n1'), n2_asin=ASin('n2')).first()
self.assertIsInstance(obj.n1_asin, Decimal)
self.assertIsInstance(obj.n2_asin, Decimal)
self.assertAlmostEqual(obj.n1_asin, Decimal(math.asin(obj.n1)))
self.assertAlmostEqual(obj.n2_asin, Decimal(math.asin(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-0.5, f2=0.87)
obj = FloatModel.objects.annotate(f1_asin=ASin('f1'), f2_asin=ASin('f2')).first()
self.assertIsInstance(obj.f1_asin, float)
self.assertIsInstance(obj.f2_asin, float)
self.assertAlmostEqual(obj.f1_asin, math.asin(obj.f1))
self.assertAlmostEqual(obj.f2_asin, math.asin(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=0, normal=1, big=-1)
obj = IntegerModel.objects.annotate(
small_asin=ASin('small'),
normal_asin=ASin('normal'),
big_asin=ASin('big'),
).first()
self.assertIsInstance(obj.small_asin, float)
self.assertIsInstance(obj.normal_asin, float)
self.assertIsInstance(obj.big_asin, float)
self.assertAlmostEqual(obj.small_asin, math.asin(obj.small))
self.assertAlmostEqual(obj.normal_asin, math.asin(obj.normal))
self.assertAlmostEqual(obj.big_asin, math.asin(obj.big))
def test_transform(self):
with register_lookup(DecimalField, ASin):
DecimalModel.objects.create(n1=Decimal('0.1'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('1.0'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__asin__gt=1).get()
self.assertEqual(obj.n1, Decimal('1.0'))
6e74a00d0cd7e9a842867a71110b84f81ba684e9c215e454a0554e55e640fc1f
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Tan
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class TanTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_tan=Tan('normal')).first()
self.assertIsNone(obj.null_tan)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_tan=Tan('n1'), n2_tan=Tan('n2')).first()
self.assertIsInstance(obj.n1_tan, Decimal)
self.assertIsInstance(obj.n2_tan, Decimal)
self.assertAlmostEqual(obj.n1_tan, Decimal(math.tan(obj.n1)))
self.assertAlmostEqual(obj.n2_tan, Decimal(math.tan(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_tan=Tan('f1'), f2_tan=Tan('f2')).first()
self.assertIsInstance(obj.f1_tan, float)
self.assertIsInstance(obj.f2_tan, float)
self.assertAlmostEqual(obj.f1_tan, math.tan(obj.f1))
self.assertAlmostEqual(obj.f2_tan, math.tan(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_tan=Tan('small'),
normal_tan=Tan('normal'),
big_tan=Tan('big'),
).first()
self.assertIsInstance(obj.small_tan, float)
self.assertIsInstance(obj.normal_tan, float)
self.assertIsInstance(obj.big_tan, float)
self.assertAlmostEqual(obj.small_tan, math.tan(obj.small))
self.assertAlmostEqual(obj.normal_tan, math.tan(obj.normal))
self.assertAlmostEqual(obj.big_tan, math.tan(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Tan):
DecimalModel.objects.create(n1=Decimal('0.0'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('12.0'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__tan__lt=0).get()
self.assertEqual(obj.n1, Decimal('12.0'))
257c0adf8ea5569d5e2c4afc3b67634b45e17ba62f62888d39197c69ef71de71
import math
from decimal import Decimal
from django.db.models.functions import Mod
from django.test import TestCase
from ..models import DecimalModel, FloatModel, IntegerModel
class ModTests(TestCase):
def test_null(self):
IntegerModel.objects.create(big=100)
obj = IntegerModel.objects.annotate(
null_mod_small=Mod('small', 'normal'),
null_mod_normal=Mod('normal', 'big'),
).first()
self.assertIsNone(obj.null_mod_small)
self.assertIsNone(obj.null_mod_normal)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-9.9'), n2=Decimal('4.6'))
obj = DecimalModel.objects.annotate(n_mod=Mod('n1', 'n2')).first()
self.assertIsInstance(obj.n_mod, Decimal)
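        # Unlike Python's % operator, the SQL MOD() result takes the sign of
        # the dividend, which is what math.fmod() computes.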
self.assertAlmostEqual(obj.n_mod, Decimal(math.fmod(obj.n1, obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-25, f2=0.33)
obj = FloatModel.objects.annotate(f_mod=Mod('f1', 'f2')).first()
self.assertIsInstance(obj.f_mod, float)
self.assertAlmostEqual(obj.f_mod, math.fmod(obj.f1, obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=20, normal=15, big=1)
obj = IntegerModel.objects.annotate(
small_mod=Mod('small', 'normal'),
normal_mod=Mod('normal', 'big'),
big_mod=Mod('big', 'small'),
).first()
self.assertIsInstance(obj.small_mod, float)
self.assertIsInstance(obj.normal_mod, float)
self.assertIsInstance(obj.big_mod, float)
self.assertEqual(obj.small_mod, math.fmod(obj.small, obj.normal))
self.assertEqual(obj.normal_mod, math.fmod(obj.normal, obj.big))
self.assertEqual(obj.big_mod, math.fmod(obj.big, obj.small))
e250cb39d3902553d086a0ab5ba6a74b18b547321f226a7d843b0e0bf3e2dc50
from decimal import Decimal
from django.db.models.functions import Power
from django.test import TestCase
from ..models import DecimalModel, FloatModel, IntegerModel
class PowerTests(TestCase):
def test_null(self):
IntegerModel.objects.create(big=100)
obj = IntegerModel.objects.annotate(
null_power_small=Power('small', 'normal'),
null_power_normal=Power('normal', 'big'),
).first()
self.assertIsNone(obj.null_power_small)
self.assertIsNone(obj.null_power_normal)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('1.0'), n2=Decimal('-0.6'))
obj = DecimalModel.objects.annotate(n_power=Power('n1', 'n2')).first()
self.assertIsInstance(obj.n_power, Decimal)
self.assertAlmostEqual(obj.n_power, Decimal(obj.n1 ** obj.n2))
def test_float(self):
FloatModel.objects.create(f1=2.3, f2=1.1)
obj = FloatModel.objects.annotate(f_power=Power('f1', 'f2')).first()
self.assertIsInstance(obj.f_power, float)
self.assertAlmostEqual(obj.f_power, obj.f1 ** obj.f2)
def test_integer(self):
IntegerModel.objects.create(small=-1, normal=20, big=3)
obj = IntegerModel.objects.annotate(
small_power=Power('small', 'normal'),
normal_power=Power('normal', 'big'),
big_power=Power('big', 'small'),
).first()
self.assertIsInstance(obj.small_power, float)
self.assertIsInstance(obj.normal_power, float)
self.assertIsInstance(obj.big_power, float)
self.assertAlmostEqual(obj.small_power, obj.small ** obj.normal)
self.assertAlmostEqual(obj.normal_power, obj.normal ** obj.big)
self.assertAlmostEqual(obj.big_power, obj.big ** obj.small)
d4ae0acd11cedada2f4299331bd59098bd80232f3acb9a45b06699a7a641f045
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Radians
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class RadiansTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_radians=Radians('normal')).first()
self.assertIsNone(obj.null_radians)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_radians=Radians('n1'), n2_radians=Radians('n2')).first()
self.assertIsInstance(obj.n1_radians, Decimal)
self.assertIsInstance(obj.n2_radians, Decimal)
self.assertAlmostEqual(obj.n1_radians, Decimal(math.radians(obj.n1)))
self.assertAlmostEqual(obj.n2_radians, Decimal(math.radians(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_radians=Radians('f1'), f2_radians=Radians('f2')).first()
self.assertIsInstance(obj.f1_radians, float)
self.assertIsInstance(obj.f2_radians, float)
self.assertAlmostEqual(obj.f1_radians, math.radians(obj.f1))
self.assertAlmostEqual(obj.f2_radians, math.radians(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_radians=Radians('small'),
normal_radians=Radians('normal'),
big_radians=Radians('big'),
).first()
self.assertIsInstance(obj.small_radians, float)
self.assertIsInstance(obj.normal_radians, float)
self.assertIsInstance(obj.big_radians, float)
self.assertAlmostEqual(obj.small_radians, math.radians(obj.small))
self.assertAlmostEqual(obj.normal_radians, math.radians(obj.normal))
self.assertAlmostEqual(obj.big_radians, math.radians(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Radians):
DecimalModel.objects.create(n1=Decimal('2.0'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('-1.0'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__radians__gt=0).get()
self.assertEqual(obj.n1, Decimal('2.0'))
5cc1ef865b853cc54de9811f847ad6a2a80477a49e90c7125e0bf1c1aff3222d
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Sin
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class SinTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_sin=Sin('normal')).first()
self.assertIsNone(obj.null_sin)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_sin=Sin('n1'), n2_sin=Sin('n2')).first()
self.assertIsInstance(obj.n1_sin, Decimal)
self.assertIsInstance(obj.n2_sin, Decimal)
self.assertAlmostEqual(obj.n1_sin, Decimal(math.sin(obj.n1)))
self.assertAlmostEqual(obj.n2_sin, Decimal(math.sin(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_sin=Sin('f1'), f2_sin=Sin('f2')).first()
self.assertIsInstance(obj.f1_sin, float)
self.assertIsInstance(obj.f2_sin, float)
self.assertAlmostEqual(obj.f1_sin, math.sin(obj.f1))
self.assertAlmostEqual(obj.f2_sin, math.sin(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_sin=Sin('small'),
normal_sin=Sin('normal'),
big_sin=Sin('big'),
).first()
self.assertIsInstance(obj.small_sin, float)
self.assertIsInstance(obj.normal_sin, float)
self.assertIsInstance(obj.big_sin, float)
self.assertAlmostEqual(obj.small_sin, math.sin(obj.small))
self.assertAlmostEqual(obj.normal_sin, math.sin(obj.normal))
self.assertAlmostEqual(obj.big_sin, math.sin(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Sin):
DecimalModel.objects.create(n1=Decimal('5.4'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('0.1'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__sin__lt=0).get()
self.assertEqual(obj.n1, Decimal('5.4'))
a15f1d865d959ecf587e448bcf7f09b7ea7468dec6589683e525374ab143f83b
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Exp
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class ExpTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_exp=Exp('normal')).first()
self.assertIsNone(obj.null_exp)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('-12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_exp=Exp('n1'), n2_exp=Exp('n2')).first()
self.assertIsInstance(obj.n1_exp, Decimal)
self.assertIsInstance(obj.n2_exp, Decimal)
self.assertAlmostEqual(obj.n1_exp, Decimal(math.exp(obj.n1)))
self.assertAlmostEqual(obj.n2_exp, Decimal(math.exp(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_exp=Exp('f1'), f2_exp=Exp('f2')).first()
self.assertIsInstance(obj.f1_exp, float)
self.assertIsInstance(obj.f2_exp, float)
self.assertAlmostEqual(obj.f1_exp, math.exp(obj.f1))
self.assertAlmostEqual(obj.f2_exp, math.exp(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_exp=Exp('small'),
normal_exp=Exp('normal'),
big_exp=Exp('big'),
).first()
self.assertIsInstance(obj.small_exp, float)
self.assertIsInstance(obj.normal_exp, float)
self.assertIsInstance(obj.big_exp, float)
self.assertAlmostEqual(obj.small_exp, math.exp(obj.small))
self.assertAlmostEqual(obj.normal_exp, math.exp(obj.normal))
self.assertAlmostEqual(obj.big_exp, math.exp(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Exp):
DecimalModel.objects.create(n1=Decimal('12.0'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('-1.0'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__exp__gt=10).get()
self.assertEqual(obj.n1, Decimal('12.0'))
1e78dee13e1e53c0cd3e20dcb5439806623d0d9ac8bbe00cf52c80f814487e02
import math
from django.db.models.functions import Pi
from django.test import TestCase
from ..models import FloatModel
class PiTests(TestCase):
def test(self):
FloatModel.objects.create(f1=2.5, f2=15.9)
obj = FloatModel.objects.annotate(pi=Pi()).first()
self.assertIsInstance(obj.pi, float)
self.assertAlmostEqual(obj.pi, math.pi, places=5)
54a201bb783feb35d344917c59ef8a0f2f2b45711147e4d06370a94fb91bcf9f
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import Ln
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class LnTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_ln=Ln('normal')).first()
self.assertIsNone(obj.null_ln)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('12.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_ln=Ln('n1'), n2_ln=Ln('n2')).first()
self.assertIsInstance(obj.n1_ln, Decimal)
self.assertIsInstance(obj.n2_ln, Decimal)
self.assertAlmostEqual(obj.n1_ln, Decimal(math.log(obj.n1)))
self.assertAlmostEqual(obj.n2_ln, Decimal(math.log(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=27.5, f2=0.33)
obj = FloatModel.objects.annotate(f1_ln=Ln('f1'), f2_ln=Ln('f2')).first()
self.assertIsInstance(obj.f1_ln, float)
self.assertIsInstance(obj.f2_ln, float)
self.assertAlmostEqual(obj.f1_ln, math.log(obj.f1))
self.assertAlmostEqual(obj.f2_ln, math.log(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=20, normal=15, big=1)
obj = IntegerModel.objects.annotate(
small_ln=Ln('small'),
normal_ln=Ln('normal'),
big_ln=Ln('big'),
).first()
self.assertIsInstance(obj.small_ln, float)
self.assertIsInstance(obj.normal_ln, float)
self.assertIsInstance(obj.big_ln, float)
self.assertAlmostEqual(obj.small_ln, math.log(obj.small))
self.assertAlmostEqual(obj.normal_ln, math.log(obj.normal))
self.assertAlmostEqual(obj.big_ln, math.log(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Ln):
DecimalModel.objects.create(n1=Decimal('12.0'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('1.0'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__ln__gt=0).get()
self.assertEqual(obj.n1, Decimal('12.0'))
45aae6eebe9486f09b9f7dfd1b2b769df9b20f31d1a859ac18f9426aa3c26478
import os
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from django.utils import translation
@override_settings(
USE_I18N=True,
LOCALE_PATHS=[
os.path.join(os.path.dirname(__file__), 'locale'),
],
LANGUAGE_CODE='en',
LANGUAGES=[
('en', 'English'),
('fr', 'French'),
],
)
class ContentTypeTests(TestCase):
def test_verbose_name(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
with translation.override('en'):
self.assertEqual(str(company_type), 'i18n | Company')
with translation.override('fr'):
self.assertEqual(str(company_type), 'i18n | Société')
0c4647fef1969e19e026bdb236f851b6c2d6084ab86f15bcd5b7d5e3821ed24d
from django.urls import path
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = [
path('foo/', view, name='not-prefixed-included-url'),
]
663c5cf90f9de4d48fee1d733944ac3fca6f68cdf30e796720f80464b6c02f79
from django.conf.urls.i18n import i18n_patterns
from django.urls import include, re_path
from django.utils.translation import gettext_lazy as _
urlpatterns = i18n_patterns(
re_path(_(r'^account/'), include('i18n.patterns.urls.wrong_namespace', namespace='account')),
)
7d84a51c1cb6c7d16c20f412b80a155e43ebadc8df501c323b3de69b503504b9
from django.conf.urls.i18n import i18n_patterns
from django.urls import include, path, re_path
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = [
path('not-prefixed/', view, name='not-prefixed'),
path('not-prefixed-include/', include('i18n.patterns.urls.included')),
re_path(_(r'^translated/$'), view, name='no-prefix-translated'),
re_path(_(r'^translated/(?P<slug>[\w-]+)/$'), view, name='no-prefix-translated-slug'),
]
urlpatterns += i18n_patterns(
path('prefixed/', view, name='prefixed'),
path('prefixed.xml', view, name='prefixed_xml'),
re_path(_(r'^users/$'), view, name='users'),
re_path(_(r'^account/'), include('i18n.patterns.urls.namespace', namespace='account')),
)
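# A hedged sketch of how these patterns resolve (assumes 'en' is the active
# language; 'nl' stands in for any other configured language code):
#
#   reverse('not-prefixed')   -> '/not-prefixed/'   # outside i18n_patterns()
#   reverse('prefixed')       -> '/en/prefixed/'    # language prefix added
#   with translation.override('nl'):
#       reverse('prefixed')   -> '/nl/prefixed/'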
|
926d3d3d710ee26e204e975a7aaa03bff4f4cb29b2004bb1d9fe78ea13bcf27f | from django.urls import re_path
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = [
re_path('^nl/foo/', view, name='not-translated'),
]
|
7a6ac21ab984114b3b5930c5a1b45e8c929084243190a336bd5f816aa8897830 | from django.conf.urls.i18n import i18n_patterns
from django.urls import re_path
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
app_name = 'account'
urlpatterns = i18n_patterns(
re_path(_(r'^register/$'), view, name='register'),
)
|
e1c6c14b7fcd773ad5d21fc1550a8aae27df99014424ed3e22152470d5a00274 | from django.urls import path, re_path
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
app_name = 'account'
urlpatterns = [
re_path(_(r'^register/$'), view, name='register'),
re_path(_(r'^register-without-slash$'), view, name='register-without-slash'),
path(_('register-as-path/'), view, name='register-as-path'),
]
|
e625fe22329af0c06b8ea6fe93315c005bf5ad04c72e8839a785231aa12a5651 | from django.conf.urls.i18n import i18n_patterns
from django.urls import path
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = i18n_patterns(
path('prefixed/', view, name='prefixed'),
)
|
f05f2c413b41ad6781a201f5c61798fc5cf3681222b1dbb73619c457f799f505 | """
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.directives import CodeBlock
from sphinx.domains.std import Cmdoption
from sphinx.errors import ExtensionError
from sphinx.util import logging
from sphinx.util.console import bold
from sphinx.writers.html import HTMLTranslator
logger = logging.getLogger(__name__)
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
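# For illustration (an assumed typical input, not taken from the docs): in
# the description "appname, --traceback" this regex first captures
# ('appname', ''), the lazy second group stopping at the comma that
# introduces the '--'-prefixed alternative.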
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag"
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter"
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_object_type(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_directive('django-admin-option', Cmdoption)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
app.set_translator('djangohtml', DjangoHTMLTranslator)
app.set_translator('json', DjangoHTMLTranslator)
app.add_node(
ConsoleNode,
html=(visit_console_html, None),
latex=(visit_console_dummy, depart_console_dummy),
man=(visit_console_dummy, depart_console_dummy),
text=(visit_console_dummy, depart_console_dummy),
texinfo=(visit_console_dummy, depart_console_dummy),
)
app.add_directive('console', ConsoleDirective)
app.connect('html-page-context', html_page_context_hook)
return {'parallel_read_safe': True}
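# For illustration, the crossref types registered above are used in the docs
# roughly like this (a sketch of typical reST usage, not quoted from a
# specific docs file):
#
#   .. setting:: DEBUG        (definition target)
#
#   :setting:`DEBUG`          (inline reference to it)
#   :ttag:`for`, :tfilter:`date`, :lookup:`exact`, :djadmin:`migrate`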
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
try:
env.get_domain('changeset').note_changeset(node)
except ExtensionError:
# Sphinx < 1.8: Domain 'changeset' is not registered
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
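# Typical usage in the docs (a sketch; the version number is illustrative):
#
#   .. versionadded:: 3.0
#
#       The description goes in the content, per the single-argument rule
#       enforced in run() above.
#
# When the argument equals django_next_version, the rendered label reads
# "Development version" instead of the number.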
class DjangoHTMLTranslator(HTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_desc_parameterlist(self, node):
self.body.append('(') # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum(isinstance(c, addnodes.desc_parameter) for c in node.children)
def depart_desc_parameterlist(self, node):
self.body.append(')')
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if node else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
super().visit_section(node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env.ref_context['std:program'] = command
title = "django-admin %s" % sig
signode += addnodes.desc_name(title, title)
return command
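# For example (a sketch, not quoted from a specific docs file):
#
#   .. django-admin:: migrate
#
# produces the signature "django-admin migrate" and sets 'migrate' as the
# current program for any django-admin-option directives that follow.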
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super().finish()
logger.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatetag" and k == "ref/templates/builtins"
],
"tfilters": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatefilter" and k == "ref/templates/builtins"
],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
class ConsoleNode(nodes.literal_block):
"""
Custom node to override the visit/depart event handlers at registration
time. Wrap a literal_block object and defer to it.
"""
tagname = 'ConsoleNode'
def __init__(self, litblk_obj):
self.wrapped = litblk_obj
def __getattr__(self, attr):
        if attr == 'wrapped':
            # Look up the instance dict directly; plain attribute access here
            # would recurse into __getattr__ before 'wrapped' is assigned.
            return self.__dict__['wrapped']
return getattr(self.wrapped, attr)
def visit_console_dummy(self, node):
"""Defer to the corresponding parent's handler."""
self.visit_literal_block(node)
def depart_console_dummy(self, node):
"""Defer to the corresponding parent's handler."""
self.depart_literal_block(node)
def visit_console_html(self, node):
"""Generate HTML for the console directive."""
if self.builder.name in ('djangohtml', 'json') and node['win_console_text']:
# Put a mark on the document object signaling the fact the directive
# has been used on it.
self.document._console_directive_used_flag = True
uid = node['uid']
self.body.append('''\
<div class="console-block" id="console-block-%(id)s">
<input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" checked>
<label for="c-tab-%(id)s-unix" title="Linux/macOS">/</label>
<input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s">
<label for="c-tab-%(id)s-win" title="Windows"></label>
<section class="c-content-unix" id="c-content-%(id)s-unix">\n''' % {'id': uid})
try:
self.visit_literal_block(node)
except nodes.SkipNode:
pass
self.body.append('</section>\n')
self.body.append('<section class="c-content-win" id="c-content-%(id)s-win">\n' % {'id': uid})
win_text = node['win_console_text']
highlight_args = {'force': True}
if 'linenos' in node:
linenos = node['linenos']
else:
linenos = win_text.count('\n') >= self.highlightlinenothreshold - 1
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(
win_text, 'doscon', warn=warner, linenos=linenos, **highlight_args
)
self.body.append(highlighted)
self.body.append('</section>\n')
self.body.append('</div>\n')
raise nodes.SkipNode
else:
self.visit_literal_block(node)
class ConsoleDirective(CodeBlock):
"""
A reStructuredText directive which renders a two-tab code block in which
the second tab shows a Windows command line equivalent of the usual
Unix-oriented examples.
"""
required_arguments = 0
# The 'doscon' Pygments formatter needs a prompt like this. '>' alone
# won't do it because then it simply paints the whole command line as a
# grey comment with no highlighting at all.
WIN_PROMPT = r'...\> '
def run(self):
def args_to_win(cmdline):
changed = False
out = []
for token in cmdline.split():
if token[:2] == './':
token = token[2:]
changed = True
elif token[:2] == '~/':
token = '%HOMEPATH%\\' + token[2:]
changed = True
elif token == 'make':
token = 'make.bat'
changed = True
if '://' not in token and 'git' not in cmdline:
out.append(token.replace('/', '\\'))
changed = True
else:
out.append(token)
if changed:
return ' '.join(out)
return cmdline
def cmdline_to_win(line):
if line.startswith('# '):
return 'REM ' + args_to_win(line[2:])
if line.startswith('$ # '):
return 'REM ' + args_to_win(line[4:])
if line.startswith('$ ./manage.py'):
return 'manage.py ' + args_to_win(line[13:])
if line.startswith('$ manage.py'):
return 'manage.py ' + args_to_win(line[11:])
if line.startswith('$ ./runtests.py'):
return 'runtests.py ' + args_to_win(line[15:])
if line.startswith('$ ./'):
return args_to_win(line[4:])
if line.startswith('$ python3'):
return 'py ' + args_to_win(line[9:])
if line.startswith('$ python'):
return 'py ' + args_to_win(line[8:])
if line.startswith('$ '):
return args_to_win(line[2:])
return None
def code_block_to_win(content):
bchanged = False
lines = []
for line in content:
modline = cmdline_to_win(line)
if modline is None:
lines.append(line)
else:
lines.append(self.WIN_PROMPT + modline)
bchanged = True
if bchanged:
return ViewList(lines)
return None
env = self.state.document.settings.env
self.arguments = ['console']
lit_blk_obj = super().run()[0]
# Only do work when the djangohtml HTML Sphinx builder is being used,
# invoke the default behavior for the rest.
if env.app.builder.name not in ('djangohtml', 'json'):
return [lit_blk_obj]
lit_blk_obj['uid'] = '%s' % env.new_serialno('console')
# Only add the tabbed UI if there is actually a Windows-specific
# version of the CLI example.
win_content = code_block_to_win(self.content)
if win_content is None:
lit_blk_obj['win_console_text'] = None
else:
self.content = win_content
lit_blk_obj['win_console_text'] = super().run()[0].rawsource
# Replace the literal_node object returned by Sphinx's CodeBlock with
# the ConsoleNode wrapper.
return [ConsoleNode(lit_blk_obj)]
def html_page_context_hook(app, pagename, templatename, context, doctree):
# Put a bool on the context used to render the template. It's used to
# control inclusion of console-tabs.css and activation of the JavaScript.
    # This way it's included only in HTML files rendered from reST files where
# the ConsoleDirective is used.
context['include_console_assets'] = getattr(doctree, '_console_directive_used_flag', False)
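# A usage sketch of the console directive defined above: in a reST file,
#
#   .. console::
#
#       $ python -m pip install Django
#
# renders a two-tab block whose Windows tab rewrites the line via
# cmdline_to_win(), roughly to "...\> py -m pip install Django".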
|
492636caa5beaadb4b49181fd84d24b05326a6b7cbd5c2456ff97d9988eba6e5 | import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 5)
# This check and everything above must remain compatible with Python 2.7.
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write("""
==========================
Unsupported Python version
==========================
This version of Django requires Python {}.{}, but you're trying to
install it on Python {}.{}.
This may be because you are using a version of pip that doesn't
understand the python_requires classifier. Make sure you
have pip >= 9.0 and setuptools >= 24.2, then try again:
$ python -m pip install --upgrade pip setuptools
$ python -m pip install django
This will install the latest version of Django which works on your
version of Python. If you can't upgrade your pip (or Python), request
an older version of Django:
$ python -m pip install "django<2"
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
        # We also have to try with an explicit prefix of /usr/local in order
        # to catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "django"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
'django.conf.app_template',
'django.bin']
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name='Django',
version=version,
python_requires='>={}.{}'.format(*REQUIRED_PYTHON),
url='https://www.djangoproject.com/',
author='Django Software Foundation',
author_email='[email protected]',
description=('A high-level Python Web framework that encourages '
'rapid development and clean, pragmatic design.'),
long_description=read('README.rst'),
license='BSD',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=['django/bin/django-admin.py'],
entry_points={'console_scripts': [
'django-admin = django.core.management:execute_from_command_line',
]},
install_requires=['pytz', 'sqlparse'],
extras_require={
"bcrypt": ["bcrypt"],
"argon2": ["argon2-cffi >= 16.1.0"],
},
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docs.djangoproject.com/',
'Funding': 'https://www.djangoproject.com/fundraising/',
'Source': 'https://github.com/django/django',
'Tracker': 'https://code.djangoproject.com/',
},
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
|
69e4521174527d578f146e17e5ba4f7a4e70b49dd6c96933ca0d83810e1724f8 | #!/usr/bin/env python
import argparse
import atexit
import copy
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import warnings
import django
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import get_runner
from django.utils.deprecation import (
RemovedInDjango31Warning, RemovedInDjango40Warning,
)
from django.utils.log import DEFAULT_LOGGING
try:
import MySQLdb
except ImportError:
pass
else:
# Ignore informational warnings from QuerySet.explain().
warnings.filterwarnings('ignore', r'\(1003, *', category=MySQLdb.Warning)
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango40Warning)
warnings.simplefilter('error', RemovedInDjango31Warning)
# Make runtime warnings errors to ensure no usage of error-prone patterns.
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that child processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
# Remove the temporary TMPDIR at exit.
atexit.register(shutil.rmtree, TMPDIR)
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_runner_apps',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [(None, RUNTESTS_DIR)]
if connection.features.gis_enabled:
# GIS tests are in nested apps
discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
else:
SUBDIRS_TO_SKIP.append('gis_tests')
for modpath, dirpath in discovery_paths:
for f in os.scandir(dirpath):
if ('.' not in f.name and
os.path.basename(f.name) not in SUBDIRS_TO_SKIP and
not f.is_file() and
os.path.exists(os.path.join(f.path, '__init__.py'))):
modules.append((modpath, f.name))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels, parallel):
# Reduce the given test labels to just the app module path.
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
if verbosity >= 1:
msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
max_parallel = default_test_processes() if parallel == 0 else parallel
if max_parallel > 1:
msg += " with up to %d processes" % max_parallel
print(msg)
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE': settings.MIDDLEWARE,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
settings.MIGRATION_MODULES = {
# This lets us skip creating migrations for the test models as many of
# them depend on one of the following contrib applications.
'auth': None,
'contenttypes': None,
'sessions': None,
}
log_config = copy.deepcopy(DEFAULT_LOGGING)
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
settings.SILENCED_SYSTEM_CHECKS = [
'fields.W342', # ForeignKey(unique=True) -> OneToOneField
]
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# It would be nice to put this validation earlier but it must come after
# django.setup() so that connection.features.gis_enabled can be accessed
# without raising AppRegistryNotReady when running gis_tests in isolation
# on some backends (e.g. PostGIS).
if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
print('Aborting: A GIS database backend is required to run gis_tests.')
sys.exit(1)
# Load all the test model apps.
test_modules = get_test_modules()
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = modpath + '.' + module_name
else:
module_label = module_name
        # If the module (or an ancestor) was named on the command line, or no
        # modules were named (i.e., run all), import this module and add it
        # to INSTALLED_APPS.
module_found_in_labels = not test_labels or any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set
)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
    # Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
    # @override_settings(INSTALLED_APPS=...) on all test cases).
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
# Discard the multiprocessing.util finalizer that tries to remove a
# temporary directory that's already removed by this script's
# atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
# FileNotFoundError at the end of a test run on Python 3.6+ (#27890).
from multiprocessing.util import _finalizer_registry
_finalizer_registry.pop((-100, 0), None)
def actual_test_processes(parallel):
if parallel == 0:
# This doesn't work before django.setup() on some databases.
if all(conn.features.can_clone_databases for conn in connections.all()):
return default_test_processes()
else:
return 1
else:
return parallel
class ActionSelenium(argparse.Action):
"""
Validate the comma-separated list of requested browsers.
"""
def __call__(self, parser, namespace, values, option_string=None):
browsers = values.split(',')
for browser in browsers:
try:
SeleniumTestCaseBase.import_webdriver(browser)
except ImportError:
raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % browser)
setattr(namespace, self.dest, browsers)
def django_tests(verbosity, interactive, failfast, keepdb, reverse,
test_labels, debug_sql, parallel, tags, exclude_tags):
state = setup(verbosity, test_labels, parallel)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
parallel=actual_test_processes(parallel),
tags=tags,
exclude_tags=exclude_tags,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def get_subprocess_args(options):
subprocess_args = [
sys.executable, __file__, '--settings=%s' % options.settings
]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
if options.tags:
subprocess_args.append('--tag=%s' % options.tags)
if options.exclude_tags:
subprocess_args.append('--exclude_tag=%s' % options.exclude_tags)
return subprocess_args
def bisect_tests(bisection_label, options, test_labels, parallel):
state = setup(options.verbosity, test_labels, parallel)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration += 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration += 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels, parallel):
state = setup(options.verbosity, test_labels, parallel)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
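# For example (hedged; 'model_fields' is an illustrative label):
#
#   $ ./runtests.py --bisect=model_fields    # bisect for a test that breaks it
#   $ ./runtests.py --pair=model_fields      # try every label paired with it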
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Django test suite.")
parser.add_argument(
'modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".',
)
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
        help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output',
)
parser.add_argument(
'--noinput', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--failfast', action='store_true',
help='Tells Django to stop running the test suite after first failed test.',
)
parser.add_argument(
'-k', '--keepdb', action='store_true',
help='Tells Django to preserve the test database between runs.',
)
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.',
)
parser.add_argument(
'--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.',
)
parser.add_argument(
'--pair',
help='Run the test suite in pairs with the named test to find problem pairs.',
)
parser.add_argument(
'--reverse', action='store_true',
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.',
)
parser.add_argument(
'--selenium', action=ActionSelenium, metavar='BROWSERS',
help='A comma-separated list of browsers to run the Selenium tests against.',
)
parser.add_argument(
'--selenium-hub',
help='A URL for a selenium hub instance to use in combination with --selenium.',
)
parser.add_argument(
'--external-host', default=socket.gethostname(),
help='The external host that can be reached by the selenium hub instance when running Selenium '
'tests via Selenium Hub.',
)
parser.add_argument(
'--debug-sql', action='store_true',
help='Turn on the SQL query logger within tests.',
)
parser.add_argument(
'--parallel', nargs='?', default=0, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', dest='tags', action='append',
help='Run only tests with the specified tags. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', dest='exclude_tags', action='append',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
options = parser.parse_args()
using_selenium_hub = options.selenium and options.selenium_hub
if options.selenium_hub and not options.selenium:
parser.error('--selenium-hub and --external-host require --selenium to be used.')
if using_selenium_hub and not options.external_host:
parser.error('--selenium-hub and --external-host must be used together.')
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite')
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.selenium:
if not options.tags:
options.tags = ['selenium']
elif 'selenium' not in options.tags:
options.tags.append('selenium')
if options.selenium_hub:
SeleniumTestCaseBase.selenium_hub = options.selenium_hub
SeleniumTestCaseBase.external_host = options.external_host
SeleniumTestCaseBase.browsers = options.selenium
if options.bisect:
bisect_tests(options.bisect, options, options.modules, options.parallel)
elif options.pair:
paired_tests(options.pair, options, options.modules, options.parallel)
else:
failures = django_tests(
options.verbosity, options.interactive, options.failfast,
options.keepdb, options.reverse, options.modules,
options.debug_sql, options.parallel, options.tags,
options.exclude_tags,
)
if failures:
sys.exit(1)
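# Typical invocations (illustrative labels; see --help above for all options):
#
#   $ ./runtests.py                          # full suite, 'test_sqlite' settings
#   $ ./runtests.py basic i18n --verbosity=2
#   $ ./runtests.py --parallel=4 --exclude-tag=selenium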
|
d0c8aedbc25ac531881f2a7de1105d419a463068bd7f9467122a9338c8bbac0d | # Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't picklable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from os.path import abspath, dirname, join
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000 but this isn't enough for
# building docs/ref/settings.txt sometimes.
# https://groups.google.com/d/topic/sphinx-dev/MtRf64eGtv4/discussion
sys.setrecursionlimit(2000)
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.6.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
'sphinx.ext.extlinks',
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
extlinks = {
'cve': ('https://nvd.nist.gov/view/vuln/detail?vulnId=%s', 'CVE-'),
'ticket': ('https://code.djangoproject.com/ticket/%s', '#'),
}
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Spelling language.
spelling_lang = 'en_US'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0'
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
def django_release():
pep440ver = get_version()
if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver:
return pep440ver + '.dev'
return pep440ver
release = django_release()
# The "development version" of Django
django_next_version = '3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_theme']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('http://www.sphinx-doc.org/en/master/', None),
'psycopg2': ('http://initd.org/psycopg/docs/', None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# The 'versionadded' and 'versionchanged' directives are overridden.
suppress_warnings = ['app.add_directive']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Content template for the index page.
# html_index = ''
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'preamble': (
'\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
'\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}'
'\\DeclareUnicodeCharacter{2665}{[unicode-heart]}'
'\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}'
),
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
('contents', 'django.tex', 'Django Documentation',
'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
'ref/django-admin',
'django-admin',
'Utility script for the Django Web framework',
['Django Software Foundation'],
1
)]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
master_doc, "django", "", "", "Django",
"Documentation of the Django framework", "Web development", False
)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
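# To build with this configuration (a sketch run from the docs directory;
# Django's Makefile wraps an equivalent call):
#
#   $ sphinx-build -b djangohtml -d _build/doctrees . _build/html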
|
1e98482108b1b0fdee516a845db53ec2091049e2c4db7efdb346d63a995e49b8 | import sys
import unittest
from contextlib import contextmanager
from django.test import LiveServerTestCase, tag
from django.utils.decorators import classproperty
from django.utils.module_loading import import_string
from django.utils.text import capfirst
class SeleniumTestCaseBase(type(LiveServerTestCase)):
# List of browsers to dynamically create test classes for.
browsers = []
# A selenium hub URL to test against.
selenium_hub = None
# The external host Selenium Hub can reach.
external_host = None
# Sentinel value to differentiate browser-specific instances.
browser = None
def __new__(cls, name, bases, attrs):
"""
Dynamically create new classes and add them to the test module when
        multiple browser specs are provided (e.g. --selenium=firefox,chrome).
"""
test_class = super().__new__(cls, name, bases, attrs)
# If the test class is either browser-specific or a test base, return it.
if test_class.browser or not any(name.startswith('test') and callable(value) for name, value in attrs.items()):
return test_class
elif test_class.browsers:
# Reuse the created test class to make it browser-specific.
# We can't rename it to include the browser name or create a
# subclass like we do with the remaining browsers as it would
# either duplicate tests or prevent pickling of its instances.
first_browser = test_class.browsers[0]
test_class.browser = first_browser
# Listen on an external interface if using a selenium hub.
host = test_class.host if not test_class.selenium_hub else '0.0.0.0'
test_class.host = host
test_class.external_host = cls.external_host
# Create subclasses for each of the remaining browsers and expose
# them through the test's module namespace.
module = sys.modules[test_class.__module__]
for browser in test_class.browsers[1:]:
browser_test_class = cls.__new__(
cls,
"%s%s" % (capfirst(browser), name),
(test_class,),
{
'browser': browser,
'host': host,
'external_host': cls.external_host,
'__module__': test_class.__module__,
}
)
setattr(module, browser_test_class.__name__, browser_test_class)
return test_class
# If no browsers were specified, skip this class (it'll still be discovered).
return unittest.skip('No browsers specified.')(test_class)
@classmethod
def import_webdriver(cls, browser):
return import_string("selenium.webdriver.%s.webdriver.WebDriver" % browser)
@classmethod
def get_capability(cls, browser):
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
return getattr(DesiredCapabilities, browser.upper())
def create_webdriver(self):
if self.selenium_hub:
from selenium import webdriver
return webdriver.Remote(
command_executor=self.selenium_hub,
desired_capabilities=self.get_capability(self.browser),
)
return self.import_webdriver(self.browser)()
@tag('selenium')
class SeleniumTestCase(LiveServerTestCase, metaclass=SeleniumTestCaseBase):
implicit_wait = 10
external_host = None
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (cls.external_host or cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.external_host or cls.host
@classmethod
def setUpClass(cls):
cls.selenium = cls.create_webdriver()
cls.selenium.implicitly_wait(cls.implicit_wait)
super().setUpClass()
@classmethod
def _tearDownClassInternal(cls):
# quit() the WebDriver before attempting to terminate and join the
        # single-threaded LiveServerThread to avoid a deadlock if the browser
# kept a connection alive.
if hasattr(cls, 'selenium'):
cls.selenium.quit()
super()._tearDownClassInternal()
@contextmanager
def disable_implicit_wait(self):
"""Disable the default implicit wait."""
self.selenium.implicitly_wait(0)
try:
yield
finally:
self.selenium.implicitly_wait(self.implicit_wait)
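# A minimal usage sketch (the test class name and URL are illustrative):
#
#   class MySeleniumTests(SeleniumTestCase):
#       browsers = ['firefox']
#
#       def test_index_title(self):
#           self.selenium.get(self.live_server_url + '/')
#           self.assertIn('Django', self.selenium.title)
#
# With --selenium=firefox,chrome, the metaclass above also generates a
# ChromeMySeleniumTests subclass in the test module automatically.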
|
1ca63cb1463db8c726820f5fa5a7c25eee504f5eaa494e59476f8c91276f1eb7 | import ctypes
import itertools
import logging
import multiprocessing
import os
import pickle
import textwrap
import unittest
from importlib import import_module
from io import StringIO
from django.core.management import call_command
from django.db import connections
from django.test import SimpleTestCase, TestCase
from django.test.utils import (
setup_databases as _setup_databases, setup_test_environment,
teardown_databases as _teardown_databases, teardown_test_environment,
)
from django.utils.datastructures import OrderedSet
try:
import tblib.pickling_support
except ImportError:
tblib = None
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger('django.db.backends')
self.logger.setLevel(logging.DEBUG)
super().__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super().startTest(test)
def stopTest(self, test):
super().stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super().addError(test, err)
self.debug_sql_stream.seek(0)
self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def addSubTest(self, test, subtest, err):
super().addSubTest(test, subtest, err)
if err is not None:
self.debug_sql_stream.seek(0)
errors = self.failures if issubclass(err[0], test.failureException) else self.errors
errors[-1] = errors[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
self.stream.writeln(self.separator2)
self.stream.writeln(sql_debug)
class RemoteTestResult:
"""
Record information about which tests have succeeded and which have failed.
The sole purpose of this class is to record events in the child processes
so they can be replayed in the master process. As a consequence it doesn't
inherit unittest.TestResult and doesn't attempt to implement all its API.
The implementation matches the unpythonic coding style of unittest2.
"""
def __init__(self):
if tblib is not None:
tblib.pickling_support.install()
self.events = []
self.failfast = False
self.shouldStop = False
self.testsRun = 0
@property
def test_index(self):
return self.testsRun - 1
def _confirm_picklable(self, obj):
"""
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
the parent process. Let the exception rise, if not.
"""
pickle.loads(pickle.dumps(obj))
def _print_unpicklable_subtest(self, test, subtest, pickle_exc):
print("""
Subtest failed:
test: {}
subtest: {}
Unfortunately, the subtest that failed cannot be pickled, so the parallel
test runner cannot handle it cleanly. Here is the pickling error:
> {}
You should re-run this test with --parallel=1 to reproduce the failure
with a cleaner failure message.
""".format(test, subtest, pickle_exc))
def check_picklable(self, test, err):
# Ensure that sys.exc_info() tuples are picklable. This displays a
# clear multiprocessing.pool.RemoteTraceback generated in the child
# process instead of a multiprocessing.pool.MaybeEncodingError, making
# the root cause easier to figure out for users who aren't familiar
# with the multiprocessing module. Since we're in a forked process,
# our best chance to communicate with them is to print to stdout.
try:
self._confirm_picklable(err)
except Exception as exc:
original_exc_txt = repr(err[1])
original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
pickle_exc_txt = repr(exc)
pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
if tblib is None:
print("""
{} failed:
{}
Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.
In order to see the traceback, you should install tblib:
pip install tblib
""".format(test, original_exc_txt))
else:
print("""
{} failed:
{}
Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.
Here's the error encountered while trying to pickle the exception:
{}
You should re-run this test with the --parallel=1 option to reproduce the
failure and get a correct traceback.
""".format(test, original_exc_txt, pickle_exc_txt))
raise
def check_subtest_picklable(self, test, subtest):
try:
self._confirm_picklable(subtest)
except Exception as exc:
self._print_unpicklable_subtest(test, subtest, exc)
raise
def stop_if_failfast(self):
if self.failfast:
self.stop()
def stop(self):
self.shouldStop = True
def startTestRun(self):
self.events.append(('startTestRun',))
def stopTestRun(self):
self.events.append(('stopTestRun',))
def startTest(self, test):
self.testsRun += 1
self.events.append(('startTest', self.test_index))
def stopTest(self, test):
self.events.append(('stopTest', self.test_index))
def addError(self, test, err):
self.check_picklable(test, err)
self.events.append(('addError', self.test_index, err))
self.stop_if_failfast()
def addFailure(self, test, err):
self.check_picklable(test, err)
self.events.append(('addFailure', self.test_index, err))
self.stop_if_failfast()
def addSubTest(self, test, subtest, err):
# Follow Python 3.5's implementation of unittest.TestResult.addSubTest()
# by not doing anything when a subtest is successful.
if err is not None:
# Call check_picklable() before check_subtest_picklable() since
# check_picklable() performs the tblib check.
self.check_picklable(test, err)
self.check_subtest_picklable(test, subtest)
self.events.append(('addSubTest', self.test_index, subtest, err))
self.stop_if_failfast()
def addSuccess(self, test):
self.events.append(('addSuccess', self.test_index))
def addSkip(self, test, reason):
self.events.append(('addSkip', self.test_index, reason))
def addExpectedFailure(self, test, err):
# If tblib isn't installed, pickling the traceback will always fail.
# However we don't want tblib to be required for running the tests
# when they pass or fail as expected. Drop the traceback when an
# expected failure occurs.
if tblib is None:
err = err[0], err[1], None
self.check_picklable(test, err)
self.events.append(('addExpectedFailure', self.test_index, err))
def addUnexpectedSuccess(self, test):
self.events.append(('addUnexpectedSuccess', self.test_index))
self.stop_if_failfast()
class RemoteTestRunner:
"""
Run tests and record everything but don't display anything.
The implementation matches the unpythonic coding style of unittest2.
"""
resultclass = RemoteTestResult
def __init__(self, failfast=False, resultclass=None):
self.failfast = failfast
if resultclass is not None:
self.resultclass = resultclass
def run(self, test):
result = self.resultclass()
unittest.registerResult(result)
result.failfast = self.failfast
test(result)
return result
def default_test_processes():
"""Default number of test processes when using the --parallel option."""
# The current implementation of the parallel test runner requires
# multiprocessing to start subprocesses with fork().
if multiprocessing.get_start_method() != 'fork':
return 1
try:
return int(os.environ['DJANGO_TEST_PROCESSES'])
except KeyError:
return multiprocessing.cpu_count()
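# For example (an illustrative shell session):
#
#   $ DJANGO_TEST_PROCESSES=4 ./runtests.py basic --parallel
#
# makes 4 the default worker count instead of multiprocessing.cpu_count().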
_worker_id = 0
def _init_worker(counter):
"""
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
for alias in connections:
connection = connections[alias]
settings_dict = connection.creation.get_test_db_clone_settings(str(_worker_id))
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close()
def _run_subsuite(args):
"""
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
"""
runner_class, subsuite_index, subsuite, failfast = args
runner = runner_class(failfast=failfast)
result = runner.run(subsuite)
return subsuite_index, result.events
class ParallelTestSuite(unittest.TestSuite):
"""
Run a series of tests in parallel in several processes.
While the unittest module's documentation implies that orchestrating the
execution of tests is the responsibility of the test runner, in practice,
it appears that TestRunner classes are more concerned with formatting and
displaying test results.
Since there are fewer use cases for customizing TestSuite than TestRunner,
implementing parallelization at the level of the TestSuite improves
interoperability with existing custom test runners. A single instance of a
test runner can still collect results from all tests without being aware
that they have been run in parallel.
"""
# In case someone wants to modify these in a subclass.
init_worker = _init_worker
run_subsuite = _run_subsuite
runner_class = RemoteTestRunner
def __init__(self, suite, processes, failfast=False):
self.subsuites = partition_suite_by_case(suite)
self.processes = processes
self.failfast = failfast
super().__init__()
def run(self, result):
"""
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks picklable with tblib, if available
Even with tblib, errors may still occur for dynamically created
exception classes which cannot be unpickled.
"""
counter = multiprocessing.Value(ctypes.c_int, 0)
pool = multiprocessing.Pool(
processes=self.processes,
initializer=self.init_worker.__func__,
initargs=[counter],
)
args = [
(self.runner_class, index, subsuite, self.failfast)
for index, subsuite in enumerate(self.subsuites)
]
test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
while True:
if result.shouldStop:
pool.terminate()
break
try:
subsuite_index, events = test_results.next(timeout=0.1)
except multiprocessing.TimeoutError:
continue
except StopIteration:
pool.close()
break
tests = list(self.subsuites[subsuite_index])
for event in events:
event_name = event[0]
handler = getattr(result, event_name, None)
if handler is None:
continue
test = tests[event[1]]
args = event[2:]
handler(test, *args)
pool.join()
return result
def __iter__(self):
return iter(self.subsuites)
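# Illustrative sketch, not part of Django: how a plain suite gets wrapped.
# Actually calling run() also needs configured settings and the 'fork' start
# method, so this hypothetical helper stops at construction.
def _example_parallel_suite():
    class _Demo(unittest.TestCase):
        def test_ok(self):
            self.assertTrue(True)
    plain = unittest.defaultTestLoader.loadTestsFromTestCase(_Demo)
    parallel = ParallelTestSuite(plain, processes=2, failfast=False)
    # One sub-suite per TestCase class; DiscoverRunner caps the number of
    # worker processes at this count, since distribution is per-TestCase.
    return len(parallel.subsuites)  # 1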
class DiscoverRunner:
"""A Django test runner that uses unittest2 test discovery."""
test_suite = unittest.TestSuite
parallel_test_suite = ParallelTestSuite
test_runner = unittest.TextTestRunner
test_loader = unittest.defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(self, pattern=None, top_level=None, verbosity=1,
interactive=True, failfast=False, keepdb=False,
reverse=False, debug_mode=False, debug_sql=False, parallel=0,
tags=None, exclude_tags=None, **kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_mode = debug_mode
self.debug_sql = debug_sql
self.parallel = parallel
self.tags = set(tags or [])
self.exclude_tags = set(exclude_tags or [])
@classmethod
def add_arguments(cls, parser):
parser.add_argument(
'-t', '--top-level-directory', dest='top_level',
help='Top level of project for unittest discovery.',
)
parser.add_argument(
'-p', '--pattern', default="test*.py",
help='The test matching pattern. Defaults to test*.py.',
)
parser.add_argument(
'-k', '--keepdb', action='store_true',
help='Preserves the test DB between runs.'
)
parser.add_argument(
'-r', '--reverse', action='store_true',
help='Reverses test cases order.',
)
parser.add_argument(
'--debug-mode', action='store_true',
help='Sets settings.DEBUG to True.',
)
parser.add_argument(
'-d', '--debug-sql', action='store_true',
help='Prints logged SQL queries on failure.',
)
parser.add_argument(
'--parallel', nargs='?', default=1, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', action='append', dest='tags',
help='Run only tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', action='append', dest='exclude_tags',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
def setup_test_environment(self, **kwargs):
setup_test_environment(debug=self.debug_mode)
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
if self.tags or self.exclude_tags:
if self.verbosity >= 2:
if self.tags:
print('Including test tag(s): %s.' % ', '.join(sorted(self.tags)))
if self.exclude_tags:
print('Excluding test tag(s): %s.' % ', '.join(sorted(self.exclude_tags)))
suite = filter_tests_by_tags(suite, self.tags, self.exclude_tags)
suite = reorder_suite(suite, self.reorder_by, self.reverse)
if self.parallel > 1:
parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast)
# Since tests are distributed across processes on a per-TestCase
# basis, there's no need for more processes than TestCases.
parallel_units = len(parallel_suite.subsuites)
self.parallel = min(self.parallel, parallel_units)
# If there's only one TestCase, parallelization isn't needed.
if self.parallel > 1:
suite = parallel_suite
return suite
def setup_databases(self, **kwargs):
return _setup_databases(
self.verbosity, self.interactive, self.keepdb, self.debug_sql,
self.parallel, **kwargs
)
def get_resultclass(self):
return DebugSQLTextTestResult if self.debug_sql else None
def get_test_runner_kwargs(self):
return {
'failfast': self.failfast,
'resultclass': self.get_resultclass(),
'verbosity': self.verbosity,
}
def run_checks(self):
# Checks are run after database creation since some checks require
# database access.
call_command('check', verbosity=self.verbosity)
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
return runner.run(suite)
def teardown_databases(self, old_config, **kwargs):
"""Destroy all the non-mirror databases."""
_teardown_databases(
old_config,
verbosity=self.verbosity,
parallel=self.parallel,
keepdb=self.keepdb,
)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def _get_databases(self, suite):
databases = set()
for test in suite:
if isinstance(test, unittest.TestCase):
test_databases = getattr(test, 'databases', None)
if test_databases == '__all__':
return set(connections)
if test_databases:
databases.update(test_databases)
else:
databases.update(self._get_databases(test))
return databases
def get_databases(self, suite):
databases = self._get_databases(suite)
if self.verbosity >= 2:
unused_databases = [alias for alias in connections if alias not in databases]
if unused_databases:
print('Skipping setup of unused database(s): %s.' % ', '.join(sorted(unused_databases)))
return databases
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Return the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
databases = self.get_databases(suite)
old_config = self.setup_databases(aliases=databases)
run_failed = False
try:
self.run_checks()
result = self.run_suite(suite)
except Exception:
run_failed = True
raise
finally:
try:
self.teardown_databases(old_config)
self.teardown_test_environment()
except Exception:
# Silence teardown exceptions if an exception was raised during
# runs to avoid shadowing it.
if not run_failed:
raise
return self.suite_result(suite, result)
def is_discoverable(label):
"""
Check if a test label points to a Python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def reorder_suite(suite, classes, reverse=False):
"""
Reorder a test suite by test type.
`classes` is a sequence of types.
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, sort tests within classes in opposite order but
don't reverse test classes.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite_by_type(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite
def partition_suite_by_type(suite, classes, bins, reverse=False):
"""
Partition a test suite by test type. Also prevent duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are placed in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite_by_type(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test)
def partition_suite_by_case(suite):
"""Partition a test suite by test case, preserving the order of tests."""
groups = []
suite_class = type(suite)
for test_type, test_group in itertools.groupby(suite, type):
if issubclass(test_type, unittest.TestCase):
groups.append(suite_class(test_group))
else:
for item in test_group:
groups.extend(partition_suite_by_case(item))
return groups
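# Hypothetical demo, not part of Django: consecutive tests sharing a
# TestCase class end up in one sub-suite each, in their original order.
def _example_partition_by_case():
    class _A(unittest.TestCase):
        def test_one(self):
            pass
        def test_two(self):
            pass
    class _B(unittest.TestCase):
        def test_three(self):
            pass
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(_A))
    suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(_B))
    return [group.countTestCases() for group in partition_suite_by_case(suite)]  # [2, 1]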
def filter_tests_by_tags(suite, tags, exclude_tags):
suite_class = type(suite)
filtered_suite = suite_class()
for test in suite:
if isinstance(test, suite_class):
filtered_suite.addTests(filter_tests_by_tags(test, tags, exclude_tags))
else:
test_tags = set(getattr(test, 'tags', set()))
test_fn_name = getattr(test, '_testMethodName', str(test))
test_fn = getattr(test, test_fn_name, test)
test_fn_tags = set(getattr(test_fn, 'tags', set()))
all_tags = test_tags.union(test_fn_tags)
matched_tags = all_tags.intersection(tags)
if (matched_tags or not tags) and not all_tags.intersection(exclude_tags):
filtered_suite.addTest(test)
return filtered_suite
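# Hypothetical demo, not part of Django: tags are collected from both the
# class and the method (set by the django.test.tag decorator), and a match
# against exclude_tags removes the test regardless of inclusion.
def _example_filter_by_tags():
    from django.test import tag
    class _Demo(unittest.TestCase):
        @tag('slow')
        def test_slow(self):
            pass
        def test_fast(self):
            pass
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(_Demo)
    fast_only = filter_tests_by_tags(suite, set(), {'slow'})
    return fast_only.countTestCases()  # 1: only test_fast survives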
|
8820e26a234b28907f7dc64b0e4e2ceea945834779f124a3eb63865cdf85d16f | import json
import mimetypes
import os
import re
import sys
from copy import copy
from functools import partial
from http import HTTPStatus
from importlib import import_module
from io import BytesIO
from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
# Structured suffix spec: https://tools.ietf.org/html/rfc6838#section-4.2.8
JSON_CONTENT_TYPE_RE = re.compile(r'^application\/(.+\+)?json')
class RedirectCycleError(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload:
"""
A wrapper around BytesIO that restricts what can be read since data from
the network isn't seekable and can't be read beyond its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after it has been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
try:
yield from iterable
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most Web servers by removing the content of
responses to HEAD requests and of 1xx, 204, and 304 responses. Ensure
compliance with RFC 7230, section 3.3.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
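# Hypothetical demo, not part of Django: a HEAD response keeps its headers
# but loses its body, mirroring what a real web server puts on the wire.
# Needs a configured settings module (HttpResponse reads DEFAULT_CHARSET).
def _example_content_removal():
    from django.http import HttpResponse
    request = RequestFactory().head('/')  # RequestFactory is defined below
    response = conditional_content_removal(request, HttpResponse('ignored'))
    assert response.content == b''
    return response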
class ClientHandler(BaseHandler):
"""
A HTTP Handler that can be used for testing purposes. Use the WSGI
interface to compose requests, but return the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most Web servers.
conditional_content_removal(request, response)
# Attach the originating request to the response so that it can be
# retrieved later.
response.wsgi_request = request
# Emulate a WSGI server by calling the close method on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
if 'context' not in store:
store['context'] = ContextList()
store['context'].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if value is None:
raise TypeError(
'Cannot encode None as POST data. Did you mean to pass an '
'empty string or omit the value?'
)
elif is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, 'name') and isinstance(file.name, str)
filename = os.path.basename(file.name) if file_has_string_name else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
filename = filename or key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
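# Illustrative sketch, not part of Django: form values and file uploads are
# combined into a single multipart body by the two helpers above. Needs
# configured settings, since to_bytes() reads settings.DEFAULT_CHARSET.
def _example_encode_multipart():
    from django.core.files.uploadedfile import SimpleUploadedFile
    upload = SimpleUploadedFile('notes.txt', b'file contents', content_type='text/plain')
    body = encode_multipart(BOUNDARY, {'title': 'hello', 'attachment': upload})
    assert b'Content-Disposition: form-data; name="title"' in body
    assert b'Content-Type: text/plain' in body
    return body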
class RequestFactory:
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, *, json_encoder=DjangoJSONEncoder, **defaults):
self.json_encoder = json_encoder
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See https://www.python.org/dev/peps/pep-3333/#environ-variables
return {
'HTTP_COOKIE': '; '.join(sorted(
'%s=%s' % (morsel.key, morsel.coded_value)
for morsel in self.cookies.values()
)),
'PATH_INFO': '/',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
**self.defaults,
**request,
}
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _encode_json(self, data, content_type):
"""
Return encoded JSON if data is a dict, list, or tuple and content_type
is application/json.
"""
should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance(data, (dict, list, tuple))
return json.dumps(data, cls=self.json_encoder) if should_encode else data
def _get_path(self, parsed):
path = parsed.path
# If there are parameters, add them
if parsed.params:
path += ";" + parsed.params
path = unquote_to_bytes(path)
# Replace the behavior where non-ASCII values in the WSGI environ are
# arbitrarily decoded with ISO-8859-1.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode('iso-8859-1')
def get(self, path, data=None, secure=False, **extra):
"""Construct a GET request."""
data = {} if data is None else data
return self.generic('GET', path, secure=secure, **{
'QUERY_STRING': urlencode(data, doseq=True),
**extra,
})
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"""Construct a POST request."""
data = self._encode_json({} if data is None else data, content_type)
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"""Construct a HEAD request."""
data = {} if data is None else data
return self.generic('HEAD', path, secure=secure, **{
'QUERY_STRING': urlencode(data, doseq=True),
**extra,
})
def trace(self, path, secure=False, **extra):
"""Construct a TRACE request."""
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PUT request."""
data = self._encode_json(data, content_type)
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PATCH request."""
data = self._encode_json(data, content_type)
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a DELETE request."""
data = self._encode_json(data, content_type)
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': method,
'SERVER_PORT': '443' if secure else '80',
'wsgi.url_scheme': 'https' if secure else 'http',
}
if data:
r.update({
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': content_type,
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
# WSGI requires latin-1 encoded strings. See get_path_info().
query_string = parsed[4].encode().decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
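# Illustrative sketch, not part of Django: the usual pattern is to build a
# request and call a view function directly, bypassing middleware and URL
# resolution. The view is hypothetical; running this needs configured settings.
def _example_request_factory():
    from django.http import JsonResponse
    def search(request):
        return JsonResponse({'q': request.GET.get('q', '')})
    request = RequestFactory().get('/search/', {'q': 'django'})
    response = search(request)
    return response  # response.content == b'{"q": "django"}'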
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super().__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""Store exceptions when they are generated by a view."""
self.exc_info = sys.exc_info()
@property
def session(self):
"""Return the current session variables."""
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
def request(self, **request):
"""
The master request method. Compose the environment dictionary and pass
to the handler, return the result of the handler. Assume defaults for
the query environment, which can be overridden using the arguments to
the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
_, exc_value, _ = self.exc_info
self.exc_info = None
raise exc_value
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using GET."""
response = super().get(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""Request a response from the server using POST."""
response = super().post(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using HEAD."""
response = super().head(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Request a response from the server using OPTIONS."""
response = super().options(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PUT."""
response = super().put(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PATCH."""
response = super().patch(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a DELETE request to the server."""
response = super().delete(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""Send a TRACE request to the server."""
response = super().trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def login(self, **credentials):
"""
Set the Client to appear as if it has successfully logged into a site.
Return True if login is possible; False if the provided credentials
are incorrect.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if user:
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
def get_backend():
from django.contrib.auth import load_backend
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
if hasattr(backend, 'get_user'):
return backend_path
if backend is None:
backend = get_backend()
user.backend = backend
self._login(user, backend)
def _login(self, user, backend=None):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""Log out the user by removing the cookies and session object."""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if not hasattr(response, '_json'):
if not JSON_CONTENT_TYPE_RE.match(response.get('Content-Type')):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
response._json = json.loads(response.content.decode(), **extra)
return response._json
def _handle_redirects(self, response, data='', content_type='', **extra):
"""
Follow any redirects by requesting responses from the server using GET.
"""
response.redirect_chain = []
redirect_status_codes = (
HTTPStatus.MOVED_PERMANENTLY,
HTTPStatus.FOUND,
HTTPStatus.SEE_OTHER,
HTTPStatus.TEMPORARY_REDIRECT,
HTTPStatus.PERMANENT_REDIRECT,
)
while response.status_code in redirect_status_codes:
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
# Prepend the request path to handle relative path redirects
path = url.path
if not path.startswith('/'):
path = urljoin(response.request['PATH_INFO'], path)
if response.status_code in (HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT):
# Preserve request method post-redirect for 307/308 responses.
request_method = getattr(self, response.request['REQUEST_METHOD'].lower())
else:
request_method = self.get
data = QueryDict(url.query)
content_type = None
response = request_method(path, data=data, content_type=content_type, follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
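# Illustrative sketch, not part of Django: follow=True drives the loop
# above. The URL is hypothetical; running this needs configured settings and
# a URLconf that actually serves and redirects these paths.
def _example_follow_redirects():
    client = Client()
    response = client.get('/old-path/', follow=True)
    # Each hop is recorded as a (url, status_code) pair; the final response
    # is whatever the last non-redirect view returned.
    for url, status_code in response.redirect_chain:
        print(url, status_code)
    return response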
|
9843f306fb99d9876d41c1d5a617b9d438698e36e9c47bc655cafd88cdc51356 | import difflib
import json
import posixpath
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy
from difflib import get_close_matches
from functools import wraps
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse,
)
from urllib.request import url2pathname
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils.decorators import classproperty
from django.utils.deprecation import RemovedInDjango31Warning
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Put value into a list if it's not already one. Return an empty list if
value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super().__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
'%d. %s' % (i, query['sql']) for i, query in enumerate(self.captured_queries, start=1)
)
)
)
class _AssertTemplateUsedContext:
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if self.rendered_templates:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names)
)
else:
message += ' No template was rendered.'
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _DatabaseFailure:
def __init__(self, wrapped, message):
self.wrapped = wrapped
self.message = message
def __call__(self):
raise AssertionError(self.message)
class _SimpleTestCaseDatabasesDescriptor:
"""Descriptor for SimpleTestCase.allow_database_queries deprecation."""
def __get__(self, instance, cls=None):
try:
allow_database_queries = cls.allow_database_queries
except AttributeError:
pass
else:
msg = (
'`SimpleTestCase.allow_database_queries` is deprecated. '
'Restrict the databases available during the execution of '
'%s.%s with the `databases` attribute instead.'
) % (cls.__module__, cls.__qualname__)
warnings.warn(msg, RemovedInDjango31Warning)
if allow_database_queries:
return {DEFAULT_DB_ALIAS}
return set()
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
_overridden_settings = None
_modified_settings = None
databases = _SimpleTestCaseDatabasesDescriptor()
_disallowed_database_msg = (
'Database %(operation)s to %(alias)r are not allowed in SimpleTestCase '
'subclasses. Either subclass TestCase or TransactionTestCase to ensure '
'proper test isolation or add %(alias)r to %(test)s.databases to silence '
'this failure.'
)
_disallowed_connection_methods = [
('connect', 'connections'),
('temporary_connection', 'connections'),
('cursor', 'queries'),
('chunked_cursor', 'queries'),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
cls._add_databases_failures()
@classmethod
def _validate_databases(cls):
if cls.databases == '__all__':
return frozenset(connections)
for alias in cls.databases:
if alias not in connections:
message = '%s.%s.databases refers to %r which is not defined in settings.DATABASES.' % (
cls.__module__,
cls.__qualname__,
alias,
)
close_matches = get_close_matches(alias, list(connections))
if close_matches:
message += ' Did you mean %r?' % close_matches[0]
raise ImproperlyConfigured(message)
return frozenset(cls.databases)
@classmethod
def _add_databases_failures(cls):
cls.databases = cls._validate_databases()
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, operation in cls._disallowed_connection_methods:
message = cls._disallowed_database_msg % {
'test': '%s.%s' % (cls.__module__, cls.__qualname__),
'alias': alias,
'operation': operation,
}
method = getattr(connection, name)
setattr(connection, name, _DatabaseFailure(method, message))
@classmethod
def _remove_databases_failures(cls):
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, _ in cls._disallowed_connection_methods:
method = getattr(connection, name)
setattr(connection, name, method.wrapped)
@classmethod
def tearDownClass(cls):
cls._remove_databases_failures()
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super().tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (
getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)
)
if not skipped:
try:
self._pre_setup()
except Exception:
result.addError(self, sys.exc_info())
return
super().__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""
Perform pre-test setup:
* Create a test client.
* Clear the mail test outbox.
"""
self.client = self.client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the
original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
A context manager that temporarily applies changes to a list setting and
reverts to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, msg_prefix='',
fetch_redirect_response=True):
"""
Assert that a response redirected to a specific URL and that the
redirect URL can be loaded.
Won't work for external links since it uses the test client to do a
request (use fetch_redirect_response=False to check such links without
fetching them).
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(
response.redirect_chain,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
self.assertEqual(
response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
% (response.redirect_chain[0][1], status_code)
)
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(
response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
% (response.status_code, target_status_code)
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith('/'):
url = urljoin(response.request['PATH_INFO'], url)
path = urljoin(response.request['PATH_INFO'], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
redirect_response = response.client.get(path, QueryDict(query), secure=(scheme == 'https'))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(
redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
% (path, redirect_response.status_code, target_status_code)
)
self.assertURLEqual(
url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
)
def assertURLEqual(self, url1, url2, msg_prefix=''):
"""
Assert that two URLs are the same, ignoring the order of query string
parameters except for parameters with the same name.
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
/path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
"""
def normalize(url):
"""Sort the URL's query string parameters."""
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse((scheme, netloc, path, params, urlencode(query_parts), fragment))
self.assertEqual(
normalize(url1), normalize(url2),
msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2)
)
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code)
)
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = str(text)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Assert that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors))
)
elif field in context[form].fields:
self.fail(
msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
(field, form, i)
)
else:
self.fail(
msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
(form, i, field)
)
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(
err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors or 'none')
)
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Assert that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err, repr(field_errors))
)
elif field in context[formset].forms[form_index].fields:
self.fail(
msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
% (field, formset, form_index, i)
)
else:
self.fail(
msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
% (formset, form_index, i, field)
)
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(
not non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain any non-field errors." % (formset, form_index, i)
)
self.assertTrue(
err in non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain the non-field error '%s' (actual errors: %s)"
% (formset, form_index, i, err, repr(non_field_errors))
)
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(
not non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain any non-form errors." % (formset, i)
)
self.assertTrue(
err in non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain the non-form error '%s' (actual errors: %s)"
% (formset, i, err, repr(non_form_errors))
)
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Assert that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ', '.join(template_names))
)
if count is not None:
self.assertEqual(
template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name))
)
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Assert that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
)
@contextmanager
def _assert_raises_or_warns_cm(self, func, cm_attr, expected_exception, expected_message):
with func(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(getattr(cm, cm_attr)))
def _assertFooMessage(self, func, cm_attr, expected_exception, expected_message, *args, **kwargs):
callable_obj = None
if args:
callable_obj, *args = args
cm = self._assert_raises_or_warns_cm(func, cm_attr, expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
Assert that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
return self._assertFooMessage(
self.assertRaises, 'exception', expected_exception, expected_message,
*args, **kwargs
)
def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
"""
Same as assertRaisesMessage but for assertWarns() instead of
assertRaises().
"""
return self._assertFooMessage(
self.assertWarns, 'warning', expected_warning, expected_message,
*args, **kwargs
)
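    # Illustrative usage sketch (not part of the original module); the test
    # method name is hypothetical and only demonstrates both calling styles:
    #
    #     def test_messages(self):
    #         # As a context manager:
    #         with self.assertRaisesMessage(ValueError, 'invalid literal'):
    #             int('not a number')
    #         # Or by passing a callable with its arguments:
    #         self.assertRaisesMessage(ValueError, 'invalid literal', int, 'oops')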
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Assert that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **{**field_kwargs, 'required': False})
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [required.error_messages['required']]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
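    # Illustrative usage sketch (not part of the original module); it mirrors
    # how Django's own form field tests call this helper:
    #
    #     from django.forms import EmailField
    #
    #     def test_emailfield(self):
    #         self.assertFieldOutput(
    #             EmailField,
    #             valid={'a@a.com': 'a@a.com'},
    #             invalid={'aaa': ['Enter a valid email address.']},
    #         )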
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Assert that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
str(dom1).splitlines(), str(dom2).splitlines(),
)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Assert that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
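    # Illustrative usage sketch (not part of the original module):
    #
    #     def test_html(self):
    #         # Attribute order, quoting, and most whitespace are ignored.
    #         self.assertHTMLEqual(
    #             '<input type="text" name="q">',
    #             "<input name='q' type='text' />",
    #         )
    #         # assertInHTML() checks semantic containment, optionally counted.
    #         self.assertInHTML('<b>Hi</b>', '<div><b>Hi</b> there</div>', count=1)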
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are equal.
Usual JSON non-significant whitespace rules apply as the heavyweight
is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are not equal.
Usual JSON non-significant whitespace rules apply as the heavyweight
is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
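    # Illustrative usage sketch (not part of the original module):
    #
    #     def test_json(self):
    #         # expected_data may be a JSON string or an already-parsed object.
    #         self.assertJSONEqual('{"a": 1, "b": 2}', {'b': 2, 'a': 1})
    #         self.assertJSONNotEqual('{"a": 1}', '{"a": 2}')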
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are semantically the same.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(xml1.splitlines(), xml2.splitlines())
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class _TransactionTestCaseDatabasesDescriptor:
"""Descriptor for TransactionTestCase.multi_db deprecation."""
msg = (
'`TransactionTestCase.multi_db` is deprecated. Databases available '
'during this test can be defined using %s.%s.databases.'
)
def __get__(self, instance, cls=None):
try:
multi_db = cls.multi_db
except AttributeError:
pass
else:
msg = self.msg % (cls.__module__, cls.__qualname__)
warnings.warn(msg, RemovedInDjango31Warning)
if multi_db:
return set(connections)
return {DEFAULT_DB_ALIAS}
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
databases = _TransactionTestCaseDatabasesDescriptor()
_disallowed_database_msg = (
'Database %(operation)s to %(alias)r are not allowed in this test. '
'Add %(alias)r to %(test)s.databases to ensure proper test isolation '
'and silence this failure.'
)
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
def _pre_setup(self):
"""
Perform pre-test setup:
* If the class has an 'available_apps' attribute, restrict the app
registry to these applications, then fire the post_migrate signal --
it must run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, install those fixtures.
"""
super()._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False,
)
raise
# Clear the queries_log so that it's less likely to overflow (a single
# test probably won't execute 9K queries). If queries_log overflows,
# then assertNumQueries() doesn't work.
for db_name in self._databases_names(include_mirrors=False):
connections[db_name].queries_log.clear()
@classmethod
def _databases_names(cls, include_mirrors=True):
# Only consider allowed database aliases, including mirrors or not.
return [
alias for alias in connections
if alias in cls.databases and (
include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
)
]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
with conn.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# Provide replica initial data from migrated apps, if needed.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""
Perform post-test things:
* Flush the contents of the database to leave a clean slate. If the
class has an 'available_apps' attribute, don't fire post_migrate.
* Force-close the connection so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super()._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None or
( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
        # For example qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
        if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
            raise ValueError("Trying to compare non-ordered queryset "
                             "against more than one ordered value")
return self.assertEqual(list(items), values, msg=msg)
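    # Illustrative usage sketch (not part of the original module); `Author`
    # is a hypothetical model:
    #
    #     def test_authors(self):
    #         qs = Author.objects.order_by('name')
    #         self.assertQuerysetEqual(qs, ['Ann', 'Bob'], transform=lambda a: a.name)
    #         # With the default transform=repr, the expected values must be
    #         # repr() strings such as '<Author: Ann>'.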
def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
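    # Illustrative usage sketch (not part of the original module); `Author`
    # is a hypothetical model:
    #
    #     def test_query_count(self):
    #         # As a context manager:
    #         with self.assertNumQueries(1):
    #             list(Author.objects.all())
    #         # Or by passing a callable:
    #         self.assertNumQueries(1, Author.objects.count)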
def connections_support_transactions(aliases=None):
"""
Return whether or not all (or specified) connections support
transactions.
"""
conns = connections.all() if aliases is None else (connections[alias] for alias in aliases)
return all(conn.features.supports_transactions for conn in conns)
class _TestCaseDatabasesDescriptor(_TransactionTestCaseDatabasesDescriptor):
"""Descriptor for TestCase.multi_db deprecation."""
msg = (
'`TestCase.multi_db` is deprecated. Databases available during this '
'test can be defined using %s.%s.databases.'
)
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
databases = _TestCaseDatabasesDescriptor()
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def _databases_support_transactions(cls):
return connections_support_transactions(cls.databases)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not cls._databases_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{'verbosity': 0, 'database': db_name})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
cls._remove_databases_failures()
raise
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
cls._remove_databases_failures()
raise
@classmethod
def tearDownClass(cls):
if cls._databases_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
def _should_reload_connections(self):
if self._databases_support_transactions():
return False
return super()._should_reload_connections()
def _fixture_setup(self):
if not self._databases_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super()._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not self._databases_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks and
not connection.needs_rollback and connection.is_usable()
)
class CheckCondition:
"""Descriptor class for deferred condition checking."""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions, (condition, reason))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, '__unittest_skip__', False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason, name):
def decorator(test_func):
nonlocal condition
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if (args and isinstance(args[0], unittest.TestCase) and
connection.alias not in getattr(args[0], 'databases', {})):
raise ValueError(
"%s cannot be used on %s as %s doesn't allow queries "
"against the %r database." % (
name,
args[0],
args[0].__class__.__qualname__,
connection.alias,
)
)
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
databases = getattr(test_item, 'databases', None)
if not databases or connection.alias not in databases:
# Defer raising to allow importing test class's module.
def condition():
raise ValueError(
"%s cannot be used on %s as it doesn't allow queries "
"against the '%s' database." % (
name, test_item, connection.alias,
)
)
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get('__unittest_skip__')
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""Skip a test if a database has at least one of the named features."""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features),
'skipIfDBFeature',
)
def skipUnlessDBFeature(*features):
"""Skip a test unless a database has all the named features."""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features),
'skipUnlessDBFeature',
)
def skipUnlessAnyDBFeature(*features):
"""Skip a test unless a database has any of the named features."""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features),
'skipUnlessAnyDBFeature',
)
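# Illustrative usage sketch (not part of the original module). The conditions
# are evaluated lazily, so the database connection need not be ready at
# import time:
#
#     @skipUnlessDBFeature('supports_transactions')
#     def test_rollback_behavior(self):
#         ...
#
#     @skipIfDBFeature('interprets_empty_strings_as_nulls')
#     def test_empty_string_roundtrip(self):
#         ...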
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A WSGIRequestHandler that doesn't log to standard output any of the
requests received, so as to not clutter the test result output.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""Thread for running a live http server while the tests are running."""
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server()
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self):
return ThreadedWSGIServer((self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
Do basically the same as TransactionTestCase but also launch a live HTTP
server in a separate thread so that the tests may use another testing
framework, such as Selenium for example, instead of the built-in dummy
client.
It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory SQLite)
and each thread needs to commit all their transactions so that the other
thread can see the changes.
"""
host = 'localhost'
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.host
@classmethod
def setUpClass(cls):
super().setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={'append': cls.allowed_host},
)
cls._live_server_modified_settings.enable()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() raised
        # an exception for some reason.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
# Restore sqlite in-memory database connections' non-shareability
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
conn.allow_thread_sharing = False
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
cls._live_server_modified_settings.disable()
super().tearDownClass()
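# Illustrative usage sketch (not part of the original module); the URL and
# test name are hypothetical:
#
#     from urllib.request import urlopen
#
#     class HomepageTests(LiveServerTestCase):
#         def test_homepage(self):
#             with urlopen(self.live_server_url + '/') as response:
#                 self.assertEqual(response.status, 200)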
class SerializeMixin:
"""
Enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._lockfile.close()
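# Illustrative usage sketch (not part of the original module); the lockfile
# value is an assumption -- any existing file shared by the group works:
#
#     class BaseFilesystemTests(SerializeMixin, SimpleTestCase):
#         lockfile = __file__
#
#     class FirstTests(BaseFilesystemTests):
#         ...  # runs serialized with respect to SecondTests
#
#     class SecondTests(BaseFilesystemTests):
#         ...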
|
c4018839bff0d38e4a7cf2ec0013af7103cd65b61765f29a1a5ec2e2d8522704 | """Compare two HTML documents."""
import re
from html.parser import HTMLParser
WHITESPACE = re.compile(r'\s+')
def normalize_whitespace(string):
return WHITESPACE.sub(' ', string)
class Element:
def __init__(self, name, attributes):
self.name = name
self.attributes = sorted(attributes)
self.children = []
def append(self, element):
if isinstance(element, str):
element = normalize_whitespace(element)
if self.children:
if isinstance(self.children[-1], str):
self.children[-1] += element
self.children[-1] = normalize_whitespace(self.children[-1])
return
elif self.children:
            # Remove the last child if it is only whitespace. This can result
            # in incorrect DOM representations since whitespace between
            # inline tags like <span> is significant.
if isinstance(self.children[-1], str):
if self.children[-1].isspace():
self.children.pop()
if element:
self.children.append(element)
def finalize(self):
def rstrip_last_element(children):
if children:
if isinstance(children[-1], str):
children[-1] = children[-1].rstrip()
if not children[-1]:
children.pop()
children = rstrip_last_element(children)
return children
rstrip_last_element(self.children)
for i, child in enumerate(self.children):
if isinstance(child, str):
self.children[i] = child.strip()
elif hasattr(child, 'finalize'):
child.finalize()
def __eq__(self, element):
if not hasattr(element, 'name') or self.name != element.name:
return False
if len(self.attributes) != len(element.attributes):
return False
if self.attributes != element.attributes:
            # An attribute without a value is the same as an attribute with a
            # value equal to the attribute's name:
            # <input checked> == <input checked="checked">
for i in range(len(self.attributes)):
attr, value = self.attributes[i]
other_attr, other_value = element.attributes[i]
if value is None:
value = attr
if other_value is None:
other_value = other_attr
if attr != other_attr or value != other_value:
return False
return self.children == element.children
def __hash__(self):
return hash((self.name, *(a for a in self.attributes)))
def _count(self, element, count=True):
if not isinstance(element, str):
if self == element:
return 1
if isinstance(element, RootElement):
if self.children == element.children:
return 1
i = 0
for child in self.children:
            # If both child and element are text content, do a simple
            # "text" in "text" containment check.
if isinstance(child, str):
if isinstance(element, str):
if count:
i += child.count(element)
elif element in child:
return 1
else:
i += child._count(element, count=count)
if not count and i:
return i
return i
def __contains__(self, element):
return self._count(element, count=False) > 0
def count(self, element):
return self._count(element, count=True)
def __getitem__(self, key):
return self.children[key]
def __str__(self):
output = '<%s' % self.name
for key, value in self.attributes:
if value:
output += ' %s="%s"' % (key, value)
else:
output += ' %s' % key
if self.children:
output += '>\n'
output += ''.join(str(c) for c in self.children)
output += '\n</%s>' % self.name
else:
output += '>'
return output
def __repr__(self):
return str(self)
class RootElement(Element):
def __init__(self):
super().__init__(None, ())
def __str__(self):
return ''.join(str(c) for c in self.children)
class HTMLParseError(Exception):
pass
class Parser(HTMLParser):
SELF_CLOSING_TAGS = (
'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base',
'col',
)
def __init__(self):
super().__init__(convert_charrefs=False)
self.root = RootElement()
self.open_tags = []
self.element_positions = {}
def error(self, msg):
raise HTMLParseError(msg, self.getpos())
def format_position(self, position=None, element=None):
if not position and element:
position = self.element_positions[element]
if position is None:
position = self.getpos()
if hasattr(position, 'lineno'):
position = position.lineno, position.offset
return 'Line %d, Column %d' % position
@property
def current(self):
if self.open_tags:
return self.open_tags[-1]
else:
return self.root
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
if tag not in self.SELF_CLOSING_TAGS:
self.handle_endtag(tag)
def handle_starttag(self, tag, attrs):
# Special case handling of 'class' attribute, so that comparisons of DOM
# instances are not sensitive to ordering of classes.
attrs = [
(name, " ".join(sorted(value.split(" "))))
if name == "class"
else (name, value)
for name, value in attrs
]
element = Element(tag, attrs)
self.current.append(element)
if tag not in self.SELF_CLOSING_TAGS:
self.open_tags.append(element)
self.element_positions[element] = self.getpos()
def handle_endtag(self, tag):
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
while element.name != tag:
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
def handle_data(self, data):
self.current.append(data)
def handle_charref(self, name):
self.current.append('&%s;' % name)
def handle_entityref(self, name):
self.current.append('&%s;' % name)
def parse_html(html):
"""
Take a string that contains *valid* HTML and turn it into a Python object
structure that can be easily compared against other HTML on semantic
equivalence. Syntactical differences like which quotation is used on
arguments will be ignored.
"""
parser = Parser()
parser.feed(html)
parser.close()
document = parser.root
document.finalize()
# Removing ROOT element if it's not necessary
if len(document.children) == 1:
if not isinstance(document.children[0], str):
document = document.children[0]
return document
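# Illustrative usage sketch (not part of the original module):
#
#     dom1 = parse_html('<p class="a b">hi</p>')
#     dom2 = parse_html("<p class='b a'>hi</p>")
#     assert dom1 == dom2  # quoting and class ordering are normalized
#
#     small = parse_html('<p>hi</p>')
#     big = parse_html('<div><p>hi</p></div>')
#     assert small in big  # semantic containment via __contains__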
|
d37dbb723c69c35a0a7bd1f977c5dfedc76d5c4e1d97d5eba432f9e3195daf69 | import collections
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from io import StringIO
from itertools import chain
from types import SimpleNamespace
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import request_started
from django.db import DEFAULT_DB_ALIAS, connections, reset_queries
from django.db.models.options import Options
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate:
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
return self.val == other or round(abs(self.val - other), self.places) == 0
class ContextList(list):
"""
A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, str):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super().__getitem__(key)
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
return set(chain.from_iterable(d for subcontext in self for d in subcontext))
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal that can be
intercepted by the test Client.
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
class _TestState:
pass
def setup_test_environment(debug=None):
"""
Perform global pre-test setup, such as installing the instrumented template
renderer and setting the email backend to the locmem email backend.
"""
if hasattr(_TestState, 'saved_data'):
# Executing this function twice would overwrite the saved values.
raise RuntimeError(
"setup_test_environment() was already called and can't be called "
"again without first calling teardown_test_environment()."
)
if debug is None:
debug = settings.DEBUG
saved_data = SimpleNamespace()
_TestState.saved_data = saved_data
saved_data.allowed_hosts = settings.ALLOWED_HOSTS
# Add the default host of the test client.
settings.ALLOWED_HOSTS = [*settings.ALLOWED_HOSTS, 'testserver']
saved_data.debug = settings.DEBUG
settings.DEBUG = debug
saved_data.email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
saved_data.template_render = Template._render
Template._render = instrumented_test_render
mail.outbox = []
deactivate()
def teardown_test_environment():
"""
Perform any global post-test teardown, such as restoring the original
template renderer and restoring the email sending functions.
"""
saved_data = _TestState.saved_data
settings.ALLOWED_HOSTS = saved_data.allowed_hosts
settings.DEBUG = saved_data.debug
settings.EMAIL_BACKEND = saved_data.email_backend
Template._render = saved_data.template_render
del _TestState.saved_data
del mail.outbox
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, parallel=0, aliases=None, **kwargs):
"""Create the test databases."""
test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)
old_names = []
for db_name, aliases in test_databases.values():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get('TEST', {}).get('SERIALIZE', True),
)
if parallel > 1:
for index in range(parallel):
connection.creation.clone_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
# Configure all other connections as mirrors of the first one
else:
connections[alias].creation.set_as_test_mirror(connections[first_alias].settings_dict)
# Configure the test mirrors.
for alias, mirror_alias in mirrored_aliases.items():
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
# Maps db signature to dependencies of all its aliases
dependencies_map = {}
# Check that no database depends on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases
)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
# Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
def get_unique_databases_and_mirrors(aliases=None):
"""
Figure out which databases actually need to be created.
    Deduplicate entries in DATABASES that correspond to the same database or are
configured as test mirrors.
Return two values:
- test_databases: ordered mapping of signatures to (name, list of aliases)
where all aliases share the same underlying database.
- mirrored_aliases: mapping of mirror aliases to original aliases.
"""
if aliases is None:
aliases = connections
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict['TEST']
if test_settings['MIRROR']:
# If the database is marked as a test mirror, save the alias.
mirrored_aliases[alias] = test_settings['MIRROR']
elif alias in aliases:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'DEPENDENCIES' in test_settings:
dependencies[alias] = test_settings['DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
test_databases = dependency_ordered(test_databases.items(), dependencies)
test_databases = collections.OrderedDict(test_databases)
return test_databases, mirrored_aliases
def teardown_databases(old_config, verbosity, parallel=0, keepdb=False):
"""Destroy all the non-mirror databases."""
for connection, old_name, destroy in old_config:
if destroy:
if parallel > 1:
for index in range(parallel):
connection.creation.destroy_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
connection.creation.destroy_test_db(old_name, verbosity, keepdb)
def get_runner(settings, test_runner_class=None):
test_runner_class = test_runner_class or settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, test_path[-1])
test_runner = getattr(test_module, test_path[-1])
return test_runner
class TestContextDecorator:
"""
A base class that can either be used as a context manager during tests
or as a test function or unittest.TestCase subclass decorator to perform
temporary alterations.
`attr_name`: attribute assigned the return value of enable() if used as
a class decorator.
`kwarg_name`: keyword argument passing the return value of enable() if
used as a function decorator.
"""
def __init__(self, attr_name=None, kwarg_name=None):
self.attr_name = attr_name
self.kwarg_name = kwarg_name
def enable(self):
raise NotImplementedError
def disable(self):
raise NotImplementedError
def __enter__(self):
return self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def decorate_class(self, cls):
if issubclass(cls, TestCase):
decorated_setUp = cls.setUp
decorated_tearDown = cls.tearDown
def setUp(inner_self):
context = self.enable()
if self.attr_name:
setattr(inner_self, self.attr_name, context)
try:
decorated_setUp(inner_self)
except Exception:
self.disable()
raise
def tearDown(inner_self):
decorated_tearDown(inner_self)
self.disable()
cls.setUp = setUp
cls.tearDown = tearDown
return cls
raise TypeError('Can only decorate subclasses of unittest.TestCase')
def decorate_callable(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self as context:
if self.kwarg_name:
kwargs[self.kwarg_name] = context
return func(*args, **kwargs)
return inner
def __call__(self, decorated):
if isinstance(decorated, type):
return self.decorate_class(decorated)
elif callable(decorated):
return self.decorate_callable(decorated)
raise TypeError('Cannot decorate object of type %s' % type(decorated))
class override_settings(TestContextDecorator):
"""
Act as either a decorator or a context manager. If it's a decorator, take a
function and return a wrapped function. If it's a contextmanager, use it
with the ``with`` statement. In either event, entering/exiting are called
before and after, respectively, the function/block is executed.
"""
enable_exception = None
def __init__(self, **kwargs):
self.options = kwargs
super().__init__()
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
try:
setting_changed.send(
sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True,
)
except Exception as exc:
self.enable_exception = exc
self.disable()
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
responses = []
for key in self.options:
new_value = getattr(settings, key, None)
responses_for_setting = setting_changed.send_robust(
sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False,
)
responses.extend(responses_for_setting)
if self.enable_exception is not None:
exc = self.enable_exception
self.enable_exception = None
raise exc
for _, response in responses:
if isinstance(response, Exception):
raise response
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = {
**test_func._overridden_settings,
**self.options,
}
def decorate_class(self, cls):
from django.test import SimpleTestCase
if not issubclass(cls, SimpleTestCase):
raise ValueError(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(cls)
return cls
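# Illustrative usage sketch (not part of the original module):
#
#     @override_settings(DEBUG=True, ALLOWED_HOSTS=['example.com'])
#     class DebugTests(SimpleTestCase):
#         ...
#
#     # Or as a context manager inside a test:
#     with override_settings(USE_TZ=False):
#         ...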
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend, or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
super(override_settings, self).__init__()
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, str):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super().enable()
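# Illustrative usage sketch (not part of the original module); the appended
# middleware path is hypothetical:
#
#     @modify_settings(MIDDLEWARE={
#         'append': 'myapp.middleware.ExtraMiddleware',
#         'remove': 'django.middleware.csrf.CsrfViewMiddleware',
#     })
#     class MiddlewareTests(SimpleTestCase):
#         ...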
class override_system_checks(TestContextDecorator):
"""
Act as a decorator. Override list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks.
"""
def __init__(self, new_checks, deployment_checks=None):
from django.core.checks.registry import registry
self.registry = registry
self.new_checks = new_checks
self.deployment_checks = deployment_checks
super().__init__()
def enable(self):
self.old_checks = self.registry.registered_checks
self.registry.registered_checks = set()
for check in self.new_checks:
self.registry.register(check, *getattr(check, 'tags', ()))
self.old_deployment_checks = self.registry.deployment_checks
if self.deployment_checks is not None:
self.registry.deployment_checks = set()
for check in self.deployment_checks:
self.registry.register(check, *getattr(check, 'tags', ()), deploy=True)
def disable(self):
self.registry.registered_checks = self.old_checks
self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
"""
    Try to do an 'xml-comparison' of want and got. Plain string comparison
doesn't always work because, for example, attribute ordering should not be
important. Ignore comment nodes and leading and trailing whitespace.
Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join(c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE)
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
return all(check_element(want, got) for want, got in zip(want_children, got_children))
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want = want.strip().replace('\\n', '\n')
got = got.strip().replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
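# Illustrative usage sketch (not part of the original module):
#
#     assert compare_xml('<a b="1" c="2"/>', '<a c="2" b="1"/>')  # attr order
#     assert compare_xml('<a>x</a><b/>', '<a>x</a><b/>')          # fragments
#     assert not compare_xml('<a>x</a>', '<a>y</a>')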
def str_prefix(s):
return s % {'_': ''}
class CaptureQueriesContext:
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
# Run any initialization queries if needed so that they won't be
# included as part of the count.
self.connection.ensure_connection()
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
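# Illustrative usage sketch (not part of the original module); `Author` is a
# hypothetical model:
#
#     from django.db import connection
#
#     with CaptureQueriesContext(connection) as ctx:
#         list(Author.objects.all())
#     print(len(ctx), ctx.captured_queries[0]['sql'])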
class ignore_warnings(TestContextDecorator):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
super().__init__()
def enable(self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func('ignore', **self.ignore_kwargs)
def disable(self):
self.catch_warnings.__exit__(*sys.exc_info())
@contextmanager
def patch_logger(logger_name, log_level, log_kwargs=False):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received.
Use unittest.assertLogs() if you only need Python 3 support. This
private API will be removed after Python 2 EOL in 2020 (#27753).
"""
calls = []
def replacement(msg, *args, **kwargs):
call = msg % args
calls.append((call, kwargs) if log_kwargs else call)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(
TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that."
)
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
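# Illustrative usage sketch (not part of the original module):
#
#     import time
#
#     with freeze_time(1000000000):
#         assert time.time() == 1000000000
#     # time.time() is restored on exit; bindings created earlier with
#     # `from time import time` keep the real function throughout.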
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
test_func = override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {'keep_trailing_newline': True},
}])(test_func)
return test_func
class override_script_prefix(TestContextDecorator):
"""Decorator or context manager to temporary override the script prefix."""
def __init__(self, prefix):
self.prefix = prefix
super().__init__()
def enable(self):
self.old_prefix = get_script_prefix()
set_script_prefix(self.prefix)
def disable(self):
set_script_prefix(self.old_prefix)
class LoggingCaptureMixin:
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
self.logger_output = StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
"""
Act as either a decorator or a context manager to register models defined
in its wrapped context to an isolated registry.
The list of installed apps the isolated registry should contain must be
passed as arguments.
Two optional keyword arguments can be specified:
`attr_name`: attribute assigned the isolated registry if used as a class
decorator.
`kwarg_name`: keyword argument passing the isolated registry if used as a
function decorator.
"""
def __init__(self, *installed_apps, **kwargs):
self.installed_apps = installed_apps
super().__init__(**kwargs)
def enable(self):
self.old_apps = Options.default_apps
apps = Apps(self.installed_apps)
setattr(Options, 'default_apps', apps)
return apps
def disable(self):
setattr(Options, 'default_apps', self.old_apps)
def tag(*tags):
"""Decorator to add tags to a test class or method."""
def decorator(obj):
if hasattr(obj, 'tags'):
obj.tags = obj.tags.union(tags)
else:
setattr(obj, 'tags', set(tags))
return obj
return decorator
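# Usage sketch (illustrative): tagged tests can then be selected or skipped
# with the test runner's --tag/--exclude-tag options.
#
#     @tag('slow', 'integration')
#     class SlowTests(TestCase):
#         @tag('smoke')
#         def test_boots(self):
#             ...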
@contextmanager
def register_lookup(field, *lookups, lookup_name=None):
"""
Context manager to temporarily register lookups on a model field using
lookup_name (or the lookup's lookup_name if not provided).
"""
try:
for lookup in lookups:
field.register_lookup(lookup, lookup_name)
yield
finally:
for lookup in lookups:
field._unregister_lookup(lookup, lookup_name)
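# Usage sketch (illustrative): temporarily registering the built-in Length
# transform on CharField enables the __length lookup only inside the block.
# The Book model is hypothetical.
#
#     from django.db.models import CharField
#     from django.db.models.functions import Length
#
#     with register_lookup(CharField, Length):
#         Book.objects.filter(title__length__gt=3)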
|
f27f6c64f29e7d693b8b9a583d8701ba07bd37299dba4c2947680bb0936f1e57 | import functools
import sys
import threading
import warnings
from collections import Counter, OrderedDict, defaultdict
from functools import partial
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from .config import AppConfig
class Apps:
"""
A registry that stores the configuration of installed applications.
It also keeps track of models, e.g. to provide reverse relations.
"""
def __init__(self, installed_apps=()):
# installed_apps is set to None when creating the master registry
# because it cannot be populated at that point. Other registries must
# provide a list of installed apps and are populated immediately.
if installed_apps is None and hasattr(sys.modules[__name__], 'apps'):
raise RuntimeError("You must supply an installed_apps argument.")
# Mapping of app labels => model names => model classes. Every time a
# model is imported, ModelBase.__new__ calls apps.register_model which
# creates an entry in all_models. All imported models are registered,
# regardless of whether they're defined in an installed application
# and whether the registry has been populated. Since it isn't possible
# to reimport a module safely (it could reexecute initialization code)
# all_models is never overridden or reset.
self.all_models = defaultdict(OrderedDict)
# Mapping of labels to AppConfig instances for installed apps.
self.app_configs = OrderedDict()
# Stack of app_configs. Used to store the current state in
# set_available_apps and set_installed_apps.
self.stored_app_configs = []
# Whether the registry is populated.
self.apps_ready = self.models_ready = self.ready = False
# For the autoreloader.
self.ready_event = threading.Event()
# Lock for thread-safe population.
self._lock = threading.RLock()
self.loading = False
# Maps ("app_label", "modelname") tuples to lists of functions to be
# called when the corresponding model is ready. Used by this class's
# `lazy_model_operation()` and `do_pending_operations()` methods.
self._pending_operations = defaultdict(list)
# Populate apps and models, unless it's the master registry.
if installed_apps is not None:
self.populate(installed_apps)
def populate(self, installed_apps=None):
"""
Load application configurations and models.
Import each application module and then each model module.
It is thread-safe and idempotent, but not reentrant.
"""
if self.ready:
return
# populate() might be called by two threads in parallel on servers
# that create threads before initializing the WSGI callable.
with self._lock:
if self.ready:
return
# An RLock prevents other threads from entering this section. The
# compare and set operation below is atomic.
if self.loading:
# Prevent reentrant calls to avoid running AppConfig.ready()
# methods twice.
raise RuntimeError("populate() isn't reentrant")
self.loading = True
# Phase 1: initialize app configs and import app modules.
for entry in installed_apps:
if isinstance(entry, AppConfig):
app_config = entry
else:
app_config = AppConfig.create(entry)
if app_config.label in self.app_configs:
raise ImproperlyConfigured(
"Application labels aren't unique, "
"duplicates: %s" % app_config.label)
self.app_configs[app_config.label] = app_config
app_config.apps = self
# Check for duplicate app names.
counts = Counter(
app_config.name for app_config in self.app_configs.values())
duplicates = [
name for name, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Application names aren't unique, "
"duplicates: %s" % ", ".join(duplicates))
self.apps_ready = True
# Phase 2: import models modules.
for app_config in self.app_configs.values():
app_config.import_models()
self.clear_cache()
self.models_ready = True
# Phase 3: run ready() methods of app configs.
for app_config in self.get_app_configs():
app_config.ready()
self.ready = True
self.ready_event.set()
def check_apps_ready(self):
"""Raise an exception if all apps haven't been imported yet."""
if not self.apps_ready:
from django.conf import settings
# If "not ready" is due to unconfigured settings, accessing
# INSTALLED_APPS raises a more helpful ImproperlyConfigured
# exception.
settings.INSTALLED_APPS
raise AppRegistryNotReady("Apps aren't loaded yet.")
def check_models_ready(self):
"""Raise an exception if all models haven't been imported yet."""
if not self.models_ready:
raise AppRegistryNotReady("Models aren't loaded yet.")
def get_app_configs(self):
"""Import applications and return an iterable of app configs."""
self.check_apps_ready()
return self.app_configs.values()
def get_app_config(self, app_label):
"""
        Import applications and return an app config for the given label.
Raise LookupError if no application exists with this label.
"""
self.check_apps_ready()
try:
return self.app_configs[app_label]
except KeyError:
message = "No installed app with label '%s'." % app_label
for app_config in self.get_app_configs():
if app_config.name == app_label:
message += " Did you mean '%s'?" % app_config.label
break
raise LookupError(message)
# This method is performance-critical at least for Django's test suite.
@functools.lru_cache(maxsize=None)
def get_models(self, include_auto_created=False, include_swapped=False):
"""
Return a list of all installed models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
"""
self.check_models_ready()
result = []
for app_config in self.app_configs.values():
result.extend(app_config.get_models(include_auto_created, include_swapped))
return result
def get_model(self, app_label, model_name=None, require_ready=True):
"""
Return the model matching the given app_label and model_name.
As a shortcut, app_label may be in the form <app_label>.<model_name>.
model_name is case-insensitive.
Raise LookupError if no application exists with this label, or no
model exists with this name in the application. Raise ValueError if
called with a single argument that doesn't contain exactly one dot.
"""
if require_ready:
self.check_models_ready()
else:
self.check_apps_ready()
if model_name is None:
app_label, model_name = app_label.split('.')
app_config = self.get_app_config(app_label)
if not require_ready and app_config.models is None:
app_config.import_models()
return app_config.get_model(model_name, require_ready=require_ready)
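    # Usage sketch (illustrative): both call styles resolve the same class,
    # and model_name matching is case-insensitive.
    #
    #     apps.get_model('auth', 'User')
    #     apps.get_model('auth.User')
    #     apps.get_model('auth.user')  # same model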
def register_model(self, app_label, model):
# Since this method is called when models are imported, it cannot
# perform imports because of the risk of import loops. It mustn't
# call get_app_config().
model_name = model._meta.model_name
app_models = self.all_models[app_label]
if model_name in app_models:
if (model.__name__ == app_models[model_name].__name__ and
model.__module__ == app_models[model_name].__module__):
warnings.warn(
"Model '%s.%s' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models." % (app_label, model_name),
RuntimeWarning, stacklevel=2)
else:
raise RuntimeError(
"Conflicting '%s' models in application '%s': %s and %s." %
(model_name, app_label, app_models[model_name], model))
app_models[model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def is_installed(self, app_name):
"""
Check whether an application with this name exists in the registry.
app_name is the full name of the app e.g. 'django.contrib.admin'.
"""
self.check_apps_ready()
return any(ac.name == app_name for ac in self.app_configs.values())
def get_containing_app_config(self, object_name):
"""
Look for an app config containing a given object.
object_name is the dotted Python path to the object.
Return the app config for the inner application in case of nesting.
Return None if the object isn't in any registered app config.
"""
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name[len(app_config.name):]
if subpath == '' or subpath[0] == '.':
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambda ac: -len(ac.name))[0]
def get_registered_model(self, app_label, model_name):
"""
Similar to get_model(), but doesn't require that an app exists with
the given app_label.
It's safe to call this method at import time, even while the registry
is being populated.
"""
model = self.all_models[app_label].get(model_name.lower())
if model is None:
raise LookupError(
"Model '%s.%s' not registered." % (app_label, model_name))
return model
@functools.lru_cache(maxsize=None)
def get_swappable_settings_name(self, to_string):
"""
For a given model string (e.g. "auth.User"), return the name of the
corresponding settings name if it refers to a swappable model. If the
referred model is not swappable, return None.
This method is decorated with lru_cache because it's performance
critical when it comes to migrations. Since the swappable settings don't
change after Django has loaded the settings, there is no reason to get
the respective settings attribute over and over again.
"""
for model in self.get_models(include_swapped=True):
swapped = model._meta.swapped
# Is this model swapped out for the model given by to_string?
if swapped and swapped == to_string:
return model._meta.swappable
# Is this model swappable and the one given by to_string?
if model._meta.swappable and model._meta.label == to_string:
return model._meta.swappable
return None
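    # Usage sketch (illustrative): with the default user model in place,
    #
    #     apps.get_swappable_settings_name('auth.User')   # -> 'AUTH_USER_MODEL'
    #     apps.get_swappable_settings_name('sites.Site')  # -> None (not swappable)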
def set_available_apps(self, available):
"""
Restrict the set of installed apps used by get_app_config[s].
available must be an iterable of application names.
set_available_apps() must be balanced with unset_available_apps().
Primarily used for performance optimization in TransactionTestCase.
This method is safe in the sense that it doesn't trigger any imports.
"""
available = set(available)
installed = {app_config.name for app_config in self.get_app_configs()}
if not available.issubset(installed):
raise ValueError(
"Available apps isn't a subset of installed apps, extra apps: %s"
% ", ".join(available - installed)
)
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict(
(label, app_config)
for label, app_config in self.app_configs.items()
if app_config.name in available)
self.clear_cache()
def unset_available_apps(self):
"""Cancel a previous call to set_available_apps()."""
self.app_configs = self.stored_app_configs.pop()
self.clear_cache()
def set_installed_apps(self, installed):
"""
Enable a different set of installed apps for get_app_config[s].
installed must be an iterable in the same format as INSTALLED_APPS.
set_installed_apps() must be balanced with unset_installed_apps(),
even if it exits with an exception.
Primarily used as a receiver of the setting_changed signal in tests.
This method may trigger new imports, which may add new models to the
registry of all imported models. They will stay in the registry even
after unset_installed_apps(). Since it isn't possible to replay
imports safely (e.g. that could lead to registering listeners twice),
models are registered when they're imported and never removed.
"""
if not self.ready:
raise AppRegistryNotReady("App registry isn't ready yet.")
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict()
self.apps_ready = self.models_ready = self.loading = self.ready = False
self.clear_cache()
self.populate(installed)
def unset_installed_apps(self):
"""Cancel a previous call to set_installed_apps()."""
self.app_configs = self.stored_app_configs.pop()
self.apps_ready = self.models_ready = self.ready = True
self.clear_cache()
def clear_cache(self):
"""
Clear all internal caches, for methods that alter the app registry.
This is mostly used in tests.
"""
# Call expire cache on each model. This will purge
# the relation tree and the fields cache.
self.get_models.cache_clear()
if self.ready:
            # Circumvent self.get_models() to prevent the cache from being
            # refilled. In particular, this avoids caching an empty value
            # while cloning.
for app_config in self.app_configs.values():
for model in app_config.get_models(include_auto_created=True):
model._meta._expire_cache()
def lazy_model_operation(self, function, *model_keys):
"""
Take a function and a number of ("app_label", "modelname") tuples, and
when all the corresponding models have been imported and registered,
call the function with the model classes as its arguments.
The function passed to this method must accept exactly n models as
arguments, where n=len(model_keys).
"""
# Base case: no arguments, just execute the function.
if not model_keys:
function()
# Recursive case: take the head of model_keys, wait for the
# corresponding model class to be imported and registered, then apply
# that argument to the supplied function. Pass the resulting partial
# to lazy_model_operation() along with the remaining model args and
# repeat until all models are loaded and all arguments are applied.
else:
next_model, *more_models = model_keys
# This will be executed after the class corresponding to next_model
# has been imported and registered. The `func` attribute provides
# duck-type compatibility with partials.
def apply_next_model(model):
next_function = partial(apply_next_model.func, model)
self.lazy_model_operation(next_function, *more_models)
apply_next_model.func = function
# If the model has already been imported and registered, partially
# apply it to the function now. If not, add it to the list of
# pending operations for the model, where it will be executed with
# the model class as its sole argument once the model is ready.
try:
model_class = self.get_registered_model(*next_model)
except LookupError:
self._pending_operations[next_model].append(apply_next_model)
else:
apply_next_model(model_class)
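    # Usage sketch (illustrative): the callback fires once every named model
    # has been imported and registered, receiving the classes as arguments.
    # The callback and the second model key are hypothetical.
    #
    #     def connect_signals(user_model, profile_model):
    #         ...
    #
    #     apps.lazy_model_operation(connect_signals, ('auth', 'user'), ('myapp', 'profile'))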
def do_pending_operations(self, model):
"""
Take a newly-prepared model and pass it to each function waiting for
it. This is called at the very end of Apps.register_model().
"""
key = model._meta.app_label, model._meta.model_name
for function in self._pending_operations.pop(key, []):
function(model)
apps = Apps(installed_apps=None)
|
9274581891ef44402640912912de61b2e9192684c67f33fcd012722d059421f9 | from urllib.parse import quote
from django.http import (
HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotFound,
HttpResponseServerError,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.views.decorators.csrf import requires_csrf_token
ERROR_404_TEMPLATE_NAME = '404.html'
ERROR_403_TEMPLATE_NAME = '403.html'
ERROR_400_TEMPLATE_NAME = '400.html'
ERROR_500_TEMPLATE_NAME = '500.html'
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def page_not_found(request, exception, template_name=ERROR_404_TEMPLATE_NAME):
"""
Default 404 handler.
Templates: :template:`404.html`
Context:
request_path
The path of the requested URL (e.g., '/app/pages/bad_page/'). It's
quoted to prevent a content injection attack.
exception
The message from the exception which triggered the 404 (if one was
            supplied), or the exception class name.
"""
exception_repr = exception.__class__.__name__
# Try to get an "interesting" exception message, if any (and not the ugly
# Resolver404 dictionary)
try:
message = exception.args[0]
except (AttributeError, IndexError):
pass
else:
if isinstance(message, str):
exception_repr = message
context = {
'request_path': quote(request.path),
'exception': exception_repr,
}
try:
template = loader.get_template(template_name)
body = template.render(context, request)
content_type = None # Django will use 'text/html'.
except TemplateDoesNotExist:
if template_name != ERROR_404_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
# Render template (even though there are no substitutions) to allow
# inspecting the context in tests.
template = Engine().from_string(
'<h1>Not Found</h1>'
'<p>The requested resource was not found on this server.</p>')
body = template.render(Context(context))
content_type = 'text/html'
return HttpResponseNotFound(body, content_type=content_type)
@requires_csrf_token
def server_error(request, template_name=ERROR_500_TEMPLATE_NAME):
"""
500 error handler.
Templates: :template:`500.html`
Context: None
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_500_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseServerError('<h1>Server Error (500)</h1>', content_type='text/html')
return HttpResponseServerError(template.render())
@requires_csrf_token
def bad_request(request, exception, template_name=ERROR_400_TEMPLATE_NAME):
"""
400 error handler.
Templates: :template:`400.html`
Context: None
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_400_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseBadRequest('<h1>Bad Request (400)</h1>', content_type='text/html')
    # No exception content is passed to the template, so as not to disclose any sensitive information.
return HttpResponseBadRequest(template.render())
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def permission_denied(request, exception, template_name=ERROR_403_TEMPLATE_NAME):
"""
Permission denied (403) handler.
Templates: :template:`403.html`
Context: None
    If the template does not exist, an HttpResponseForbidden containing the
    text "403 Forbidden" (as per RFC 7231) will be returned.
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_403_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseForbidden('<h1>403 Forbidden</h1>', content_type='text/html')
return HttpResponseForbidden(
template.render(request=request, context={'exception': str(exception)})
)
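# Usage sketch (illustrative): projects replace these defaults by assigning
# handler400/403/404/500 in the root URLconf. The module path below is
# hypothetical.
#
#     # urls.py
#     handler404 = 'myproject.views.custom_page_not_found'
#     handler500 = 'myproject.views.custom_server_error'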
|
f0e4d331827f23c5e80d08c2ac6fc87ca6e2db604272096a60818faf01b43cfa | import functools
import re
import sys
import types
from pathlib import Path
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import Resolver404, resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
debug=True,
libraries={'i18n': 'django.templatetags.i18n'},
)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.IGNORECASE)
CLEANSED_SUBSTITUTE = '********************'
CURRENT_DIR = Path(__file__).parent
class CallableSettingWrapper:
"""
    Object to wrap callables appearing in settings.
    * Ensures they are not called in the debug page (#21345).
    * Prevents the debug page from breaking when a callable forbids setting
      attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""
Cleanse an individual setting key/value of sensitive content. If the value
is a dictionary, recursively cleanse the keys in that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
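# Illustrative behavior: keys matching HIDDEN_SETTINGS are starred out and
# dict values are cleansed recursively.
#
#     cleanse_setting('SECRET_KEY', 'abc')             # -> CLEANSED_SUBSTITUTE
#     cleanse_setting('DEBUG', True)                   # -> True
#     cleanse_setting('DATABASES', {'PASSWORD': 'x'})  # PASSWORD starred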
def get_safe_settings():
"""
Return a dictionary of the settings module with values of sensitive
settings replaced with stars (*********).
"""
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain; charset=utf-8')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
@functools.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
class ExceptionReporterFilter:
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replace the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replace the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k in cleansed:
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
            # If value is lazy or is a complex object of another kind, this
            # check might raise an exception. Wrapping the isinstance() call
            # in try/except means a failing evaluation is reported instead of
            # crashing the debug page.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replace the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name in tb_frame.f_locals:
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
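# Usage sketch (illustrative): the names this method looks for are recorded
# by the public django.views.decorators.debug.sensitive_variables decorator.
#
#     from django.views.decorators.debug import sensitive_variables
#
#     @sensitive_variables('password', 'credit_card')
#     def process_payment(request):
#         password = request.POST['password']  # starred in error reports
#         ...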
class ExceptionReporter:
"""Organize and coordinate reporting on exceptions."""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.postmortem = None
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
# Trim large blobs of data
if len(v) > 4096:
v = '%s… <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, v))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = force_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
if self.request is None:
user_str = None
else:
try:
user_str = str(self.request.user)
except Exception:
# request.user may raise OperationalError if the database is
# unavailable, for example.
user_str = '[unable to retrieve the current user]'
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'user_str': user_str,
'filtered_POST_items': list(self.filter.get_post_parameters(self.request).items()),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'postmortem': self.postmortem,
}
if self.request is not None:
c['request_GET_items'] = self.request.GET.items()
c['request_FILES_items'] = self.request.FILES.items()
c['request_COOKIES_items'] = self.request.COOKIES.items()
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = str(self.exc_value)
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"""Return HTML version of debug 500 HTTP error page."""
with Path(CURRENT_DIR, 'templates', 'technical_500.html').open() as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"""Return plain text version of debug 500 HTTP error page."""
with Path(CURRENT_DIR, 'templates', 'technical_500.txt').open() as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Return context_lines before and after lineno from file.
Return (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
            except OSError:  # IOError is an alias of OSError on Python 3.
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (https://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [str(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = self.tb if not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = '<source code not available>'
post_context = []
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
# If the traceback for current exception is consumed, try the
# other exception.
if not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def technical_404_response(request, exception):
"""Create a technical 404 error response. `exception` is the Http404."""
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried or ( # empty URLconf
request.path == '/' and
len(tried) == 1 and # default URLconf
len(tried[0]) == 1 and
getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
)):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
with Path(CURRENT_DIR, 'templates', 'technical_404.html').open() as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': str(exception),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"""Create an empty URLconf 404 error response."""
with Path(CURRENT_DIR, 'templates', 'default_urlconf.html').open() as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context({
'version': get_docs_version(),
})
return HttpResponse(t.render(c), content_type='text/html')
|
ec43c9e0632a7823fe35b30ed34663642d0fea2d2e0d5510841b48174ce90554 | """
Settings and configuration for Django.
Read values from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global_settings.py
for a list of all possible variables.
"""
import importlib
import os
import time
import traceback
import warnings
from pathlib import Path
import django
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango31Warning
from django.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
FILE_CHARSET_DEPRECATED_MSG = (
'The FILE_CHARSET setting is deprecated. Starting with Django 3.1, all '
'files read from disk must be UTF-8 encoded.'
)
class SettingsReference(str):
"""
String subclass which references a current settings value. It's treated as
the value in memory but serializes to a settings.NAME attribute reference.
"""
    def __new__(cls, value, setting_name):
        return str.__new__(cls, value)
def __init__(self, value, setting_name):
self.setting_name = setting_name
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time settings are needed, if the user hasn't
configured settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module)s">' % {
'settings_module': self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
"""Return the value of a setting and cache it in self.__dict__."""
if self._wrapped is empty:
self._setup(name)
val = getattr(self._wrapped, name)
self.__dict__[name] = val
return val
def __setattr__(self, name, value):
"""
        Set the value of a setting. Clear all cached values if _wrapped changes
(@override_settings does this) or clear single values when set.
"""
if name == '_wrapped':
self.__dict__.clear()
else:
self.__dict__.pop(name, None)
super().__setattr__(name, value)
def __delattr__(self, name):
"""Delete a setting and clear it from cache if needed."""
super().__delattr__(name)
self.__dict__.pop(name, None)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
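    # Usage sketch (illustrative): standalone scripts can configure settings
    # manually instead of setting DJANGO_SETTINGS_MODULE. The values below
    # are arbitrary examples.
    #
    #     from django.conf import settings
    #
    #     if not settings.configured:
    #         settings.configure(DEBUG=True, INSTALLED_APPS=['django.contrib.contenttypes'])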
@property
def configured(self):
"""Return True if the settings have already been configured."""
return self._wrapped is not empty
@property
def FILE_CHARSET(self):
stack = traceback.extract_stack()
# Show a warning if the setting is used outside of Django.
# Stack index: -1 this line, -2 the caller.
filename, _line_number, _function_name, _text = stack[-2]
if not filename.startswith(os.path.dirname(django.__file__)):
warnings.warn(
FILE_CHARSET_DEPRECATED_MSG,
RemovedInDjango31Warning,
stacklevel=2,
)
return self.__getattr__('FILE_CHARSET')
class Settings:
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if self.is_overridden('FILE_CHARSET'):
warnings.warn(FILE_CHARSET_DEPRECATED_MSG, RemovedInDjango31Warning)
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = Path('/usr/share/zoneinfo')
zone_info_file = zoneinfo_root.joinpath(*self.TIME_ZONE.split('/'))
if zoneinfo_root.exists() and not zone_info_file.exists():
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
class UserSettingsHolder:
"""Holder for user configured settings."""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
if name == 'FILE_CHARSET':
warnings.warn(FILE_CHARSET_DEPRECATED_MSG, RemovedInDjango31Warning)
super().__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super().__delattr__(name)
def __dir__(self):
return sorted(
s for s in [*self.__dict__, *dir(self.default_settings)]
if s not in self._deleted
)
def is_overridden(self, setting):
deleted = (setting in self._deleted)
set_locally = (setting in self.__dict__)
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
return deleted or set_locally or set_on_default
def __repr__(self):
return '<%(cls)s>' % {
'cls': self.__class__.__name__,
}
settings = LazySettings()
|
ce4ddfe498d5763714d9f0292e59d3834f85305adca40018b4a3d92b79b6db34 | """
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('hy', gettext_noop('Armenian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and various other emails.
MANAGERS = ADMINS
# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
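# A minimal example (values are illustrative):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'db.sqlite3',
#     }
# }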
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which streamed file uploads will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where the user will place custom format definitions.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add a thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
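# A typical value when running behind a TLS-terminating proxy (illustrative;
# the header name depends on what your proxy actually sets):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')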
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are serialized to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos, or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not hide the message, but
# Django will not stop you from e.g. running the server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
|
b216e6e2197d1199e41926ab53c9023427ef8b77d1cfecb8229327ddc2e7fe3d | "Functions that help with dynamically creating decorators for views."
# For backwards compatibility in Django 2.0.
from contextlib import ContextDecorator # noqa
from functools import WRAPPER_ASSIGNMENTS, partial, update_wrapper, wraps
class classonlymethod(classmethod):
def __get__(self, instance, cls=None):
if instance is not None:
raise AttributeError("This method is available only on the class, not on instances.")
return super().__get__(instance, cls)
def _update_method_wrapper(_wrapper, decorator):
# _multi_decorate()'s bound_method isn't available in this scope. Cheat by
# using it on a dummy function.
@decorator
def dummy(*args, **kwargs):
pass
update_wrapper(_wrapper, dummy)
def _multi_decorate(decorators, method):
"""
Decorate `method` with one or more function decorators. `decorators` can be
a single decorator or an iterable of decorators.
"""
if hasattr(decorators, '__iter__'):
# Apply a list/tuple of decorators if 'decorators' is one. Decorator
# functions are applied so that the call order is the same as the
# order in which they appear in the iterable.
decorators = decorators[::-1]
else:
decorators = [decorators]
def _wrapper(self, *args, **kwargs):
# bound_method has the signature that 'decorator' expects, i.e. no
# 'self' argument, but it's a closure over self so it can call
# 'func'. Also, wrap method.__get__() in a function because new
# attributes can't be set on bound method objects, only on functions.
bound_method = partial(method.__get__(self, type(self)))
for dec in decorators:
bound_method = dec(bound_method)
return bound_method(*args, **kwargs)
# Copy any attributes that a decorator adds to the function it decorates.
for dec in decorators:
_update_method_wrapper(_wrapper, dec)
# Preserve any existing attributes of 'method', including the name.
update_wrapper(_wrapper, method)
return _wrapper
def method_decorator(decorator, name=''):
"""
Convert a function decorator into a method decorator
"""
# 'obj' can be a class or a function. If 'obj' is a function at the time it
# is passed to _dec, it will eventually be a method of the class it is
# defined on. If 'obj' is a class, the 'name' is required to be the name
# of the method that will be decorated.
def _dec(obj):
if not isinstance(obj, type):
return _multi_decorate(decorator, obj)
if not (name and hasattr(obj, name)):
raise ValueError(
"The keyword argument `name` must be the name of a method "
"of the decorated class: %s. Got '%s' instead." % (obj, name)
)
method = getattr(obj, name)
if not callable(method):
raise TypeError(
"Cannot decorate '%s' as it isn't a callable attribute of "
"%s (%s)." % (name, obj, method)
)
_wrapper = _multi_decorate(decorator, method)
setattr(obj, name, _wrapper)
return obj
# Don't worry about making _dec look similar to a list/tuple as it's rather
# meaningless.
if not hasattr(decorator, '__iter__'):
update_wrapper(_dec, decorator)
# Change the name to aid debugging.
obj = decorator if hasattr(decorator, '__name__') else decorator.__class__
_dec.__name__ = 'method_decorator(%s)' % obj.__name__
return _dec
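# Usage sketch (illustrative): method_decorator() lets a plain function
# decorator such as django.contrib.auth.decorators.login_required wrap a
# method of a class-based view. The view class below is hypothetical.
#
#     from django.contrib.auth.decorators import login_required
#     from django.views.generic import TemplateView
#
#     @method_decorator(login_required, name='dispatch')
#     class ProtectedView(TemplateView):
#         template_name = 'secret.html'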
def decorator_from_middleware_with_args(middleware_class):
"""
Like decorator_from_middleware, but return a function
that accepts the arguments to be passed to the middleware_class.
Use like::
cache_page = decorator_from_middleware_with_args(CacheMiddleware)
# ...
@cache_page(3600)
def my_view(request):
# ...
"""
return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
"""
Given a middleware class (not an instance), return a view decorator. This
lets you use middleware functionality on a per-view basis. The middleware
is created with no params passed.
"""
return make_middleware_decorator(middleware_class)()
# Unused, for backwards compatibility in Django 2.0.
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This was required as a workaround for https://bugs.python.org/issue3445
under Python 2.
"""
return WRAPPER_ASSIGNMENTS
def make_middleware_decorator(middleware_class):
def _make_decorator(*m_args, **m_kwargs):
middleware = middleware_class(*m_args, **m_kwargs)
def _decorator(view_func):
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if hasattr(middleware, 'process_request'):
result = middleware.process_request(request)
if result is not None:
return result
if hasattr(middleware, 'process_view'):
result = middleware.process_view(request, view_func, args, kwargs)
if result is not None:
return result
try:
response = view_func(request, *args, **kwargs)
except Exception as e:
if hasattr(middleware, 'process_exception'):
result = middleware.process_exception(request, e)
if result is not None:
return result
raise
if hasattr(response, 'render') and callable(response.render):
if hasattr(middleware, 'process_template_response'):
response = middleware.process_template_response(request, response)
# Defer running of process_response until after the template
# has been rendered:
if hasattr(middleware, 'process_response'):
def callback(response):
return middleware.process_response(request, response)
response.add_post_render_callback(callback)
else:
if hasattr(middleware, 'process_response'):
return middleware.process_response(request, response)
return response
return _wrapped_view
return _decorator
return _make_decorator
class classproperty:
def __init__(self, method=None):
self.fget = method
def __get__(self, instance, cls=None):
return self.fget(cls)
def getter(self, method):
self.fget = method
return self
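# Usage sketch (illustrative): classproperty exposes a computed attribute on
# the class itself, with no instance required. The class below is
# hypothetical.
#
#     class Circle:
#         @classproperty
#         def sides(cls):
#             return 0
#
#     Circle.sides  # -> 0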
|
9168e49ec0c41a0addda587aa96eb71cd779ee52483042ec5e375e1d98c7af2e | import functools
import itertools
import logging
import os
import pathlib
import signal
import subprocess
import sys
import threading
import time
import traceback
from collections import defaultdict
from pathlib import Path
from types import ModuleType
from zipimport import zipimporter
from django.apps import apps
from django.core.signals import request_finished
from django.dispatch import Signal
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
autoreload_started = Signal()
file_changed = Signal(providing_args=['file_path', 'kind'])
DJANGO_AUTORELOAD_ENV = 'RUN_MAIN'
logger = logging.getLogger('django.utils.autoreload')
# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
_exception = None
try:
import termios
except ImportError:
termios = None
try:
import pywatchman
except ImportError:
pywatchman = None
def check_errors(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
raise _exception[0](_exception[1]).with_traceback(_exception[2])
def ensure_echo_on():
if termios:
fd = sys.stdin
if fd.isatty():
attr_list = termios.tcgetattr(fd)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(fd, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def iter_all_python_module_files():
# This is a hot path during reloading. Create a stable sorted list of
# modules based on the module name and pass it to iter_modules_and_files().
# This ensures cached results are returned in the usual case that modules
# aren't loaded on the fly.
modules_view = sorted(list(sys.modules.items()), key=lambda i: i[0])
modules = tuple(m[1] for m in modules_view)
return iter_modules_and_files(modules, frozenset(_error_files))
@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
"""Iterate through all modules needed to be watched."""
sys_file_paths = []
for module in modules:
# During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
# are added to sys.modules, however they are types not modules and so
# cause issues here.
if not isinstance(module, ModuleType) or module.__spec__ is None:
continue
spec = module.__spec__
# Modules could be loaded from places without a concrete location. If
# this is the case, skip them.
if spec.has_location:
origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin
sys_file_paths.append(origin)
results = set()
for filename in itertools.chain(sys_file_paths, extra_files):
if not filename:
continue
path = pathlib.Path(filename)
if not path.exists():
# The module could have been removed, don't fail loudly if this
# is the case.
continue
results.add(path.resolve().absolute())
return frozenset(results)
@functools.lru_cache(maxsize=1)
def common_roots(paths):
"""
Return a tuple of common roots that are shared between the given paths.
File system watchers operate on directories and aren't cheap to create.
Try to find the minimum set of directories to watch that encompass all of
the files that need to be watched.
"""
# Inspired from Werkzeug:
# https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
# Create a sorted list of the path components, longest first.
path_parts = sorted([x.parts for x in paths], key=len, reverse=True)
tree = {}
for chunks in path_parts:
node = tree
# Add each part of the path to the tree.
for chunk in chunks:
node = node.setdefault(chunk, {})
# Clear the last leaf in the tree.
node.clear()
# Turn the tree into a list of Path instances.
def _walk(node, path):
for prefix, child in node.items():
yield from _walk(child, path + (prefix,))
if not node:
yield Path(*path)
return tuple(_walk(tree, ()))
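# Usage sketch (illustrative, hypothetical POSIX paths; a frozenset is passed
# because the function is lru_cached): nested paths collapse into their
# covering directory.
#
#     >>> roots = common_roots(frozenset({Path('/app/src'), Path('/app/src/pkg'), Path('/lib')}))
#     >>> sorted(roots)
#     [PosixPath('/app/src'), PosixPath('/lib')]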
def sys_path_directories():
"""
Yield absolute directories from sys.path, ignoring entries that don't
exist.
"""
for path in sys.path:
path = Path(path)
if not path.exists():
continue
path = path.resolve().absolute()
# If the path is a file (like a zip file), watch the parent directory.
if path.is_file():
yield path.parent
else:
yield path
def get_child_arguments():
"""
Return the arguments with which to re-run the current Python process,
preserving any -W warning options and handling the case where the server
was started with `python -m django`.
"""
import django.__main__
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if sys.argv[0] == django.__main__.__file__:
# The server was started with `python -m django runserver`.
args += ['-m', 'django']
args += sys.argv[1:]
else:
args += sys.argv
return args
def trigger_reload(filename):
logger.info('%s changed, reloading.', filename)
sys.exit(3)
def restart_with_reloader():
new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'}
args = get_child_arguments()
while True:
exit_code = subprocess.call(args, env=new_environ, close_fds=False)
if exit_code != 3:
return exit_code
class BaseReloader:
def __init__(self):
self.extra_files = set()
self.directory_globs = defaultdict(set)
self._stop_condition = threading.Event()
def watch_dir(self, path, glob):
path = Path(path)
if not path.is_absolute():
raise ValueError('%s must be absolute.' % path)
logger.debug('Watching dir %s with glob %s.', path, glob)
self.directory_globs[path].add(glob)
def watch_file(self, path):
path = Path(path)
if not path.is_absolute():
raise ValueError('%s must be absolute.' % path)
logger.debug('Watching file %s.', path)
self.extra_files.add(path)
def watched_files(self, include_globs=True):
"""
Yield all files that need to be watched, including module files and
files within globs.
"""
yield from iter_all_python_module_files()
yield from self.extra_files
if include_globs:
for directory, patterns in self.directory_globs.items():
for pattern in patterns:
yield from directory.glob(pattern)
def wait_for_apps_ready(self, app_reg, django_main_thread):
"""
Wait until Django reports that the apps have been loaded. If the given
thread has terminated before the apps are ready, then a SyntaxError or
other non-recoverable error has been raised. In that case, stop waiting
for the apps_ready event and continue processing.
Return True if the thread is alive and the ready event has been
triggered, or False if the thread is terminated while waiting for the
event.
"""
while django_main_thread.is_alive():
if app_reg.ready_event.wait(timeout=0.1):
return True
else:
logger.debug('Main Django thread has terminated before apps are ready.')
return False
def run(self, django_main_thread):
logger.debug('Waiting for apps ready_event.')
self.wait_for_apps_ready(apps, django_main_thread)
from django.urls import get_resolver
# Prevent a race condition where URL modules aren't loaded when the
# reloader starts by accessing the urlconf_module property.
get_resolver().urlconf_module
logger.debug('Apps ready_event triggered. Sending autoreload_started signal.')
autoreload_started.send(sender=self)
self.run_loop()
def run_loop(self):
ticker = self.tick()
while not self.should_stop:
try:
next(ticker)
except StopIteration:
break
self.stop()
def tick(self):
"""
This generator is called in a loop from run_loop. It's important that
the method takes care of pausing or otherwise waiting for a period of
time. This split between run_loop() and tick() is to improve the
testability of the reloader implementations by decoupling the work they
do from the loop.
"""
raise NotImplementedError('subclasses must implement tick().')
@classmethod
def check_availability(cls):
raise NotImplementedError('subclasses must implement check_availability().')
def notify_file_changed(self, path):
results = file_changed.send(sender=self, file_path=path)
logger.debug('%s notified as changed. Signal results: %s.', path, results)
if not any(res[1] for res in results):
trigger_reload(path)
# These are primarily used for testing.
@property
def should_stop(self):
return self._stop_condition.is_set()
def stop(self):
self._stop_condition.set()
class StatReloader(BaseReloader):
SLEEP_TIME = 1 # Check for changes once per second.
def tick(self):
state, previous_timestamp = {}, time.time()
while True:
state.update(self.loop_files(state, previous_timestamp))
previous_timestamp = time.time()
time.sleep(self.SLEEP_TIME)
yield
def loop_files(self, previous_times, previous_timestamp):
updated_times = {}
for path, mtime in self.snapshot_files():
previous_time = previous_times.get(path)
# If there are overlapping globs, a file may be iterated twice.
if path in updated_times:
continue
# A new file has been detected. This could happen due to it being
# imported at runtime and only being polled now, or because the
# file was just created. Compare the file's mtime to the
# previous_timestamp and send a notification if it was created
# since the last poll.
is_newly_created = previous_time is None and mtime > previous_timestamp
is_changed = previous_time is not None and previous_time != mtime
if is_newly_created or is_changed:
logger.debug('File %s. is_changed: %s, is_new: %s', path, is_changed, is_newly_created)
logger.debug('File %s previous mtime: %s, current mtime: %s', path, previous_time, mtime)
self.notify_file_changed(path)
updated_times[path] = mtime
return updated_times
def snapshot_files(self):
for file in self.watched_files():
try:
mtime = file.stat().st_mtime
except OSError:
# This is thrown when the file does not exist.
continue
yield file, mtime
@classmethod
def check_availability(cls):
return True
class WatchmanUnavailable(RuntimeError):
pass
class WatchmanReloader(BaseReloader):
def __init__(self):
self.roots = defaultdict(set)
self.processed_request = threading.Event()
super().__init__()
@cached_property
def client(self):
return pywatchman.client()
def _watch_root(self, root):
# In practice this shouldn't occur, however, it's possible that a
# directory that doesn't exist yet is being watched. If it's outside of
# sys.path then this will end up a new root. How to handle this isn't
# clear: Not adding the root will likely break when subscribing to the
# changes. However, as this is currently an internal API, no files
# outside of sys.path will be watched. Fixing this by checking
# inside watch_glob() and watch_dir() is expensive; instead, this could
# fall back to the StatReloader if this case is detected? For
# now, watching its parent, if possible, is sufficient.
if not root.exists():
if not root.parent.exists():
logger.warning('Unable to watch root dir %s as neither it nor its parent exists.', root)
return
root = root.parent
result = self.client.query('watch-project', str(root.absolute()))
if 'warning' in result:
logger.warning('Watchman warning: %s', result['warning'])
logger.debug('Watchman watch-project result: %s', result)
return result['watch'], result.get('relative_path')
@functools.lru_cache()
def _get_clock(self, root):
return self.client.query('clock', root)['clock']
def _subscribe(self, directory, name, expression):
root, rel_path = self._watch_root(directory)
query = {
'expression': expression,
'fields': ['name'],
'since': self._get_clock(root),
'dedup_results': True,
}
if rel_path:
query['relative_root'] = rel_path
logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query)
self.client.query('subscribe', root, name, query)
def _subscribe_dir(self, directory, filenames):
if not directory.exists():
if not directory.parent.exists():
logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'files-parent-%s' % directory.name
filenames = ['%s/%s' % (directory.name, filename) for filename in filenames]
directory = directory.parent
expression = ['name', filenames, 'wholename']
else:
prefix = 'files'
expression = ['name', filenames]
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def _watch_glob(self, directory, patterns):
"""
Watch a directory with a specific glob. If the directory doesn't yet
exist, attempt to watch the parent directory and amend the patterns to
include this. It's important this method isn't called more than once per
directory when updating all subscriptions. Subsequent calls will
overwrite the named subscription, so it must include all possible glob
expressions.
"""
prefix = 'glob'
if not directory.exists():
if not directory.parent.exists():
logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'glob-parent-%s' % directory.name
patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]
directory = directory.parent
expression = ['anyof']
for pattern in patterns:
expression.append(['match', pattern, 'wholename'])
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def watched_roots(self, watched_files):
extra_directories = self.directory_globs.keys()
watched_file_dirs = [f.parent for f in watched_files]
sys_paths = list(sys_path_directories())
return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
def _update_watches(self):
watched_files = list(self.watched_files(include_globs=False))
found_roots = common_roots(self.watched_roots(watched_files))
logger.debug('Watching %s files', len(watched_files))
logger.debug('Found common roots: %s', found_roots)
# Set up initial roots for performance, shortest roots first.
for root in sorted(found_roots):
self._watch_root(root)
for directory, patterns in self.directory_globs.items():
self._watch_glob(directory, patterns)
# Group sorted watched_files by their parent directory.
sorted_files = sorted(watched_files, key=lambda p: p.parent)
for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
# These paths need to be relative to the parent directory.
self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group])
def update_watches(self):
try:
self._update_watches()
except Exception as ex:
# If the service is still available, raise the original exception.
if self.check_server_status(ex):
raise
def _check_subscription(self, sub):
subscription = self.client.getSubscription(sub)
if not subscription:
return
logger.debug('Watchman subscription %s has results.', sub)
for result in subscription:
# When using watch-project, it's not simple to get the relative
# directory without storing some specific state. Store the full
# path to the directory in the subscription name, prefixed by its
# type (glob, files).
root_directory = Path(result['subscription'].split(':', 1)[1])
logger.debug('Found root directory %s', root_directory)
for file in result.get('files', []):
self.notify_file_changed(root_directory / file)
def request_processed(self, **kwargs):
logger.debug('Request processed. Setting update_watches event.')
self.processed_request.set()
def tick(self):
request_finished.connect(self.request_processed)
self.update_watches()
while True:
if self.processed_request.is_set():
self.update_watches()
self.processed_request.clear()
try:
self.client.receive()
except pywatchman.WatchmanError as ex:
self.check_server_status(ex)
else:
for sub in list(self.client.subs.keys()):
self._check_subscription(sub)
yield
def stop(self):
self.client.close()
super().stop()
def check_server_status(self, inner_ex=None):
"""Return True if the server is available."""
try:
self.client.query('version')
except Exception:
raise WatchmanUnavailable(str(inner_ex)) from inner_ex
return True
@classmethod
def check_availability(cls):
if not pywatchman:
raise WatchmanUnavailable('pywatchman not installed.')
client = pywatchman.client(timeout=0.01)
try:
result = client.capabilityCheck()
except Exception:
# The service is down?
raise WatchmanUnavailable('Cannot connect to the watchman service.')
version = get_version_tuple(result['version'])
# Watchman 4.9 includes multiple improvements to watching project
# directories as well as case insensitive filesystems.
logger.debug('Watchman version %s', version)
if version < (4, 9):
raise WatchmanUnavailable('Watchman 4.9 or later is required.')
def get_reloader():
"""Return the most suitable reloader for this environment."""
try:
WatchmanReloader.check_availability()
except WatchmanUnavailable:
return StatReloader()
return WatchmanReloader()
def start_django(reloader, main_func, *args, **kwargs):
ensure_echo_on()
main_func = check_errors(main_func)
django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs)
django_main_thread.setDaemon(True)
django_main_thread.start()
while not reloader.should_stop:
try:
reloader.run(django_main_thread)
except WatchmanUnavailable as ex:
# It's possible that the watchman service shuts down or otherwise
# becomes unavailable. In that case, use the StatReloader.
reloader = StatReloader()
logger.error('Error connecting to Watchman: %s', ex)
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
def run_with_reloader(main_func, *args, **kwargs):
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true':
reloader = get_reloader()
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
start_django(reloader, main_func, *args, **kwargs)
else:
try:
WatchmanReloader.check_availability()
except WatchmanUnavailable as e:
logger.info('Watchman unavailable: %s', e)
exit_code = restart_with_reloader()
sys.exit(exit_code)
except KeyboardInterrupt:
pass
|
d9330ad516093543593b729949ae2a12d646e77ce9f35345d7a70370da94ffe3 | import html.entities
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils.functional import SimpleLazyObject, keep_lazy_text, lazy
from django.utils.translation import gettext as _, gettext_lazy, pgettext
@keep_lazy_text
def capfirst(x):
"""Capitalize the first letter of a string."""
return x and str(x)[0].upper() + str(x)[1:]
# Set up regular expressions
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.S)
re_chars = re.compile(r'<.*?>|(.)', re.S)
re_tag = re.compile(r'<(/)?(\S+?)(?:(\s*/)|\s.*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r') # Used in normalize_newlines
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@keep_lazy_text
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks. Expects that
existing line breaks are posix newlines.
All white space is preserved, except that added line breaks consume the
space on which they break the line.
Don't wrap long words, thus the output text may have lines longer than
``width``.
"""
def _generator():
for line in text.splitlines(True): # True keeps trailing linebreaks
max_width = min((line.endswith('\n') and width + 1 or width), width)
while len(line) > max_width:
space = line[:max_width + 1].rfind(' ') + 1
if space == 0:
space = line.find(' ') + 1
if space == 0:
yield line
line = ''
break
yield '%s\n' % line[:space - 1]
line = line[space:]
max_width = min((line.endswith('\n') and width + 1 or width), width)
if line:
yield line
return ''.join(_generator())
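# Usage sketch (illustrative): wrap() breaks on spaces but never splits a
# single word, so lines may exceed the requested width.
#
#     >>> wrap('a b c d', 3)
#     'a b\nc d'
#     >>> wrap('extraordinarily long', 5)
#     'extraordinarily\nlong'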
class Truncator(SimpleLazyObject):
"""
An object used to truncate text, either by characters or words.
"""
def __init__(self, text):
super().__init__(lambda: str(text))
def add_truncation_text(self, text, truncate=None):
if truncate is None:
truncate = pgettext(
'String to return when truncating text',
'%(truncated_text)s…')
if '%(truncated_text)s' in truncate:
return truncate % {'truncated_text': text}
# The truncation text didn't contain the %(truncated_text)s string
# replacement argument so just append it to the text.
if text.endswith(truncate):
# But don't append the truncation text if the current text already
# ends in this.
return text
return '%s%s' % (text, truncate)
def chars(self, num, truncate=None, html=False):
"""
Return the text truncated to be no longer than the specified number
of characters.
`truncate` specifies what should be used to notify that the string has
been truncated, defaulting to a translatable string of an ellipsis.
"""
self._setup()
length = int(num)
text = unicodedata.normalize('NFC', self._wrapped)
# Calculate the length to truncate to (max length - end_text length)
truncate_len = length
for char in self.add_truncation_text('', truncate):
if not unicodedata.combining(char):
truncate_len -= 1
if truncate_len == 0:
break
if html:
return self._truncate_html(length, truncate, text, truncate_len, False)
return self._text_chars(length, truncate, text, truncate_len)
def _text_chars(self, length, truncate, text, truncate_len):
"""Truncate a string after a certain number of chars."""
s_len = 0
end_index = None
for i, char in enumerate(text):
if unicodedata.combining(char):
# Don't consider combining characters
# as adding to the string length
continue
s_len += 1
if end_index is None and s_len > truncate_len:
end_index = i
if s_len > length:
# Return the truncated string
return self.add_truncation_text(text[:end_index or 0],
truncate)
# Return the original string since no truncation was necessary
return text
def words(self, num, truncate=None, html=False):
"""
Truncate a string after a certain number of words. `truncate` specifies
what should be used to notify that the string has been truncated,
defaulting to ellipsis.
"""
self._setup()
length = int(num)
if html:
return self._truncate_html(length, truncate, self._wrapped, length, True)
return self._text_words(length, truncate)
def _text_words(self, length, truncate):
"""
Truncate a string after a certain number of words.
Strip newlines in the string.
"""
words = self._wrapped.split()
if len(words) > length:
words = words[:length]
return self.add_truncation_text(' '.join(words), truncate)
return ' '.join(words)
def _truncate_html(self, length, truncate, text, truncate_len, words):
"""
Truncate HTML to a certain number of chars (not counting tags and
comments), or, if words is True, then to a certain number of words.
Close opened tags if they were correctly closed in the given HTML.
Preserve newlines in the HTML.
"""
if words and length <= 0:
return ''
html4_singlets = (
'br', 'col', 'link', 'base', 'img',
'param', 'area', 'hr', 'input'
)
# Count non-HTML chars/words and keep note of open tags
pos = 0
end_text_pos = 0
current_len = 0
open_tags = []
regex = re_words if words else re_chars
while current_len <= length:
m = regex.search(text, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word or char
current_len += 1
if current_len == truncate_len:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or current_len >= truncate_len:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
# Element names are always case-insensitive
tagname = tagname.lower()
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i + 1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if current_len <= length:
return text
out = text[:end_text_pos]
truncate_text = self.add_truncation_text('', truncate)
if truncate_text:
out += truncate_text
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
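# Usage sketch (illustrative): both truncation modes append a translatable
# ellipsis by default.
#
#     >>> Truncator('The quick brown fox').words(2)
#     'The quick…'
#     >>> Truncator('The quick brown fox').chars(10)
#     'The quick…'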
@keep_lazy_text
def get_valid_filename(s):
"""
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = str(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
@keep_lazy_text
def get_text_list(list_, last_word=gettext_lazy('or')):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if not list_:
return ''
if len(list_) == 1:
return str(list_[0])
return '%s %s %s' % (
# Translators: This string is used as a separator between list elements
_(', ').join(str(i) for i in list_[:-1]), str(last_word), str(list_[-1])
)
@keep_lazy_text
def normalize_newlines(text):
"""Normalize CRLF and CR newlines to just LF."""
return re_newlines.sub('\n', str(text))
@keep_lazy_text
def phone2numeric(phone):
"""Convert a phone number with letters into its numeric equivalent."""
char2number = {
'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4',
'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6',
'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8',
'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
}
return ''.join(char2number.get(c, c) for c in phone.lower())
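# Usage sketch (illustrative):
#
#     >>> phone2numeric('1-800-FLOWERS')
#     '1-800-3569377'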
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
zbuf = BytesIO()
with GzipFile(mode='wb', compresslevel=6, fileobj=zbuf, mtime=0) as zfile:
zfile.write(s)
return zbuf.getvalue()
class StreamingBuffer(BytesIO):
def read(self):
ret = self.getvalue()
self.seek(0)
self.truncate()
return ret
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
buf = StreamingBuffer()
with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as zfile:
# Output headers...
yield buf.read()
for item in sequence:
zfile.write(item)
data = buf.read()
if data:
yield data
yield buf.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
for bit in smart_split_re.finditer(str(text)):
yield bit.group(0)
def _replace_entity(match):
text = match.group(1)
if text[0] == '#':
text = text[1:]
try:
if text[0] in 'xX':
c = int(text[1:], 16)
else:
c = int(text)
return chr(c)
except ValueError:
return match.group(0)
else:
try:
return chr(html.entities.name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
@keep_lazy_text
def unescape_entities(text):
return _entity_re.sub(_replace_entity, str(text))
@keep_lazy_text
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
@keep_lazy_text
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
return re.sub(r'[-\s]+', '-', value)
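# Usage sketch (illustrative): non-ASCII is transliterated when
# allow_unicode is False.
#
#     >>> slugify(' Hello, Wörld! ')
#     'hello-world'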
def camel_case_to_spaces(value):
"""
Split CamelCase and convert to lowercase. Strip surrounding whitespace.
"""
return re_camel_case.sub(r' \1', value).strip().lower()
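# Usage sketch (illustrative):
#
#     >>> camel_case_to_spaces('CamelCaseValue')
#     'camel case value'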
def _format_lazy(format_string, *args, **kwargs):
"""
Apply str.format() on 'format_string' where format_string, args,
and/or kwargs might be lazy.
"""
return format_string.format(*args, **kwargs)
format_lazy = lazy(_format_lazy, str)
|
ef81653082a122432c65bf512fd459e04a6d786855a310deb29504aa07c33dac | """
Utilities for XML generation/parsing.
"""
import re
from collections import OrderedDict
from xml.sax.saxutils import XMLGenerator
class UnserializableContentError(ValueError):
pass
class SimplerXMLGenerator(XMLGenerator):
def addQuickElement(self, name, contents=None, attrs=None):
"Convenience method for adding an element with no children"
if attrs is None:
attrs = {}
self.startElement(name, attrs)
if contents is not None:
self.characters(contents)
self.endElement(name)
def characters(self, content):
if content and re.search(r'[\x00-\x08\x0B-\x0C\x0E-\x1F]', content):
# Fail loudly when content has control chars (unsupported in XML 1.0)
# See https://www.w3.org/International/questions/qa-controls
raise UnserializableContentError("Control characters are not supported in XML 1.0")
XMLGenerator.characters(self, content)
def startElement(self, name, attrs):
# Sort attrs for a deterministic output.
sorted_attrs = OrderedDict(sorted(attrs.items())) if attrs else attrs
super().startElement(name, sorted_attrs)
|
4179d1924b0ab6865133c694020f8ad8eda9620de030377bc789ec8ca714054e | from decimal import Decimal
from django.conf import settings
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
force_grouping=False, use_l10n=None):
"""
Get a number (as a number or string), and return it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator.
For non-uniform digit grouping, it can be a sequence with the number
of digit group sizes following the format used by the Python locale
module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)).
* thousand_sep: Thousand separator symbol (for example ",")
"""
use_grouping = (use_l10n or (use_l10n is None and settings.USE_L10N)) and settings.USE_THOUSAND_SEPARATOR
use_grouping = use_grouping or force_grouping
use_grouping = use_grouping and grouping != 0
# Make the common case fast
if isinstance(number, int) and not use_grouping and not decimal_pos:
return mark_safe(number)
# sign
sign = ''
if isinstance(number, Decimal):
str_number = '{:f}'.format(number)
else:
str_number = str(number)
if str_number[0] == '-':
sign = '-'
str_number = str_number[1:]
# decimal part
if '.' in str_number:
int_part, dec_part = str_number.split('.')
if decimal_pos is not None:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
if decimal_pos is not None:
dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
dec_part = dec_part and decimal_sep + dec_part
# grouping
if use_grouping:
try:
# if grouping is a sequence
intervals = list(grouping)
except TypeError:
# grouping is a single value
intervals = [grouping, 0]
active_interval = intervals.pop(0)
int_part_gd = ''
cnt = 0
for digit in int_part[::-1]:
if cnt and cnt == active_interval:
if intervals:
active_interval = intervals.pop(0) or active_interval
int_part_gd += thousand_sep[::-1]
cnt = 0
int_part_gd += digit
cnt += 1
int_part = int_part_gd[::-1]
return sign + int_part + dec_part
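# Usage sketch (illustrative): use_l10n=False avoids consulting Django
# settings, and force_grouping=True applies the thousand separator
# unconditionally.
#
#     >>> format(1234.5678, '.', decimal_pos=2, grouping=3,
#     ...        thousand_sep=',', force_grouping=True, use_l10n=False)
#     '1,234.56'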
|
ee10db393538a2192218ce6526e64dbe5f75a655d1d79493f560977169bf6dda | import codecs
import datetime
import locale
from decimal import Decimal
from urllib.parse import quote
from django.utils import six
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
super().__init__(*args)
def __str__(self):
return '%s. You passed in %r (%s)' % (super().__str__(), self.obj, type(self.obj))
# For backwards compatibility. (originally in Django, then added to six 1.9)
python_2_unicode_compatible = six.python_2_unicode_compatible
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Return a string representing 's'. Treat bytestrings using the 'encoding'
codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
_PROTECTED_TYPES = (
type(None), int, float, Decimal, datetime.datetime, datetime.date, datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), str):
return s
if strings_only and is_protected_type(s):
return s
try:
if isinstance(s, bytes):
s = str(s, encoding, errors)
else:
s = str(s)
except UnicodeDecodeError as e:
raise DjangoUnicodeDecodeError(s, *e.args)
return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Return a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, memoryview):
return bytes(s)
return str(s).encode(encoding, errors)
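# Usage sketch (illustrative): round-tripping between text and bytes.
#
#     >>> force_bytes('café')
#     b'caf\xc3\xa9'
#     >>> force_text(b'caf\xc3\xa9')
#     'café'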
smart_str = smart_text
force_str = force_text
smart_str.__doc__ = """
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987, slightly simplified
since the input is assumed to be a string rather than an arbitrary byte
stream.
Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or
b'/I \xe2\x99\xa5 Django/') and return a string containing the encoded
result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/').
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.parse.quote() already considers all
# but the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
elif isinstance(iri, Promise):
iri = str(iri)
return quote(iri, safe="/#%[]=:;$&()+,!?*@'~")
# List of byte values that uri_to_iri() decodes from percent encoding.
# First, the unreserved characters from RFC 3986:
_ascii_ranges = [[45, 46, 95, 126], range(65, 91), range(97, 123)]
_hextobyte = {
(fmt % char).encode(): bytes((char,))
for ascii_range in _ascii_ranges
for char in ascii_range
for fmt in ['%02x', '%02X']
}
# And then everything above 128, because bytes ≥ 128 are part of multibyte
# unicode characters.
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte.update({
(a + b).encode(): bytes.fromhex(a + b)
for a in _hexdig[8:] for b in _hexdig
})
def uri_to_iri(uri):
"""
Convert a Uniform Resource Identifier (URI) into an Internationalized
Resource Identifier (IRI).
This is the algorithm from section 3.2 of RFC 3987, excluding step 4.
Take a URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return
a string containing the encoded result (e.g. '/I%20♥%20Django/').
"""
if uri is None:
return uri
uri = force_bytes(uri)
# Fast selective unquote: First, split on '%' and then starting with the
# second block, decode the first 2 bytes if they represent a hex code to
# decode. The rest of the block is the part after '%AB', not containing
# any '%'. Add that to the output without further processing.
bits = uri.split(b'%')
if len(bits) == 1:
iri = uri
else:
parts = [bits[0]]
append = parts.append
hextobyte = _hextobyte
for item in bits[1:]:
hex = item[:2]
if hex in hextobyte:
append(hextobyte[hex])
append(item[2:])
else:
append(b'%')
append(item)
iri = b''.join(parts)
return repercent_broken_unicode(iri).decode()
def escape_uri_path(path):
"""
Escape the unsafe characters from the path portion of a Uniform Resource
Identifier (URI).
"""
# These are the "reserved" and "unreserved" characters specified in
# sections 2.2 and 2.3 of RFC 2396:
# reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
# unreserved = alphanum | mark
# mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
# The list of safe characters here is constructed subtracting ";", "=",
# and "?" according to section 3.3 of RFC 2396.
# The reason for not subtracting and escaping "/" is that we are escaping
# the entire path, not a path segment.
return quote(path, safe="/:@&+$,-_.!~*'()")
def repercent_broken_unicode(path):
"""
As per section 3.2 of RFC 3987, step three of converting a URI into an IRI,
repercent-encode any octet produced that is not part of a strictly legal
UTF-8 octet sequence.
"""
try:
path.decode()
except UnicodeDecodeError as e:
repercent = quote(path[e.start:e.end], safe=b"/#%[]=:;$&()+,!?*@'~")
path = repercent_broken_unicode(
path[:e.start] + force_bytes(repercent) + path[e.end:])
return path
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
Encode certain chars that would normally be recognized as special chars
for URIs. Do not encode the ' character, as it is a valid character
within URIs. See the encodeURIComponent() JavaScript function for details.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(path.replace("\\", "/"), safe="/~!*()'")
def get_system_encoding():
"""
The encoding of the default system locale. Fall back to 'ascii' if the
encoding is unsupported by Python or could not be determined. See tickets
#10335 and #5846.
"""
try:
encoding = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(encoding)
except Exception:
encoding = 'ascii'
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
|
590342ff71de3945d5e0a2c40f3300740e4ac410b606a2e7538d22db38953837 | import copy
from collections import OrderedDict
from collections.abc import Mapping
class OrderedSet:
"""
A set which keeps the ordering of the inserted items.
Currently backs onto OrderedDict.
"""
def __init__(self, iterable=None):
self.dict = OrderedDict.fromkeys(iterable or ())
def add(self, item):
self.dict[item] = None
def remove(self, item):
del self.dict[item]
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
def __iter__(self):
return iter(self.dict)
def __contains__(self, item):
return item in self.dict
def __bool__(self):
return bool(self.dict)
def __len__(self):
return len(self.dict)
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super().__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, super().__repr__())
def __getitem__(self, key):
"""
Return the last data value for this key, or [] if it's an empty list;
raise KeyError if not found.
"""
try:
list_ = super().__getitem__(key)
except KeyError:
raise MultiValueDictKeyError(key)
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super().__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo):
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
return {**self.__dict__, '_data': {k: self._getlist(k) for k in self}}
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def _getlist(self, key, default=None, force_list=False):
"""
Return a list of values for the key.
Used internally to manipulate the values list. If force_list is True,
return a new copy of values.
"""
try:
values = super().__getitem__(key)
except KeyError:
if default is None:
return []
return default
else:
if force_list:
values = list(values) if values is not None else None
return values
def getlist(self, key, default=None):
"""
Return the list of values for the key. If key doesn't exist, return a
default value.
"""
return self._getlist(key, default, force_list=True)
def setlist(self, key, list_):
super().__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
# Do not return default here because __setitem__() may store
# another value -- QueryDict.__setitem__() does. Look it up.
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
# Do not return default_list here because setlist() may store
# another value -- QueryDict.setlist() does. Look it up.
return self._getlist(key)
def appendlist(self, key, value):
"""Append an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def items(self):
"""
Yield (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key]
def lists(self):
"""Yield (key, list) pairs."""
return iter(super().items())
def values(self):
"""Yield the last value on every key list."""
for key in self:
yield self[key]
def copy(self):
"""Return a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""Extend rather than replace existing key lists."""
if len(args) > 1:
raise TypeError("update expected at most 1 argument, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.items():
self.setlistdefault(key).append(value)
def dict(self):
"""Return current object as a dict with singular values."""
return {key: self[key] for key in self}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, warning='ImmutableList object is immutable.', **kwargs):
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
    def complain(self, *args, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wrap accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super().__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieve the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
use_func = key.startswith(self.prefix)
if use_func:
key = key[len(self.prefix):]
value = super().__getitem__(key)
if use_func:
return self.func(value)
return value
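# A sketch of the prefix behavior, with a stand-in for the SQL quoting
# function:
#   >>> d = DictWrapper({'a': 'b'}, lambda v: '"%s"' % v, 'quoted_')
#   >>> 'plain: %(a)s, quoted: %(quoted_a)s' % d
#   'plain: b, quoted: "b"'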
def _destruct_iterable_mapping_values(data):
for i, elem in enumerate(data):
if len(elem) != 2:
raise ValueError(
'dictionary update sequence element #{} has '
'length {}; 2 is required.'.format(i, len(elem))
)
if not isinstance(elem[0], str):
raise ValueError('Element key %r invalid, only strings are allowed' % elem[0])
yield tuple(elem)
class CaseInsensitiveMapping(Mapping):
"""
Mapping allowing case-insensitive key lookups. Original case of keys is
preserved for iteration and string representation.
Example::
>>> ci_map = CaseInsensitiveMapping({'name': 'Jane'})
>>> ci_map['Name']
Jane
>>> ci_map['NAME']
Jane
>>> ci_map['name']
Jane
>>> ci_map # original case preserved
{'name': 'Jane'}
"""
def __init__(self, data):
if not isinstance(data, Mapping):
data = {k: v for k, v in _destruct_iterable_mapping_values(data)}
self._store = {k.lower(): (k, v) for k, v in data.items()}
def __getitem__(self, key):
return self._store[key.lower()][1]
def __len__(self):
return len(self._store)
def __eq__(self, other):
return isinstance(other, Mapping) and {
k.lower(): v for k, v in self.items()
} == {
k.lower(): v for k, v in other.items()
}
def __iter__(self):
return (original_key for original_key, value in self._store.values())
def __repr__(self):
return repr({key: value for key, value in self._store.values()})
def copy(self):
return self
|
edac52f8da1abb4796d72bc57b8b5e0b972cefbe1e52aadc4925a37fed6d9a5e | import copy
import os
from importlib import import_module
from importlib.util import find_spec as importlib_find
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError as err:
raise ImportError("%s doesn't look like a module path" % dotted_path) from err
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as err:
raise ImportError('Module "%s" does not define a "%s" attribute/class' % (
module_path, class_name)
) from err
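# For example, with a stdlib path:
#   >>> import_string('datetime.timedelta')
#   <class 'datetime.timedelta'>
#   >>> import_string('datetime')
#   Traceback (most recent call last):
#       ...
#   ImportError: datetime doesn't look like a module path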
def autodiscover_modules(*args, **kwargs):
"""
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
"""
from django.apps import apps
register_to = kwargs.get('register_to')
for app_config in apps.get_app_configs():
for module_to_search in args:
# Attempt to import the app's module.
try:
if register_to:
before_import_registry = copy.copy(register_to._registry)
import_module('%s.%s' % (app_config.name, module_to_search))
except Exception:
# Reset the registry to the state before the last import
# as this import will have to reoccur on the next request and
# this could raise NotRegistered and AlreadyRegistered
# exceptions (see #8245).
if register_to:
register_to._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have the module in question, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module_to_search):
raise
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
try:
package_name = package.__name__
package_path = package.__path__
except AttributeError:
# package isn't a package.
return False
full_module_name = package_name + '.' + module_name
try:
return importlib_find(full_module_name, package_path) is not None
except (ImportError, AttributeError):
# When module_name is an invalid dotted path, Python raises ImportError
# (or ModuleNotFoundError in Python 3.6+). AttributeError may be raised
# if the penultimate part of the path is not a package.
# (https://bugs.python.org/issue30436)
return False
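# For example:
#   >>> import django.utils
#   >>> module_has_submodule(django.utils, 'html')
#   True
#   >>> module_has_submodule(django.utils, 'no_such_module')
#   False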
def module_dir(module):
"""
Find the name of the directory that contains a module, if possible.
Raise ValueError otherwise, e.g. for namespace packages that are split
over several directories.
"""
# Convert to list because _NamespacePath does not support indexing on 3.3.
paths = list(getattr(module, '__path__', []))
if len(paths) == 1:
return paths[0]
else:
filename = getattr(module, '__file__', None)
if filename is not None:
return os.path.dirname(filename)
raise ValueError("Cannot determine directory containing %s" % module)
|
a144411117ba593129d19200e6155a8d4ad26579665dcbcf1ecb615cf6be30d6 | """HTML utilities suitable for global use."""
import json
import re
from html.parser import HTMLParser
from urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.functional import Promise, keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils.text import normalize_newlines
# Configuration for urlize() function.
TRAILING_PUNCTUATION_CHARS = '.,:;!'
WRAPPING_PUNCTUATION = [('(', ')'), ('[', ']')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'''([\s<>"']+)''')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
_html_escapes = {
    ord('&'): '&amp;',
    ord('<'): '&lt;',
    ord('>'): '&gt;',
    ord('"'): '&quot;',
    ord("'"): '&#39;',
}
@keep_lazy(str, SafeText)
def escape(text):
"""
Return the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
Always escape input, even if it's already escaped and marked as such.
This may result in double-escaping. If this is a concern, use
conditional_escape() instead.
"""
return mark_safe(str(text).translate(_html_escapes))
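# For example (note that an already-encoded ampersand is escaped again):
#   >>> escape("<b>Tom &amp; Jerry</b>")
#   '&lt;b&gt;Tom &amp;amp; Jerry&lt;/b&gt;'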
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('`'): '\\u0060',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
@keep_lazy(str, SafeText)
def escapejs(value):
"""Hex encode characters for use in JavaScript strings."""
return mark_safe(str(value).translate(_js_escapes))
_json_script_escapes = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
}
def json_script(value, element_id):
"""
Escape all the HTML/XML special characters with their unicode escapes, so
value is safe to be output anywhere except for inside a tag attribute. Wrap
the escaped JSON in a script tag.
"""
from django.core.serializers.json import DjangoJSONEncoder
json_str = json.dumps(value, cls=DjangoJSONEncoder).translate(_json_script_escapes)
return format_html(
'<script id="{}" type="application/json">{}</script>',
element_id, mark_safe(json_str)
)
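# For example:
#   >>> json_script({'hello': 'world'}, 'hello-data')
#   '<script id="hello-data" type="application/json">{"hello": "world"}</script>'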
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
This function relies on the __html__ convention used both by Django's
SafeData class and by third-party libraries like markupsafe.
"""
if isinstance(text, Promise):
text = str(text)
if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but pass all arguments through conditional_escape(),
and call mark_safe() on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = {k: conditional_escape(v) for (k, v) in kwargs.items()}
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
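# For example, unsafe arguments are escaped while safe ones pass through:
#   >>> format_html('<b>{}</b> {}', '<i>user input</i>', mark_safe('<i>trusted</i>'))
#   '<b>&lt;i&gt;user input&lt;/i&gt;</b> <i>trusted</i>'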
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *args)
for args in args_generator
))
@keep_lazy_text
def linebreaks(value, autoescape=False):
"""Convert newlines into <p> and <br>s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', str(value))
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br>') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br>') for p in paras]
return '\n\n'.join(paras)
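# For example:
#   >>> linebreaks('first line\nsecond line\n\nnew paragraph')
#   '<p>first line<br>second line</p>\n\n<p>new paragraph</p>'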
class MLStripper(HTMLParser):
def __init__(self):
super().__init__(convert_charrefs=False)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
s.feed(value)
s.close()
return s.get_data()
@keep_lazy_text
def strip_tags(value):
"""Return the given HTML with all tags stripped."""
# Note: in typical case this loop executes _strip_once once. Loop condition
# is redundant, but helps to reduce number of executions of _strip_once.
value = str(value)
while '<' in value and '>' in value:
new_value = _strip_once(value)
if len(new_value) >= len(value):
# _strip_once was not able to detect more tags
break
value = new_value
return value
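# For example:
#   >>> strip_tags('<p>Hello <b>world</b>!</p>')
#   'Hello world!'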
@keep_lazy_text
def strip_spaces_between_tags(value):
"""Return the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', str(value))
def smart_urlquote(url):
"""Quote a URL if it isn't already quoted."""
def unquote_quote(segment):
segment = unquote(segment)
# Tilde is part of RFC3986 Unreserved Characters
# https://tools.ietf.org/html/rfc3986#section-2.3
# See also https://bugs.python.org/issue16285
return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~')
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
return unquote_quote(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
return unquote_quote(url)
if query:
# Separately unquoting key/value, so as to not mix querystring separators
# included in query values. See #22267.
query_parts = [(unquote(q[0]), unquote(q[1]))
for q in parse_qsl(query, keep_blank_values=True)]
# urlencode will take care of quoting
query = urlencode(query_parts)
path = unquote_quote(path)
fragment = unquote_quote(fragment)
return urlunsplit((scheme, netloc, path, query, fragment))
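# For example, an IDN hostname is converted to its ASCII (ACE) form:
#   >>> smart_urlquote('http://öäü.com/')
#   'http://xn--4ca9at.com/'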
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Convert any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, truncate the URLs in the link text longer
than this limit to trim_url_limit - 1 characters and append an ellipsis.
If nofollow is True, give the links a rel="nofollow" attribute.
If autoescape is True, autoescape the link text and URLs.
"""
safe_input = isinstance(text, SafeData)
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s…' % x[:max(0, limit - 1)]
    def unescape(text):
        """
        If input URL is HTML-escaped, unescape it so that it can be safely fed
        to smart_urlquote. For example:
        http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
        """
        return text.replace('&amp;', '&').replace('&lt;', '<').replace(
            '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
def trim_punctuation(lead, middle, trail):
"""
Trim trailing and wrapping punctuation from `middle`. Return the items
of the new state.
"""
# Continue trimming until middle remains unchanged.
trimmed_something = True
while trimmed_something:
trimmed_something = False
# Trim wrapping punctuation.
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead += opening
trimmed_something = True
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing) and
middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
trimmed_something = True
# Trim trailing punctuation (after trimming wrapping punctuation,
            # as encoded entities contain ';'). Unescape entities to avoid
# breaking them by removing ';'.
middle_unescaped = unescape(middle)
stripped = middle_unescaped.rstrip(TRAILING_PUNCTUATION_CHARS)
if middle_unescaped != stripped:
trail = middle[len(stripped):] + trail
middle = middle[:len(stripped) - len(middle_unescaped)]
trimmed_something = True
return lead, middle, trail
def is_email_simple(value):
"""Return True if value looks like an email address."""
# An @ must be in the middle of the value.
if '@' not in value or value.startswith('@') or value.endswith('@'):
return False
try:
p1, p2 = value.split('@')
except ValueError:
# value contains more than one @.
return False
# Dot must be in p2 (e.g. example.com)
if '.' not in p2 or p2.startswith('.'):
return False
return True
words = word_split_re.split(str(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# lead: Current punctuation trimmed from the beginning of the word.
# middle: Current state of the word.
# trail: Current punctuation trimmed from the end of the word.
lead, middle, trail = '', word, ''
# Deal with punctuation.
lead, middle, trail = trim_punctuation(lead, middle, trail)
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
url = smart_urlquote(unescape(middle))
elif simple_url_2_re.match(middle):
url = smart_urlquote('http://%s' % unescape(middle))
elif ':' not in middle and is_email_simple(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
trimmed = escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
return ''.join(words)
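# For example:
#   >>> urlize('Visit www.djangoproject.com now', nofollow=True)
#   'Visit <a href="http://www.djangoproject.com" rel="nofollow">www.djangoproject.com</a> now'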
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
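# For example:
#   >>> avoid_wrapping('10 MB')
#   '10\xa0MB'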
def html_safe(klass):
"""
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeText.
"""
if '__html__' in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it defines "
"__html__()." % klass.__name__
)
if '__str__' not in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it doesn't "
"define __str__()." % klass.__name__
)
klass_str = klass.__str__
klass.__str__ = lambda self: mark_safe(klass_str(self))
klass.__html__ = lambda self: str(self)
return klass
|
079c82651264660ddc1ef7e41e11f005f106ea1a0f2f9ec9ddf5441d1173494c | """
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from django.utils.functional import wraps
class SafeData:
def __html__(self):
"""
Return the html representation of a string for interoperability.
This allows other template engines to understand Django's SafeData.
"""
return self
class SafeBytes(bytes, SafeData):
"""
A bytes subclass that has been specifically marked as "safe" (requires no
further escaping) for HTML output purposes.
Kept in Django 2.0 for usage by apps supporting Python 2. Shouldn't be used
in Django anymore.
"""
def __add__(self, rhs):
"""
Concatenating a safe byte string with another safe byte string or safe
string is safe. Otherwise, the result is no longer safe.
"""
t = super().__add__(rhs)
if isinstance(rhs, SafeText):
return SafeText(t)
elif isinstance(rhs, SafeBytes):
return SafeBytes(t)
return t
class SafeText(str, SafeData):
"""
A str subclass that has been specifically marked as "safe" for HTML output
purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe string with another safe byte string or
safe string is safe. Otherwise, the result is no longer safe.
"""
t = super().__add__(rhs)
if isinstance(rhs, SafeData):
return SafeText(t)
return t
def __str__(self):
return self
SafeString = SafeText
def _safety_decorator(safety_marker, func):
@wraps(func)
def wrapped(*args, **kwargs):
return safety_marker(func(*args, **kwargs))
return wrapped
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string is appropriate.
If used on a method as a decorator, mark the returned data as safe.
Can be called multiple times on a single string.
"""
if hasattr(s, '__html__'):
return s
if callable(s):
return _safety_decorator(mark_safe, s)
return SafeText(s)
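# For example, safety survives only safe-with-safe concatenation:
#   >>> html = mark_safe('<b>bold</b>')
#   >>> isinstance(html + mark_safe('<i>italic</i>'), SafeData)
#   True
#   >>> isinstance(html + '<i>italic</i>', SafeData)
#   False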
|
495f01e3d2ca25660c4cb3d24ee76a9b841d44d65cdb2a2c7ca1c008b7efce18 | import copy
import itertools
import operator
from functools import total_ordering, wraps
from django.utils.version import PY36, get_docs_version
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*args, *moreargs, **{**kwargs, **morekwargs})
return _curried
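# For example (operator is already imported at the top of this module):
#   >>> double = curry(operator.mul, 2)
#   >>> double(21)
#   42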
class cached_property:
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
A cached property can be made out of an existing method:
(e.g. ``url = cached_property(get_absolute_url)``).
On Python < 3.6, the optional ``name`` argument must be provided, e.g.
``url = cached_property(get_absolute_url, name='url')``.
"""
name = None
@staticmethod
def func(instance):
raise TypeError(
'Cannot use cached_property instance without calling '
'__set_name__() on it.'
)
@staticmethod
def _is_mangled(name):
return name.startswith('__') and not name.endswith('__')
def __init__(self, func, name=None):
if PY36:
self.real_func = func
else:
func_name = func.__name__
name = name or func_name
if not (isinstance(name, str) and name.isidentifier()):
raise ValueError(
"%r can't be used as the name of a cached_property." % name,
)
if self._is_mangled(name):
raise ValueError(
'cached_property does not work with mangled methods on '
'Python < 3.6 without the appropriate `name` argument. See '
'https://docs.djangoproject.com/en/%s/ref/utils/'
'#cached-property-mangled-name' % get_docs_version(),
)
self.name = name
self.func = func
self.__doc__ = getattr(func, '__doc__')
def __set_name__(self, owner, name):
if self.name is None:
self.name = name
self.func = self.real_func
elif name != self.name:
raise TypeError(
"Cannot assign the same cached_property to two different names "
"(%r and %r)." % (self.name, name)
)
def __get__(self, instance, cls=None):
"""
Call the function and put the return value in instance.__dict__ so that
subsequent attribute access on the instance returns the cached value
instead of calling cached_property.__get__().
"""
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
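# A sketch of the caching behavior on Python 3.6+, where __set_name__() fills
# in the attribute name automatically:
#   >>> class Person:
#   ...     @cached_property
#   ...     def friends(self):
#   ...         print('computing...')
#   ...         return ['Huey', 'Dewey', 'Louie']
#   >>> p = Person()
#   >>> p.friends
#   computing...
#   ['Huey', 'Dewey', 'Louie']
#   >>> p.friends  # now served from p.__dict__, no recomputation
#   ['Huey', 'Dewey', 'Louie']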
class Promise:
"""
Base class for the proxy class created in the closure of the lazy function.
It's used to recognize promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
    Turn any callable into a lazily evaluated callable. At least one result
    class or type is required so that the automatic forcing of the lazy
    evaluation code is triggered. Results are not memoized; the function is
    evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__prepared = False
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if not self.__prepared:
self.__prepare_class__()
self.__prepared = True
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses
)
def __repr__(self):
return repr(self.__cast())
@classmethod
def __prepare_class__(cls):
for resultclass in resultclasses:
for type_ in resultclass.mro():
for method_name in type_.__dict__:
# All __promise__ return the same wrapper method, they
# look up the correct implementation when called.
if hasattr(cls, method_name):
continue
meth = cls.__promise__(method_name)
setattr(cls, method_name, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = str in resultclasses
assert not (cls._delegate_bytes and cls._delegate_text), (
"Cannot call lazy() with both bytes and text return types.")
if cls._delegate_text:
cls.__str__ = cls.__text_cast
elif cls._delegate_bytes:
cls.__bytes__ = cls.__bytes_cast
@classmethod
def __promise__(cls, method_name):
# Builds a wrapper around some magic method
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
return getattr(res, method_name)(*args, **kw)
return __wrapper__
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __bytes_cast_encoded(self):
return func(*self.__args, **self.__kw).encode()
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __str__(self):
# object defines __str__(), so __prepare_class__() won't overload
# a __str__() method from the proxied class.
return str(self.__cast())
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
def __hash__(self):
return hash(self.__cast())
def __mod__(self, rhs):
if self._delegate_text:
return str(self) % rhs
return self.__cast() % rhs
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
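# For example, evaluation is deferred until the proxy is actually used:
#   >>> lazy_upper = lazy(lambda s: s.upper(), str)
#   >>> proxy = lazy_upper('django')  # nothing evaluated yet
#   >>> str(proxy)
#   'DJANGO'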
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def lazystr(text):
"""
Shortcut for the common case of a lazy callable that returns str.
"""
return lazy(str, str)(text)
def keep_lazy(*resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
if not resultclasses:
raise TypeError("You must pass at least one argument to keep_lazy().")
def decorator(func):
lazy_func = lazy(func, *resultclasses)
@wraps(func)
def wrapper(*args, **kwargs):
if any(isinstance(arg, Promise) for arg in itertools.chain(args, kwargs.values())):
return lazy_func(*args, **kwargs)
return func(*args, **kwargs)
return wrapper
return decorator
def keep_lazy_text(func):
"""
A decorator for functions that accept lazy arguments and return text.
"""
return keep_lazy(str)(func)
empty = object()
def new_method_proxy(func):
def inner(self, *args):
if self._wrapped is empty:
self._setup()
return func(self._wrapped, *args)
return inner
class LazyObject:
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
# Avoid infinite recursion when tracing __init__ (#19456).
_wrapped = None
def __init__(self):
# Note: if a subclass overrides __init__(), it will likely need to
# override __copy__() and __deepcopy__() as well.
self._wrapped = empty
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialize the wrapped object.
"""
raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. We're going to have to initialize the wrapped
# object to successfully pickle it, so we might as well just pickle the
# wrapped object since they're supposed to act the same way.
#
# Unfortunately, if we try to simply act like the wrapped object, the ruse
# will break down when pickle gets our id(). Thus we end up with pickle
# thinking, in effect, that we are a distinct object from the wrapped
# object, but with the same __dict__. This can cause problems (see #25389).
#
# So instead, we define our own __reduce__ method and custom unpickler. We
# pickle the wrapped object as the unpickler's argument, so that pickle
# will pickle it normally, and then the unpickler simply returns its
# argument.
def __reduce__(self):
if self._wrapped is empty:
self._setup()
return (unpickle_lazyobject, (self._wrapped,))
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use type(self), not
# self.__class__, because the latter is proxied.
return type(self)()
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use type(self), not self.__class__, because the
# latter is proxied.
result = type(self)()
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
__bool__ = new_method_proxy(bool)
# Introspection support
__dir__ = new_method_proxy(dir)
# Need to pretend to be the wrapped class, for the sake of objects that
# care about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__lt__ = new_method_proxy(operator.lt)
__gt__ = new_method_proxy(operator.gt)
__ne__ = new_method_proxy(operator.ne)
__hash__ = new_method_proxy(hash)
# List/Tuple/Dictionary methods support
__getitem__ = new_method_proxy(operator.getitem)
__setitem__ = new_method_proxy(operator.setitem)
__delitem__ = new_method_proxy(operator.delitem)
__iter__ = new_method_proxy(iter)
__len__ = new_method_proxy(len)
__contains__ = new_method_proxy(operator.contains)
def unpickle_lazyobject(wrapped):
"""
Used to unpickle lazy objects. Just return its argument, which will be the
wrapped object.
"""
return wrapped
class SimpleLazyObject(LazyObject):
"""
A lazy object initialized from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
super().__init__()
def _setup(self):
self._wrapped = self._setupfunc()
# Return a meaningful representation of the lazy object for debugging
# without evaluating the wrapped object.
def __repr__(self):
if self._wrapped is empty:
repr_attr = self._setupfunc
else:
repr_attr = self._wrapped
return '<%s: %r>' % (type(self).__name__, repr_attr)
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use SimpleLazyObject, not
# self.__class__, because the latter is proxied.
return SimpleLazyObject(self._setupfunc)
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
def partition(predicate, values):
"""
Split the values into two sets, based on the return value of the function
(True/False). e.g.:
>>> partition(lambda x: x > 3, range(5))
[0, 1, 2, 3], [4]
"""
results = ([], [])
for item in values:
results[predicate(item)].append(item)
return results
|
6a3d81de80e8672133a9bf12094ed5be3d8e85af6bf1d099f992aef42dceb998 | import base64
import calendar
import datetime
import re
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
query_params = []
for key, value in query:
if value is None:
raise TypeError(
'Cannot encode None in a query string. Did you mean to pass '
'an empty string or omit the value?'
)
elif isinstance(value, (str, bytes)):
query_val = value
else:
try:
itr = iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, even when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = []
for item in itr:
if item is None:
raise TypeError(
'Cannot encode None in a query string. Did you '
'mean to pass an empty string or omit the value?'
)
elif not isinstance(item, bytes):
item = str(item)
query_val.append(item)
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
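# For example, with doseq=True a sequence value becomes repeated parameters:
#   >>> urlencode({'a': 1, 'b': [2, 3]}, doseq=True)
#   'a=1&b=2&b=3'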
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
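# For example:
#   >>> http_date(0)
#   'Thu, 01 Jan 1970 00:00:00 GMT'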
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
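# The two base36 helpers are inverses of each other:
#   >>> int_to_base36(1337)
#   '115'
#   >>> base36_to_int('115')
#   1337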
def urlsafe_base64_encode(s):
"""
Encode a bytestring to a base64 string for use in URLs. Strip any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=').decode('ascii')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = s.encode()
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
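# For example, weak ETags keep their W/ prefix:
#   >>> parse_etags('"etag1", W/"etag2"')
#   ['"etag1"', 'W/"etag2"']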
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
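# For example:
#   >>> is_same_domain('foo.example.com', '.example.com')
#   True
#   >>> is_same_domain('foo.example.com', 'example.com')
#   False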
def is_safe_url(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
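# For example:
#   >>> is_safe_url('https://example.com/p', allowed_hosts={'example.com'})
#   True
#   >>> is_safe_url('https://evil.com/p', allowed_hosts={'example.com'})
#   False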
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if nv[1] or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
def escape_leading_slashes(url):
"""
If redirecting to an absolute path (two leading slashes), a slash must be
escaped to prevent browsers from handling the path as schemaless and
redirecting to another host.
"""
if url.startswith('//'):
url = '/%2F{}'.format(url[2:])
return url
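# For example:
#   >>> escape_leading_slashes('//example.com/evil-path')
#   '/%2Fexample.com/evil-path'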
|
3bc4dd47b7f45595092ed109f3046e014e0edf4683b7b8d10493ded2e271e855 | import os
import tempfile
from os.path import abspath, dirname, join, normcase, sep
from django.core.exceptions import SuspiciousFileOperation
# For backwards-compatibility in Django 2.0
abspathu = abspath
def upath(path):
"""Always return a unicode path (did something for Python 2)."""
return path
def npath(path):
"""
Always return a native path, that is unicode on Python 3 and bytestring on
Python 2. Noop for Python 3.
"""
return path
def safe_join(base, *paths):
"""
Join one or more path components to the base path component intelligently.
Return a normalized, absolute version of the final path.
Raise ValueError if the final path isn't located inside of the base path
component.
"""
final_path = abspath(join(base, *paths))
base_path = abspath(base)
# Ensure final_path starts with base_path (using normcase to ensure we
# don't false-negative on case insensitive operating systems like Windows),
# further, one of the following conditions must be true:
# a) The next character is the path separator (to prevent conditions like
# safe_join("/dir", "/../d"))
# b) The final path must be the same as the base path.
# c) The base path must be the most root path (meaning either "/" or "C:\\")
if (not normcase(final_path).startswith(normcase(base_path + sep)) and
normcase(final_path) != normcase(base_path) and
dirname(normcase(base_path)) != normcase(base_path)):
raise SuspiciousFileOperation(
'The joined path ({}) is located outside of the base path '
'component ({})'.format(final_path, base_path))
return final_path
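# A sketch of the behavior on a POSIX system:
#   >>> safe_join('/srv/app', 'media', 'photo.jpg')
#   '/srv/app/media/photo.jpg'
#   >>> safe_join('/srv/app', '..', 'etc', 'passwd')
#   Traceback (most recent call last):
#       ...
#   django.core.exceptions.SuspiciousFileOperation: ...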
def symlinks_supported():
"""
    Return whether or not creating symlinks is supported on the host platform
    and allowed by its configuration (e.g. on Windows it requires admin
    permissions).
"""
with tempfile.TemporaryDirectory() as temp_dir:
original_path = os.path.join(temp_dir, 'original')
symlink_path = os.path.join(temp_dir, 'symlink')
os.makedirs(original_path)
try:
os.symlink(original_path, symlink_path)
supported = True
except (OSError, NotImplementedError):
supported = False
return supported
|
807e0fb4f3efda4a10c45eb3e433c80354522e99f4ef81bc4645c079c7baa303 | """
This is the Django template system.
How it works:
The Lexer.tokenize() method converts a template string (i.e., a string
containing markup with custom template tags) to tokens, which can be either
plain text (TokenType.TEXT), variables (TokenType.VAR), or block statements
(TokenType.BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
'<html></html>'
"""
import logging
import re
from enum import Enum
from inspect import getcallargs, getfullargspec, unwrap
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape, escape
from django.utils.safestring import SafeData, mark_safe
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import gettext_lazy, pgettext_lazy
from .exceptions import TemplateSyntaxError
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
(re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))
logger = logging.getLogger('django.template')
class TokenType(Enum):
TEXT = 0
VAR = 1
BLOCK = 2
COMMENT = 3
class VariableDoesNotExist(Exception):
def __init__(self, msg, params=()):
self.msg = msg
self.params = params
def __str__(self):
return self.msg % self.params
class Origin:
def __init__(self, name, template_name=None, loader=None):
self.name = name
self.template_name = template_name
self.loader = loader
def __str__(self):
return self.name
def __eq__(self, other):
return (
isinstance(other, Origin) and
self.name == other.name and
self.loader == other.loader
)
@property
def loader_name(self):
if self.loader:
return '%s.%s' % (
self.loader.__module__, self.loader.__class__.__name__,
)
class Template:
def __init__(self, template_string, origin=None, name=None, engine=None):
# If Template is instantiated directly rather than from an Engine and
# exactly one Django template engine is configured, use that engine.
# This is required to preserve backwards-compatibility for direct use
# e.g. Template('...').render(Context({...}))
if engine is None:
from .engine import Engine
engine = Engine.get_default()
if origin is None:
origin = Origin(UNKNOWN_SOURCE)
self.name = name
self.origin = origin
self.engine = engine
self.source = str(template_string) # May be lazy.
self.nodelist = self.compile_nodelist()
def __iter__(self):
for node in self.nodelist:
yield from node
def _render(self, context):
return self.nodelist.render(context)
def render(self, context):
"Display stage -- can be called many times"
with context.render_context.push_state(self):
if context.template is None:
with context.bind_template(self):
context.template_name = self.name
return self._render(context)
else:
return self._render(context)
def compile_nodelist(self):
"""
Parse and compile the template source into a nodelist. If debug
is True and an exception occurs during parsing, the exception is
        annotated with contextual line information where it occurred in the
template source.
"""
if self.engine.debug:
lexer = DebugLexer(self.source)
else:
lexer = Lexer(self.source)
tokens = lexer.tokenize()
parser = Parser(
tokens, self.engine.template_libraries, self.engine.template_builtins,
self.origin,
)
try:
return parser.parse()
except Exception as e:
if self.engine.debug:
e.template_debug = self.get_exception_info(e, e.token)
raise
def get_exception_info(self, exception, token):
"""
Return a dictionary containing contextual line information of where
the exception occurred in the template. The following information is
provided:
message
The message of the exception raised.
source_lines
The lines before, after, and including the line the exception
occurred on.
line
The line number the exception occurred on.
before, during, after
The line the exception occurred on split into three parts:
1. The content before the token that raised the error.
2. The token that raised the error.
3. The content after the token that raised the error.
total
The number of lines in source_lines.
top
The line number where source_lines starts.
bottom
The line number where source_lines ends.
start
The start position of the token in the template source.
end
The end position of the token in the template source.
"""
start, end = token.position
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(self.source)):
if start >= upto and end <= next:
line = num
before = escape(self.source[upto:start])
during = escape(self.source[start:end])
after = escape(self.source[end:next])
source_lines.append((num, escape(self.source[upto:next])))
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases exc_value.args can be empty or an invalid
# string.
try:
message = str(exception.args[0])
except (IndexError, UnicodeDecodeError):
message = '(Could not get exception message)'
return {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': self.origin.name,
'start': start,
'end': end,
}
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
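# For example, offsets of line starts plus a final bound one past the end:
#   >>> list(linebreak_iter('a\nbb\n'))
#   [0, 2, 5, 6]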
class Token:
def __init__(self, token_type, contents, position=None, lineno=None):
"""
A token representing a string from the template.
token_type
A TokenType, either .TEXT, .VAR, .BLOCK, or .COMMENT.
contents
The token source string.
position
An optional tuple containing the start and end index of the token
in the template source. This is used for traceback information
when debug is on.
lineno
The line number the token appears on in the template source.
This is used for traceback information and gettext files.
"""
self.token_type, self.contents = token_type, contents
self.lineno = lineno
self.position = position
def __str__(self):
token_name = self.token_type.name.capitalize()
return ('<%s token: "%s...">' %
(token_name, self.contents[:20].replace('\n', '')))
def split_contents(self):
split = []
bits = smart_split(self.contents)
for bit in bits:
# Handle translation-marked template pieces
if bit.startswith(('_("', "_('")):
sentinel = bit[2] + ')'
trans_bit = [bit]
while not bit.endswith(sentinel):
bit = next(bits)
trans_bit.append(bit)
bit = ' '.join(trans_bit)
split.append(bit)
return split
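    # For example, translation-marked pieces are kept intact:
    #   >>> token = Token(TokenType.BLOCK, 'sometag _("Page not found") value|yesno:_("yes,no")')
    #   >>> token.split_contents()
    #   ['sometag', '_("Page not found")', 'value|yesno:_("yes,no")']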
class Lexer:
def __init__(self, template_string):
self.template_string = template_string
self.verbatim = False
def tokenize(self):
"""
Return a list of tokens from a given template_string.
"""
in_tag = False
lineno = 1
result = []
for bit in tag_re.split(self.template_string):
if bit:
result.append(self.create_token(bit, None, lineno, in_tag))
in_tag = not in_tag
lineno += bit.count('\n')
return result
def create_token(self, token_string, position, lineno, in_tag):
"""
Convert the given token string into a new Token object and return it.
If in_tag is True, we are processing something that matched a tag,
otherwise it should be treated as a literal string.
"""
if in_tag and token_string.startswith(BLOCK_TAG_START):
# The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
# We could do len(BLOCK_TAG_START) to be more "correct", but we've
# hard-coded the 2s here for performance. And it's not like
# the TAG_START values are going to change anytime, anyway.
block_content = token_string[2:-2].strip()
if self.verbatim and block_content == self.verbatim:
self.verbatim = False
if in_tag and not self.verbatim:
if token_string.startswith(VARIABLE_TAG_START):
return Token(TokenType.VAR, token_string[2:-2].strip(), position, lineno)
elif token_string.startswith(BLOCK_TAG_START):
if block_content[:9] in ('verbatim', 'verbatim '):
self.verbatim = 'end%s' % block_content
return Token(TokenType.BLOCK, block_content, position, lineno)
elif token_string.startswith(COMMENT_TAG_START):
content = ''
if token_string.find(TRANSLATOR_COMMENT_MARK):
content = token_string[2:-2].strip()
return Token(TokenType.COMMENT, content, position, lineno)
else:
return Token(TokenType.TEXT, token_string, position, lineno)
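# Illustrative sketch (not part of the original module): tokenizing a small
# template yields alternating text and tag tokens; the DebugLexer subclass
# below additionally records source positions. Output assumes Token.__str__
# truncates contents to 20 characters, as defined above:
#
#   >>> [str(t) for t in Lexer('Hi {{ name }}!').tokenize()]
#   ['<Text token: "Hi ...">', '<Var token: "name...">', '<Text token: "!...">']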
class DebugLexer(Lexer):
def tokenize(self):
"""
        Split a template string into tokens and annotate each token with its
start and end position in the source. This is slower than the default
lexer so only use it when debug is True.
"""
lineno = 1
result = []
upto = 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
token_string = self.template_string[upto:start]
result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False))
lineno += token_string.count('\n')
upto = start
token_string = self.template_string[start:end]
result.append(self.create_token(token_string, (start, end), lineno, in_tag=True))
lineno += token_string.count('\n')
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False))
return result
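# Illustrative comparison (not part of the original module): unlike Lexer,
# DebugLexer annotates each token with its (start, end) span in the source:
#
#   >>> [t.position for t in DebugLexer('Hi {{ name }}').tokenize()]
#   [(0, 3), (3, 13)]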
class Parser:
def __init__(self, tokens, libraries=None, builtins=None, origin=None):
self.tokens = tokens
self.tags = {}
self.filters = {}
self.command_stack = []
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.libraries = libraries
for builtin in builtins:
self.add_library(builtin)
self.origin = origin
def parse(self, parse_until=None):
"""
        Iterate through the parser tokens and compile each one into a node.
If parse_until is provided, parsing will stop once one of the
specified tokens has been reached. This is formatted as a list of
tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
reached, raise an exception with the unclosed block tag details.
"""
if parse_until is None:
parse_until = []
nodelist = NodeList()
while self.tokens:
token = self.next_token()
# Use the raw values here for TokenType.* for a tiny performance boost.
if token.token_type.value == 0: # TokenType.TEXT
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token.token_type.value == 1: # TokenType.VAR
if not token.contents:
raise self.error(token, 'Empty variable tag on line %d' % token.lineno)
try:
filter_expression = self.compile_filter(token.contents)
except TemplateSyntaxError as e:
raise self.error(token, e)
var_node = VariableNode(filter_expression)
self.extend_nodelist(nodelist, var_node, token)
elif token.token_type.value == 2: # TokenType.BLOCK
try:
command = token.contents.split()[0]
except IndexError:
raise self.error(token, 'Empty block tag on line %d' % token.lineno)
if command in parse_until:
# A matching token has been reached. Return control to
# the caller. Put the token back on the token list so the
# caller knows where it terminated.
self.prepend_token(token)
return nodelist
# Add the token to the command stack. This is used for error
# messages if further parsing fails due to an unclosed block
# tag.
self.command_stack.append((command, token))
# Get the tag callback function from the ones registered with
# the parser.
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command, parse_until)
# Compile the callback into a node object and add it to
# the node list.
try:
compiled_result = compile_func(self, token)
except Exception as e:
raise self.error(token, e)
self.extend_nodelist(nodelist, compiled_result, token)
# Compile success. Remove the token from the command stack.
self.command_stack.pop()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist
def skip_past(self, endtag):
while self.tokens:
token = self.next_token()
if token.token_type == TokenType.BLOCK and token.contents == endtag:
return
self.unclosed_block_tag([endtag])
def extend_nodelist(self, nodelist, node, token):
# Check that non-text nodes don't appear before an extends tag.
if node.must_be_first and nodelist.contains_nontext:
raise self.error(
token, '%r must be the first tag in the template.' % node,
)
if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
nodelist.contains_nontext = True
# Set origin and token here since we can't modify the node __init__()
# method.
node.token = token
node.origin = self.origin
nodelist.append(node)
def error(self, token, e):
"""
Return an exception annotated with the originating token. Since the
parser can be called recursively, check if a token is already set. This
ensures the innermost token is highlighted if an exception occurs,
e.g. a compile error within the body of an if statement.
"""
if not isinstance(e, Exception):
e = TemplateSyntaxError(e)
if not hasattr(e, 'token'):
e.token = token
return e
def invalid_block_tag(self, token, command, parse_until=None):
if parse_until:
raise self.error(
token,
"Invalid block tag on line %d: '%s', expected %s. Did you "
"forget to register or load this tag?" % (
token.lineno,
command,
get_text_list(["'%s'" % p for p in parse_until], 'or'),
),
)
raise self.error(
token,
"Invalid block tag on line %d: '%s'. Did you forget to register "
"or load this tag?" % (token.lineno, command)
)
def unclosed_block_tag(self, parse_until):
command, token = self.command_stack.pop()
msg = "Unclosed tag on line %d: '%s'. Looking for one of: %s." % (
token.lineno,
command,
', '.join(parse_until),
)
raise self.error(token, msg)
def next_token(self):
return self.tokens.pop(0)
def prepend_token(self, token):
self.tokens.insert(0, token)
def delete_first_token(self):
del self.tokens[0]
def add_library(self, lib):
self.tags.update(lib.tags)
self.filters.update(lib.filters)
def compile_filter(self, token):
"""
        Convenient wrapper for FilterExpression.
"""
return FilterExpression(token, self)
def find_filter(self, filter_name):
if filter_name in self.filters:
return self.filters[filter_name]
else:
raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open': re.escape("_("),
'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': r'\w\.',
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.VERBOSE)
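# Illustrative example (not part of the original module, assuming the default
# '|' and ':' separators): filter_re matches the leading variable or constant
# and then each successive |filter:arg segment of an expression:
#
#   >>> [m.group() for m in filter_re.finditer('name|default:"nobody"|upper')]
#   ['name', '|default:"nobody"', '|upper']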
class FilterExpression:
"""
Parse a variable token and its optional filters (all as a single string),
and return a list of tuples of the filter name and arguments.
Sample::
>>> token = 'variable|default:"Default value"|date:"Y-m-d"'
>>> p = Parser('')
>>> fe = FilterExpression(token, p)
>>> len(fe.filters)
2
>>> fe.var
<Variable: 'variable'>
"""
def __init__(self, token, parser):
self.token = token
matches = filter_re.finditer(token)
var_obj = None
filters = []
upto = 0
for match in matches:
start = match.start()
if upto != start:
raise TemplateSyntaxError("Could not parse some characters: "
"%s|%s|%s" %
(token[:upto], token[upto:start],
token[start:]))
if var_obj is None:
var, constant = match.group("var", "constant")
if constant:
try:
var_obj = Variable(constant).resolve({})
except VariableDoesNotExist:
var_obj = None
elif var is None:
raise TemplateSyntaxError("Could not find variable at "
"start of %s." % token)
else:
var_obj = Variable(var)
else:
filter_name = match.group("filter_name")
args = []
constant_arg, var_arg = match.group("constant_arg", "var_arg")
if constant_arg:
args.append((False, Variable(constant_arg).resolve({})))
elif var_arg:
args.append((True, Variable(var_arg)))
filter_func = parser.find_filter(filter_name)
self.args_check(filter_name, filter_func, args)
filters.append((filter_func, args))
upto = match.end()
if upto != len(token):
raise TemplateSyntaxError("Could not parse the remainder: '%s' "
"from '%s'" % (token[upto:], token))
self.filters = filters
self.var = var_obj
def resolve(self, context, ignore_failures=False):
if isinstance(self.var, Variable):
try:
obj = self.var.resolve(context)
except VariableDoesNotExist:
if ignore_failures:
obj = None
else:
string_if_invalid = context.template.engine.string_if_invalid
if string_if_invalid:
if '%s' in string_if_invalid:
return string_if_invalid % self.var
else:
return string_if_invalid
else:
obj = string_if_invalid
else:
obj = self.var
for func, args in self.filters:
arg_vals = []
for lookup, arg in args:
if not lookup:
arg_vals.append(mark_safe(arg))
else:
arg_vals.append(arg.resolve(context))
if getattr(func, 'expects_localtime', False):
obj = template_localtime(obj, context.use_tz)
if getattr(func, 'needs_autoescape', False):
new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
else:
new_obj = func(obj, *arg_vals)
if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
obj = mark_safe(new_obj)
else:
obj = new_obj
return obj
    @staticmethod
    def args_check(name, func, provided):
provided = list(provided)
# First argument, filter input, is implied.
plen = len(provided) + 1
# Check to see if a decorator is providing the real function.
func = unwrap(func)
args, _, _, defaults, _, _, _ = getfullargspec(func)
alen = len(args)
dlen = len(defaults or [])
# Not enough OR Too many
if plen < (alen - dlen) or plen > alen:
raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
(name, alen - dlen, plen))
return True
def __str__(self):
return self.token
class Variable:
"""
A template variable, resolvable against a given context. The variable may
be a hard-coded string (if it begins and ends with single or double quote
marks)::
>>> c = {'article': {'section':'News'}}
>>> Variable('article.section').resolve(c)
'News'
>>> Variable('article').resolve(c)
{'section': 'News'}
>>> class AClass: pass
>>> c = AClass()
>>> c.article = AClass()
>>> c.article.section = 'News'
(The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
"""
def __init__(self, var):
self.var = var
self.literal = None
self.lookups = None
self.translate = False
self.message_context = None
if not isinstance(var, str):
raise TypeError(
"Variable must be a string or number, got %s" % type(var))
try:
# First try to treat this variable as a number.
#
# Note that this could cause an OverflowError here that we're not
# catching. Since this should only happen at compile time, that's
# probably OK.
# Try to interpret values containing a period or an 'e'/'E'
# (possibly scientific notation) as a float; otherwise, try int.
if '.' in var or 'e' in var.lower():
self.literal = float(var)
# "2." is invalid
if var.endswith('.'):
raise ValueError
else:
self.literal = int(var)
except ValueError:
# A ValueError means that the variable isn't a number.
if var.startswith('_(') and var.endswith(')'):
# The result of the lookup should be translated at rendering
# time.
self.translate = True
var = var[2:-1]
# If it's wrapped with quotes (single or double), then
# we're also dealing with a literal.
try:
self.literal = mark_safe(unescape_string_literal(var))
except ValueError:
                # Otherwise we'll set self.lookups so that resolve() knows we're
                # dealing with a bona fide variable.
if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
raise TemplateSyntaxError("Variables and attributes may "
"not begin with underscores: '%s'" %
var)
self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
def resolve(self, context):
"""Resolve this variable against a given context."""
if self.lookups is not None:
# We're dealing with a variable that needs to be resolved
value = self._resolve_lookup(context)
else:
# We're dealing with a literal, so it's already been "resolved"
value = self.literal
if self.translate:
is_safe = isinstance(value, SafeData)
msgid = value.replace('%', '%%')
msgid = mark_safe(msgid) if is_safe else msgid
if self.message_context:
return pgettext_lazy(self.message_context, msgid)
else:
return gettext_lazy(msgid)
return value
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.var)
def __str__(self):
return self.var
def _resolve_lookup(self, context):
"""
Perform resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
"""
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
# Don't return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError):
# Reraise if the exception was raised by a @property
if not isinstance(current, BaseContext) and bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, 'do_not_call_in_templates', False):
pass
elif getattr(current, 'alters_data', False):
current = context.template.engine.string_if_invalid
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
try:
getcallargs(current)
except TypeError: # arguments *were* required
current = context.template.engine.string_if_invalid # invalid method call
else:
raise
except Exception as e:
template_name = getattr(context, 'template_name', None) or 'unknown'
logger.debug(
"Exception while resolving variable '%s' in template '%s'.",
bit,
template_name,
exc_info=True,
)
if getattr(e, 'silent_variable_failure', False):
current = context.template.engine.string_if_invalid
else:
raise
return current
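# Illustrative examples (not part of the original module) of how Variable
# classifies its argument: numbers and quoted strings become literals, while
# dotted names become lookup tuples resolved against the context:
#
#   >>> Variable('2.5').literal, Variable('"text"').literal
#   (2.5, 'text')
#   >>> Variable('article.section').lookups
#   ('article', 'section')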
class Node:
    # Set this to True for nodes that must be first in the template (although
    # they can be preceded by text nodes).
must_be_first = False
child_nodelists = ('nodelist',)
token = None
def render(self, context):
"""
Return the node rendered as a string.
"""
pass
def render_annotated(self, context):
"""
Render the node. If debug is True and an exception occurs during
rendering, the exception is annotated with contextual line information
where it occurred in the template. For internal usage this method is
preferred over using the render method directly.
"""
try:
return self.render(context)
except Exception as e:
if context.template.engine.debug and not hasattr(e, 'template_debug'):
e.template_debug = context.render_context.template.get_exception_info(e, self.token)
raise
def __iter__(self):
yield self
def get_nodes_by_type(self, nodetype):
"""
Return a list of all nodes (within this node and its nodelist)
of the given type
"""
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
nodes.extend(nodelist.get_nodes_by_type(nodetype))
return nodes
class NodeList(list):
# Set to True the first time a non-TextNode is inserted by
# extend_nodelist().
contains_nontext = False
def render(self, context):
bits = []
for node in self:
if isinstance(node, Node):
bit = node.render_annotated(context)
else:
bit = node
bits.append(str(bit))
return mark_safe(''.join(bits))
def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes
class TextNode(Node):
def __init__(self, s):
self.s = s
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.s[:25])
def render(self, context):
return self.s
def render_value_in_context(value, context):
"""
Convert any value to a string to become part of a rendered template. This
means escaping, if required, and conversion to a string. If value is a
string, it's expected to already be translated.
"""
value = template_localtime(value, use_tz=context.use_tz)
value = localize(value, use_l10n=context.use_l10n)
if context.autoescape:
if not issubclass(type(value), str):
value = str(value)
return conditional_escape(value)
else:
return str(value)
class VariableNode(Node):
def __init__(self, filter_expression):
self.filter_expression = filter_expression
def __repr__(self):
return "<Variable Node: %s>" % self.filter_expression
def render(self, context):
try:
output = self.filter_expression.resolve(context)
except UnicodeDecodeError:
# Unicode conversion can fail sometimes for reasons out of our
# control (e.g. exception rendering). In that case, we fail
# quietly.
return ''
return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
Parse token keyword arguments and return a dictionary of the arguments
retrieved from the ``bits`` token list.
`bits` is a list containing the remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments are removed from this
list.
`support_legacy` - if True, the legacy format ``1 as foo`` is accepted.
Otherwise, only the standard ``foo=1`` format is allowed.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so return the dictionary as soon as an invalid argument format
is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match.group(1)
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match.group(1):
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != 'as':
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != 'and':
return kwargs
del bits[:1]
return kwargs
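# Illustrative example (not part of the original module, and assuming
# ``parser`` is a Parser instance built elsewhere): keyword arguments are
# consumed from the front of ``bits`` until the first non-kwarg is reached:
#
#   >>> bits = ['foo=1', 'bar=2', 'only']
#   >>> sorted(token_kwargs(bits, parser))
#   ['bar', 'foo']
#   >>> bits
#   ['only']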
|
3c90579039afdc0221e96ba06f215c50abb4d5728d0f1938dc12944532e123b8 | import functools
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .base import Context, Template
from .context import _builtin_context_processors
from .exceptions import TemplateDoesNotExist
from .library import import_library
class Engine:
default_builtins = [
'django.template.defaulttags',
'django.template.defaultfilters',
'django.template.loader_tags',
]
def __init__(self, dirs=None, app_dirs=False, context_processors=None,
debug=False, loaders=None, string_if_invalid='',
file_charset='utf-8', libraries=None, builtins=None, autoescape=True):
if dirs is None:
dirs = []
if context_processors is None:
context_processors = []
if loaders is None:
loaders = ['django.template.loaders.filesystem.Loader']
if app_dirs:
loaders += ['django.template.loaders.app_directories.Loader']
if not debug:
loaders = [('django.template.loaders.cached.Loader', loaders)]
else:
if app_dirs:
raise ImproperlyConfigured(
"app_dirs must not be set when loaders is defined.")
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.dirs = dirs
self.app_dirs = app_dirs
self.autoescape = autoescape
self.context_processors = context_processors
self.debug = debug
self.loaders = loaders
self.string_if_invalid = string_if_invalid
self.file_charset = file_charset
self.libraries = libraries
self.template_libraries = self.get_template_libraries(libraries)
self.builtins = self.default_builtins + builtins
self.template_builtins = self.get_template_builtins(self.builtins)
@staticmethod
@functools.lru_cache()
def get_default():
"""
Return the first DjangoTemplates backend that's configured, or raise
ImproperlyConfigured if none are configured.
This is required for preserving historical APIs that rely on a
globally available, implicitly configured engine such as:
>>> from django.template import Context, Template
>>> template = Template("Hello {{ name }}!")
>>> context = Context({'name': "world"})
>>> template.render(context)
'Hello world!'
"""
# Since Engine is imported in django.template and since
# DjangoTemplates is a wrapper around this Engine class,
# local imports are required to avoid import loops.
from django.template import engines
from django.template.backends.django import DjangoTemplates
for engine in engines.all():
if isinstance(engine, DjangoTemplates):
return engine.engine
raise ImproperlyConfigured('No DjangoTemplates backend is configured.')
@cached_property
def template_context_processors(self):
context_processors = _builtin_context_processors
context_processors += tuple(self.context_processors)
return tuple(import_string(path) for path in context_processors)
def get_template_builtins(self, builtins):
return [import_library(x) for x in builtins]
def get_template_libraries(self, libraries):
loaded = {}
for name, path in libraries.items():
loaded[name] = import_library(path)
return loaded
@cached_property
def template_loaders(self):
return self.get_template_loaders(self.loaders)
def get_template_loaders(self, template_loaders):
loaders = []
for template_loader in template_loaders:
loader = self.find_template_loader(template_loader)
if loader is not None:
loaders.append(loader)
return loaders
def find_template_loader(self, loader):
if isinstance(loader, (tuple, list)):
loader, *args = loader
else:
args = []
if isinstance(loader, str):
loader_class = import_string(loader)
return loader_class(self, *args)
else:
raise ImproperlyConfigured(
"Invalid value in template loaders configuration: %r" % loader)
def find_template(self, name, dirs=None, skip=None):
tried = []
for loader in self.template_loaders:
try:
template = loader.get_template(name, skip=skip)
return template, template.origin
except TemplateDoesNotExist as e:
tried.extend(e.tried)
raise TemplateDoesNotExist(name, tried=tried)
def from_string(self, template_code):
"""
Return a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(template_code, engine=self)
def get_template(self, template_name):
"""
Return a compiled Template object for the given template name,
handling template inheritance recursively.
"""
template, origin = self.find_template(template_name)
if not hasattr(template, 'render'):
# template needs to be compiled
template = Template(template, origin, template_name, engine=self)
return template
def render_to_string(self, template_name, context=None):
"""
Render the template specified by template_name with the given context.
For use in Django's test suite.
"""
if isinstance(template_name, (list, tuple)):
t = self.select_template(template_name)
else:
t = self.get_template(template_name)
# Django < 1.8 accepted a Context in `context` even though that's
# unintended. Preserve this ability but don't rewrap `context`.
if isinstance(context, Context):
return t.render(context)
else:
return t.render(Context(context))
def select_template(self, template_name_list):
"""
Given a list of template names, return the first that can be loaded.
"""
if not template_name_list:
raise TemplateDoesNotExist("No template names provided")
not_found = []
for template_name in template_name_list:
try:
return self.get_template(template_name)
except TemplateDoesNotExist as exc:
if exc.args[0] not in not_found:
not_found.append(exc.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(not_found))
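# Minimal usage sketch (not part of the original module): a standalone Engine
# can compile and render template strings directly, without any settings-based
# backend configuration:
#
#   >>> engine = Engine()
#   >>> engine.from_string('{{ greeting|upper }}').render(Context({'greeting': 'hi'}))
#   'HI'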
|
0f02baa0207aa2f3fdfbf27cb5ff9cf9cf5cecb9c817635edb7b0242842e1013 | """Default variable filters."""
import random as random_module
import re
import types
from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation
from functools import wraps
from operator import itemgetter
from pprint import pformat
from urllib.parse import quote
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.encoding import iri_to_uri
from django.utils.html import (
avoid_wrapping, conditional_escape, escape, escapejs,
json_script as _json_script, linebreaks, strip_tags, urlize as _urlize,
)
from django.utils.safestring import SafeData, mark_safe
from django.utils.text import (
Truncator, normalize_newlines, phone2numeric, slugify as _slugify, wrap,
)
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import gettext, ngettext
from .base import Variable, VariableDoesNotExist
from .library import Library
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive strings. The object
passed as the first positional argument will be converted to a string.
"""
def _dec(*args, **kwargs):
args = list(args)
args[0] = str(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
return wraps(func)(_dec)
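# Illustrative sketch (not part of the original module): a decorated filter
# always receives a string, even when called with non-string input:
#
#   >>> @stringfilter
#   ... def shout(value):
#   ...     return value.upper()
#   >>> shout(123)
#   '123'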
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Add slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalize the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encode characters for use in JavaScript strings."""
return escapejs(value)
@register.filter(is_safe=True)
def json_script(value, element_id):
"""
Output value JSON-encoded, wrapped in a <script type="application/json">
tag.
"""
return _json_script(value, element_id)
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Display a float to a specified number of decimal places.
If called without an argument, display the floating point number with one
decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, always display exactly arg number of decimal places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, display arg number of decimal places -- but only if
there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, display the string representation
of that value.
"""
try:
input_val = repr(text)
d = Decimal(input_val)
except InvalidOperation:
try:
d = Decimal(str(float(text)))
except (ValueError, InvalidOperation, TypeError):
return ''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format('%d' % (int(d)), 0))
exp = Decimal(1).scaleb(-abs(p))
# Set the precision high enough to avoid an exception (#15789).
tupl = d.as_tuple()
units = len(tupl[1])
units += -tupl[2] if m else tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`,
# and `exponent` from Decimal.as_tuple() directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP, Context(prec=prec)).as_tuple()
digits = [str(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append('0')
digits.insert(-exponent, '.')
if sign:
digits.append('-')
number = ''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escape an IRI value for use in a URL."""
return iri_to_uri(value)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=True):
"""Display text with line numbers."""
lines = value.split('\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = str(len(str(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe('\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Convert a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Return the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Convert to ASCII. Convert spaces to hyphens. Remove characters that aren't
alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip
leading and trailing whitespace.
"""
return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Format the variable according to the arg, a string formatting specifier.
This specifier uses Python string formatting syntax, with the exception
that the leading "%" is dropped.
See https://docs.python.org/library/stdtypes.html#printf-style-string-formatting
for documentation of Python string formatting.
"""
if isinstance(value, tuple):
value = str(value)
try:
return ("%" + str(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Convert a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""Truncate a string after `arg` number of characters."""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
"""
Truncate HTML after `arg` number of chars.
Preserve newlines in the HTML.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).chars(length, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncate a string after `arg` number of words.
Remove newlines within the string.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' …')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncate HTML after `arg` number of words.
Preserve newlines in the HTML.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' …')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Convert a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escape a value for use in a URL.
The ``safe`` parameter determines the characters which should not be
escaped by Python's quote() function. If not provided, use the default safe
characters (but an empty string can be provided when *all* characters
should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return quote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=True):
"""Convert URLs in plain text into clickable links."""
return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=True):
"""
Convert URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Return the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""Wrap words at `arg` line length."""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""Left-align the value in a field of a given width."""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""Right-align the value in a field of a given width."""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Center the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""Remove all values of arg from the given string."""
safe = isinstance(value, SafeData)
value = value.replace(arg, '')
if safe and arg != ';':
return mark_safe(value)
return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""Mark the value as a string that should be auto-escaped."""
return conditional_escape(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escape a string's HTML. Return a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=True):
"""
Replace line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br>``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=True):
"""
Convert all newlines in a piece of plain text to HTML line breaks
(``<br>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br>'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""Mark the value as a string that should not be auto-escaped."""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Mark each element in the sequence,
individually, as safe, after converting them to strings. Return a list
with the results.
"""
return [mark_safe(obj) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strip all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
def _property_resolver(arg):
"""
When arg is convertible to float, behave like operator.itemgetter(arg)
Otherwise, behave like Variable(arg).resolve
>>> _property_resolver(1)('abc')
'b'
>>> _property_resolver('1')('abc')
Traceback (most recent call last):
...
TypeError: string indices must be integers
>>> class Foo:
... a = 42
... b = 3.14
... c = 'Hey!'
>>> _property_resolver('b')(Foo())
3.14
"""
try:
float(arg)
except ValueError:
return Variable(arg).resolve
else:
return itemgetter(arg)
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Given a list of dicts, return that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=_property_resolver(arg))
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Given a list of dicts, return that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=_property_resolver(arg), reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Return the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=True):
"""Join a list with a string, like Python's ``str.join(list)``."""
try:
if autoescape:
value = [conditional_escape(v) for v in value]
data = conditional_escape(arg).join(value)
except TypeError: # Fail silently if arg isn't iterable.
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"""Return the last item in a list."""
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=False)
def length(value):
"""Return the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return 0
@register.filter(is_safe=False)
def length_is(value, arg):
"""Return a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Return a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Return a slice of the list using the same syntax as Python's list slicing.
"""
try:
bits = []
for x in str(arg).split(':'):
if not x:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
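# Illustrative example (not part of the original module): the argument uses
# Python slice syntax, so ':2' means value[:2]:
#
#   >>> slice_filter([1, 2, 3, 4], ':2')
#   [1, 2]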
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=True):
"""
Recursively take a self-nested list and return an HTML unordered list --
WITHOUT opening and closing <ul> tags.
Assume the list is in the proper format. For example, if ``var`` contains:
``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``, then
``{{ var|unordered_list }}`` returns::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
def escaper(x):
return x
def walk_items(item_list):
item_iterator = iter(item_list)
try:
item = next(item_iterator)
while True:
try:
next_item = next(item_iterator)
except StopIteration:
yield item, None
break
if isinstance(next_item, (list, tuple, types.GeneratorType)):
try:
iter(next_item)
except TypeError:
pass
else:
yield item, next_item
item = next(item_iterator)
continue
yield item, None
item = next_item
except StopIteration:
pass
def list_formatter(item_list, tabs=1):
indent = '\t' * tabs
output = []
for item, children in walk_items(item_list):
sublist = ''
if children:
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (
indent, list_formatter(children, tabs + 1), indent, indent)
output.append('%s<li>%s%s</li>' % (
indent, escaper(item), sublist))
return '\n'.join(output)
return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Add the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, return the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Return the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
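# Illustrative example (not part of the original module): digits are counted
# from the right, and out-of-range positions return 0:
#
#   >>> get_digit(123, 1), get_digit(123, 3), get_digit(123, 4)
#   (3, 1, 0)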
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Format a date according to the given format."""
if value in (None, ''):
return ''
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Format a time according to the given format."""
if value in (None, ''):
return ''
try:
return formats.time_format(value, arg)
except (AttributeError, TypeError):
try:
return time_format(value, arg)
except (AttributeError, TypeError):
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Format a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Format a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Return True if the value is divisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false, and (optionally) None,
return one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
                                      if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = gettext('yes,no,maybe')
bits = arg.split(',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes_):
"""
Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc.).
"""
try:
bytes_ = float(bytes_)
except (TypeError, ValueError, UnicodeDecodeError):
value = ngettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
return avoid_wrapping(value)
def filesize_number_format(value):
return formats.number_format(round(value, 1), 1)
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
negative = bytes_ < 0
if negative:
bytes_ = -bytes_ # Allow formatting of negative numbers.
if bytes_ < KB:
value = ngettext("%(size)d byte", "%(size)d bytes", bytes_) % {'size': bytes_}
elif bytes_ < MB:
value = gettext("%s KB") % filesize_number_format(bytes_ / KB)
elif bytes_ < GB:
value = gettext("%s MB") % filesize_number_format(bytes_ / MB)
elif bytes_ < TB:
value = gettext("%s GB") % filesize_number_format(bytes_ / GB)
elif bytes_ < PB:
value = gettext("%s TB") % filesize_number_format(bytes_ / TB)
else:
value = gettext("%s PB") % filesize_number_format(bytes_ / PB)
if negative:
value = "-%s" % value
return avoid_wrapping(value)
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
"""
Return a plural suffix if the value is not 1. By default, use 's' as the
suffix:
    * If value is 0, vote{{ value|pluralize }} displays "votes".
    * If value is 1, vote{{ value|pluralize }} displays "vote".
    * If value is 2, vote{{ value|pluralize }} displays "votes".
    If an argument is provided, use that string instead:
    * If value is 0, class{{ value|pluralize:"es" }} displays "classes".
    * If value is 1, class{{ value|pluralize:"es" }} displays "class".
    * If value is 2, class{{ value|pluralize:"es" }} displays "classes".
    If the provided argument contains a comma, use the text before the comma
    for the singular case and the text after the comma for the plural case:
    * If value is 0, cand{{ value|pluralize:"y,ies" }} displays "candies".
    * If value is 1, cand{{ value|pluralize:"y,ies" }} displays "candy".
    * If value is 2, cand{{ value|pluralize:"y,ies" }} displays "candies".
"""
if ',' not in arg:
arg = ',' + arg
bits = arg.split(',')
if len(bits) > 2:
return ''
singular_suffix, plural_suffix = bits[:2]
try:
if float(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
    Take a phone number and convert it to its numerical equivalent.
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, e)
|
8fcb8bd780eed456a263eb4af3b6044116f532c612444aa92e35b1910cda5553 | import re
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.mail import mail_managers
from django.http import HttpResponsePermanentRedirect
from django.urls import is_valid_path
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import escape_leading_slashes
class CommonMiddleware(MiddlewareMixin):
"""
"Common" middleware for taking care of some basic operations:
- Forbid access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
          append missing slashes and/or prepend missing "www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, form a new URL by
appending a slash at the end. If this new URL is found in
urlpatterns, return an HTTP redirect to this new URL; otherwise
process the initial URL as usual.
This behavior can be customized by subclassing CommonMiddleware and
overriding the response_redirect_class attribute.
"""
response_redirect_class = HttpResponsePermanentRedirect
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
raise PermissionDenied('Forbidden user agent')
# Check for a redirect based on settings.PREPEND_WWW
host = request.get_host()
must_prepend = settings.PREPEND_WWW and host and not host.startswith('www.')
redirect_url = ('%s://www.%s' % (request.scheme, host)) if must_prepend else ''
# Check if a slash should be appended
if self.should_redirect_with_slash(request):
path = self.get_full_path_with_slash(request)
else:
path = request.get_full_path()
# Return a redirect if necessary
if redirect_url or path != request.get_full_path():
redirect_url += path
return self.response_redirect_class(redirect_url)
def should_redirect_with_slash(self, request):
"""
Return True if settings.APPEND_SLASH is True and appending a slash to
the request path turns an invalid path into a valid one.
"""
if settings.APPEND_SLASH and not request.path_info.endswith('/'):
urlconf = getattr(request, 'urlconf', None)
return (
not is_valid_path(request.path_info, urlconf) and
is_valid_path('%s/' % request.path_info, urlconf)
)
return False
def get_full_path_with_slash(self, request):
"""
Return the full path of the request with a trailing slash appended.
Raise a RuntimeError if settings.DEBUG is True and request.method is
POST, PUT, or PATCH.
"""
new_path = request.get_full_path(force_append_slash=True)
# Prevent construction of scheme relative urls.
new_path = escape_leading_slashes(new_path)
if settings.DEBUG and request.method in ('POST', 'PUT', 'PATCH'):
raise RuntimeError(
"You called this URL via %(method)s, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining %(method)s data. "
"Change your form to point to %(url)s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django settings." % {
'method': request.method,
'url': request.get_host() + new_path,
}
)
return new_path
def process_response(self, request, response):
"""
When the status code of the response is 404, it may redirect to a path
with an appended slash if should_redirect_with_slash() returns True.
"""
# If the given URL is "Not Found", then check if we should redirect to
# a path with a slash appended.
if response.status_code == 404:
if self.should_redirect_with_slash(request):
return self.response_redirect_class(self.get_full_path_with_slash(request))
# Add the Content-Length header to non-streaming responses if not
# already set.
if not response.streaming and not response.has_header('Content-Length'):
response['Content-Length'] = str(len(response.content))
return response
class BrokenLinkEmailsMiddleware(MiddlewareMixin):
def process_response(self, request, response):
"""Send broken link emails for relevant 404 NOT FOUND responses."""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = request.META.get('HTTP_REFERER', '')
if not self.is_ignorable_request(request, path, domain, referer):
ua = request.META.get('HTTP_USER_AGENT', '<none>')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True,
)
return response
def is_internal_request(self, domain, referer):
"""
Return True if the referring URL is the same domain as the current
request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Return True if the given request *shouldn't* notify the site managers
according to project settings or in situations outlined by the inline
comments.
"""
# The referer is empty.
if not referer:
return True
# APPEND_SLASH is enabled and the referer is equal to the current URL
# without a trailing slash indicating an internal redirect.
if settings.APPEND_SLASH and uri.endswith('/') and referer == uri[:-1]:
return True
# A '?' in referer is identified as a search engine source.
if not self.is_internal_request(domain, referer) and '?' in referer:
return True
# The referer is equal to the current URL, ignoring the scheme (assumed
# to be a poorly implemented bot).
parsed_referer = urlparse(referer)
if parsed_referer.netloc in ['', domain] and parsed_referer.path == uri:
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
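# Illustrative examples (not part of the original module): the referer is
# "internal" only when it matches the exact current domain, so subdomains do
# not count:
#
#   >>> mw = BrokenLinkEmailsMiddleware(lambda request: None)
#   >>> mw.is_internal_request('example.com', 'https://example.com/page/')
#   True
#   >>> mw.is_internal_request('example.com', 'https://sub.example.com/page/')
#   False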
|
02686cbef25cc1c662e960ef7dfb09c8ce42afb3e8fd2d01cbcd877d5376aa45 | """
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
import logging
import re
import string
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.urls import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import is_same_domain
from django.utils.log import log_response
logger = logging.getLogger('django.security.csrf')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed."
REASON_INSECURE_REFERER = "Referer checking failed - Referer is insecure while host is secure."
CSRF_SECRET_LENGTH = 32
CSRF_TOKEN_LENGTH = 2 * CSRF_SECRET_LENGTH
CSRF_ALLOWED_CHARS = string.ascii_letters + string.digits
CSRF_SESSION_KEY = '_csrftoken'
def _get_failure_view():
"""Return the view to be used for CSRF rejections."""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_string():
return get_random_string(CSRF_SECRET_LENGTH, allowed_chars=CSRF_ALLOWED_CHARS)
def _salt_cipher_secret(secret):
"""
Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a
token by adding a salt and using it to encrypt the secret.
"""
salt = _get_new_csrf_string()
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in salt))
cipher = ''.join(chars[(x + y) % len(chars)] for x, y in pairs)
return salt + cipher
def _unsalt_cipher_token(token):
"""
Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length
CSRF_TOKEN_LENGTH, and that its first half is a salt), use it to decrypt
the second half to produce the original secret.
"""
salt = token[:CSRF_SECRET_LENGTH]
token = token[CSRF_SECRET_LENGTH:]
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in salt))
secret = ''.join(chars[x - y] for x, y in pairs) # Note negative values are ok
return secret
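# Illustrative round trip (not part of the original module): masking a secret
# with a random salt and then unmasking the resulting token recovers the
# original secret:
#
#   >>> secret = _get_new_csrf_string()
#   >>> _unsalt_cipher_token(_salt_cipher_secret(secret)) == secret
#   True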
def _get_new_csrf_token():
return _salt_cipher_secret(_get_new_csrf_string())
def get_token(request):
"""
Return the CSRF token required for a POST form. The token is an
alphanumeric value. A new token is created if one is not already set.
A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
if "CSRF_COOKIE" not in request.META:
csrf_secret = _get_new_csrf_string()
request.META["CSRF_COOKIE"] = _salt_cipher_secret(csrf_secret)
else:
csrf_secret = _unsalt_cipher_token(request.META["CSRF_COOKIE"])
request.META["CSRF_COOKIE_USED"] = True
return _salt_cipher_secret(csrf_secret)
def rotate_token(request):
"""
Change the CSRF token in use for a request - should be done on login
for security purposes.
"""
request.META.update({
"CSRF_COOKIE_USED": True,
"CSRF_COOKIE": _get_new_csrf_token(),
})
request.csrf_cookie_needs_reset = True
def _sanitize_token(token):
# Allow only ASCII alphanumerics
if re.search('[^a-zA-Z0-9]', token):
return _get_new_csrf_token()
elif len(token) == CSRF_TOKEN_LENGTH:
return token
elif len(token) == CSRF_SECRET_LENGTH:
# Older Django versions set cookies to values of CSRF_SECRET_LENGTH
# alphanumeric characters. For backwards compatibility, accept
# such values as unsalted secrets.
# It's easier to salt here and be consistent later, rather than add
# different code paths in the checks, although that might be a tad more
# efficient.
return _salt_cipher_secret(token)
return _get_new_csrf_token()
def _compare_salted_tokens(request_csrf_token, csrf_token):
# Assume both arguments are sanitized -- that is, strings of
# length CSRF_TOKEN_LENGTH, all CSRF_ALLOWED_CHARS.
return constant_time_compare(
_unsalt_cipher_token(request_csrf_token),
_unsalt_cipher_token(csrf_token),
)
class CsrfViewMiddleware(MiddlewareMixin):
"""
Require a present and correct csrfmiddlewaretoken for POST requests that
have a CSRF cookie, and set an outgoing CSRF cookie.
This middleware should be used in conjunction with the {% csrf_token %}
template tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
response = _get_failure_view()(request, reason=reason)
log_response(
'Forbidden (%s): %s', reason, request.path,
response=response,
request=request,
logger=logger,
)
return response
def _get_token(self, request):
if settings.CSRF_USE_SESSIONS:
try:
return request.session.get(CSRF_SESSION_KEY)
except AttributeError:
raise ImproperlyConfigured(
'CSRF_USE_SESSIONS is enabled, but request.session is not '
'set. SessionMiddleware must appear before CsrfViewMiddleware '
'in MIDDLEWARE%s.' % ('_CLASSES' if settings.MIDDLEWARE is None else '')
)
else:
try:
cookie_token = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
return None
csrf_token = _sanitize_token(cookie_token)
if csrf_token != cookie_token:
# Cookie token needed to be replaced;
# the cookie needs to be reset.
request.csrf_cookie_needs_reset = True
return csrf_token
def _set_token(self, request, response):
if settings.CSRF_USE_SESSIONS:
if request.session.get(CSRF_SESSION_KEY) != request.META['CSRF_COOKIE']:
request.session[CSRF_SESSION_KEY] = request.META['CSRF_COOKIE']
else:
response.set_cookie(
settings.CSRF_COOKIE_NAME,
request.META['CSRF_COOKIE'],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE,
httponly=settings.CSRF_COOKIE_HTTPONLY,
samesite=settings.CSRF_COOKIE_SAMESITE,
)
# Set the Vary header since content varies with the CSRF cookie.
patch_vary_headers(response, ('Cookie',))
def process_request(self, request):
csrf_token = self._get_token(request)
if csrf_token is not None:
# Use same token next time.
request.META['CSRF_COOKIE'] = csrf_token
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
# Assume that anything not defined as 'safe' by RFC7231 needs protection
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite.
# It comes after the creation of CSRF cookies, so that
# everything else continues to work exactly the same
# (e.g. cookies are sent, etc.), but before any
# branches that call reject().
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# secret we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = request.META.get('HTTP_REFERER')
if referer is None:
return self._reject(request, REASON_NO_REFERER)
referer = urlparse(referer)
# Make sure we have a valid URL for Referer.
if '' in (referer.scheme, referer.netloc):
return self._reject(request, REASON_MALFORMED_REFERER)
# Ensure that our Referer is also secure.
if referer.scheme != 'https':
return self._reject(request, REASON_INSECURE_REFERER)
                # If there isn't a CSRF_COOKIE_DOMAIN, require an exact
                # match on host:port. If not, obey the cookie rules (or those
# for the session cookie, if CSRF_USE_SESSIONS).
good_referer = (
settings.SESSION_COOKIE_DOMAIN
if settings.CSRF_USE_SESSIONS
else settings.CSRF_COOKIE_DOMAIN
)
if good_referer is not None:
server_port = request.get_port()
if server_port not in ('443', '80'):
good_referer = '%s:%s' % (good_referer, server_port)
else:
try:
# request.get_host() includes the port.
good_referer = request.get_host()
except DisallowedHost:
pass
# Create a list of all acceptable HTTP referers, including the
# current host if it's permitted by ALLOWED_HOSTS.
good_hosts = list(settings.CSRF_TRUSTED_ORIGINS)
if good_referer is not None:
good_hosts.append(good_referer)
if not any(is_same_domain(referer.netloc, host) for host in good_hosts):
reason = REASON_BAD_REFERER % referer.geturl()
return self._reject(request, reason)
csrf_token = request.META.get('CSRF_COOKIE')
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
return self._reject(request, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
try:
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
except IOError:
# Handle a broken connection before we've completed reading
# the POST data. process_view shouldn't raise any
# exceptions, so we'll ignore and serve the user a 403
# (assuming they're still listening, which they probably
# aren't because of the error).
pass
if request_csrf_token == "":
                # Fall back to the X-CSRFToken header, to make things easier
                # for AJAX and to make token submission possible for
                # PUT/DELETE requests.
request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '')
request_csrf_token = _sanitize_token(request_csrf_token)
if not _compare_salted_tokens(request_csrf_token, csrf_token):
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
def process_response(self, request, response):
if not getattr(request, 'csrf_cookie_needs_reset', False):
if getattr(response, 'csrf_cookie_set', False):
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
self._set_token(request, response)
response.csrf_cookie_set = True
return response
|
c5737e193aca1b97a7b0256170fcfc6993ede5831b275c511de3c5473f07e142 | from contextlib import ContextDecorator, contextmanager
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections,
)
class TransactionManagementError(ProgrammingError):
"""Transaction management is used improperly."""
pass
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided. This is a private API.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
def get_autocommit(using=None):
"""Get the autocommit status of the connection."""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""Set the autocommit status of the connection."""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""Commit a transaction."""
get_connection(using).commit()
def rollback(using=None):
"""Roll back a transaction."""
get_connection(using).rollback()
def savepoint(using=None):
"""
Create a savepoint (if supported and required by the backend) inside the
current transaction. Return an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Roll back the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commit the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""Get the "needs rollback" flag -- for *advanced use* only."""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, trigger a rollback when exiting the innermost
enclosing atomic block that has `savepoint=True` (that's the default). Use
this to force a rollback without raising an exception.
When `rollback` is `False`, prevent such a rollback. Use this only after
rolling back to a known-good state! Otherwise, you break the atomic block
and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
@contextmanager
def mark_for_rollback_on_error(using=None):
"""
Internal low-level utility to mark a transaction as "needs rollback" when
an exception is raised while not enforcing the enclosed block to be in a
transaction. This is needed by Model.save() and friends to avoid starting a
transaction when in autocommit mode and a single query is executed.
It's equivalent to:
connection = get_connection(using)
if connection.get_autocommit():
yield
else:
with transaction.atomic(using=using, savepoint=False):
yield
but it uses low-level utilities to avoid performance overhead.
"""
try:
yield
except Exception:
connection = get_connection(using)
if connection.in_atomic_block:
connection.needs_rollback = True
raise
def on_commit(func, using=None):
"""
Register `func` to be called when the current transaction is committed.
If the current transaction is rolled back, `func` will not be called.
"""
get_connection(using).on_commit(func)
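# Usage sketch (the model instance and helper are hypothetical): defer side
# effects that must only happen if the surrounding transaction commits.
#
#   def notify():
#       send_confirmation_email()  # hypothetical helper
#
#   with atomic():
#       order.save()
#       on_commit(notify)  # runs only after a successful COMMIT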
#################################
# Decorators / context managers #
#################################
class Atomic(ContextDecorator):
"""
Guarantee the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
example, it's possible to define `oa = atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
This is a private API.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# sqlite3 in Python < 3.6 doesn't handle transactions and
# savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
connection.set_autocommit(False, force_begin_transaction_with_broken_autocommit=True)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
                # This flag will be set to True again if there isn't a
                # savepoint that allows performing the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
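# Usage sketch (names hypothetical): atomic() works as a decorator and as a
# context manager, and nested blocks map to savepoints so an inner failure
# can be contained without aborting the outer transaction.
#
#   @atomic
#   def transfer(source, target, amount):
#       source.withdraw(amount)
#       target.deposit(amount)
#
#   with atomic():              # outer transaction
#       a.save()
#       try:
#           with atomic():      # inner savepoint
#               b.save()
#       except IntegrityError:
#           pass                # only the inner block is rolled back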
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = {using}
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
|
bcd6f0cd1de5f242cc768d7eabdda2eb5f1516b584129fb188d90765fefe2759 | """
This module converts requested URLs to callback view functions.
URLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a ResolverMatch object which provides access to all
attributes of the resolved URL match.
"""
import functools
import inspect
import re
import threading
from importlib import import_module
from urllib.parse import quote
from django.conf import settings
from django.core.checks import Error, Warning
from django.core.checks.urls import check_resolver
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import MultiValueDict
from django.utils.functional import cached_property
from django.utils.http import RFC3986_SUBDELIMS, escape_leading_slashes
from django.utils.regex_helper import normalize
from django.utils.translation import get_language
from .converters import get_converter
from .exceptions import NoReverseMatch, Resolver404
from .utils import get_callable
class ResolverMatch:
def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None, route=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.url_name = url_name
self.route = route
        # If a URLResolver doesn't have a namespace or app_name, it passes
# in an empty value.
self.app_names = [x for x in app_names if x] if app_names else []
self.app_name = ':'.join(self.app_names)
self.namespaces = [x for x in namespaces if x] if namespaces else []
self.namespace = ':'.join(self.namespaces)
if not hasattr(func, '__name__'):
# A class-based view
self._func_path = func.__class__.__module__ + '.' + func.__class__.__name__
else:
# A function-based view
self._func_path = func.__module__ + '.' + func.__name__
view_path = url_name or self._func_path
self.view_name = ':'.join(self.namespaces + [view_path])
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s, route=%s)" % (
self._func_path, self.args, self.kwargs, self.url_name,
self.app_names, self.namespaces, self.route,
)
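# Unpacking sketch: __getitem__ keeps ResolverMatch compatible with the
# legacy (func, args, kwargs) tuple API (the resolved path is hypothetical).
#
#   match = resolve('/articles/2003/')   # django.urls.resolve
#   func, args, kwargs = match
#   response = func(request, *args, **kwargs)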
@functools.lru_cache(maxsize=None)
def get_resolver(urlconf=None):
if urlconf is None:
urlconf = settings.ROOT_URLCONF
return URLResolver(RegexPattern(r'^/'), urlconf)
@functools.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver, converters):
# Build a namespaced resolver for the given parent URLconf pattern.
# This makes it possible to have captured parameters in the parent
# URLconf pattern.
pattern = RegexPattern(ns_pattern)
pattern.converters = dict(converters)
ns_resolver = URLResolver(pattern, resolver.url_patterns)
return URLResolver(RegexPattern(r'^/'), [ns_resolver])
class LocaleRegexDescriptor:
def __init__(self, attr):
self.attr = attr
def __get__(self, instance, cls=None):
"""
Return a compiled regular expression based on the active language.
"""
if instance is None:
return self
# As a performance optimization, if the given regex string is a regular
# string (not a lazily-translated string proxy), compile it once and
# avoid per-language compilation.
pattern = getattr(instance, self.attr)
if isinstance(pattern, str):
instance.__dict__['regex'] = instance._compile(pattern)
return instance.__dict__['regex']
language_code = get_language()
if language_code not in instance._regex_dict:
instance._regex_dict[language_code] = instance._compile(str(pattern))
return instance._regex_dict[language_code]
class CheckURLMixin:
def describe(self):
"""
Format the URL pattern for display in warning messages.
"""
description = "'{}'".format(self)
if self.name:
description += " [name='{}']".format(self.name)
return description
def _check_pattern_startswith_slash(self):
"""
Check that the pattern does not begin with a forward slash.
"""
regex_pattern = self.regex.pattern
if not settings.APPEND_SLASH:
# Skip check as it can be useful to start a URL pattern with a slash
# when APPEND_SLASH=False.
return []
if regex_pattern.startswith(('/', '^/', '^\\/')) and not regex_pattern.endswith('/'):
warning = Warning(
"Your URL pattern {} has a route beginning with a '/'. Remove this "
"slash as it is unnecessary. If this pattern is targeted in an "
"include(), ensure the include() pattern has a trailing '/'.".format(
self.describe()
),
id="urls.W002",
)
return [warning]
else:
return []
class RegexPattern(CheckURLMixin):
regex = LocaleRegexDescriptor('_regex')
def __init__(self, regex, name=None, is_endpoint=False):
self._regex = regex
self._regex_dict = {}
self._is_endpoint = is_endpoint
self.name = name
self.converters = {}
def match(self, path):
match = self.regex.search(path)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
args = () if kwargs else match.groups()
return path[match.end():], args, kwargs
return None
def check(self):
warnings = []
warnings.extend(self._check_pattern_startswith_slash())
if not self._is_endpoint:
warnings.extend(self._check_include_trailing_dollar())
return warnings
def _check_include_trailing_dollar(self):
regex_pattern = self.regex.pattern
if regex_pattern.endswith('$') and not regex_pattern.endswith(r'\$'):
return [Warning(
"Your URL pattern {} uses include with a route ending with a '$'. "
"Remove the dollar from the route to avoid problems including "
"URLs.".format(self.describe()),
id='urls.W001',
)]
else:
return []
def _compile(self, regex):
"""Compile and return the given regular expression."""
try:
return re.compile(regex)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' % (regex, e)
)
def __str__(self):
return str(self._regex)
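# Matching sketch (pattern hypothetical): match() returns the unmatched
# remainder of the path plus the captured args/kwargs, or None on no match.
#
#   pattern = RegexPattern(r'^articles/(?P<year>[0-9]{4})/')
#   pattern.match('articles/2003/extra')
#   # -> ('extra', (), {'year': '2003'})
#   pattern.match('about/')
#   # -> None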
_PATH_PARAMETER_COMPONENT_RE = re.compile(
r'<(?:(?P<converter>[^>:]+):)?(?P<parameter>\w+)>'
)
def _route_to_regex(route, is_endpoint=False):
"""
Convert a path pattern into a regular expression. Return the regular
expression and a dictionary mapping the capture names to the converters.
For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)'
and {'pk': <django.urls.converters.IntConverter>}.
"""
original_route = route
parts = ['^']
converters = {}
while True:
match = _PATH_PARAMETER_COMPONENT_RE.search(route)
if not match:
parts.append(re.escape(route))
break
parts.append(re.escape(route[:match.start()]))
route = route[match.end():]
parameter = match.group('parameter')
if not parameter.isidentifier():
raise ImproperlyConfigured(
"URL route '%s' uses parameter name %r which isn't a valid "
"Python identifier." % (original_route, parameter)
)
raw_converter = match.group('converter')
if raw_converter is None:
# If a converter isn't specified, the default is `str`.
raw_converter = 'str'
try:
converter = get_converter(raw_converter)
except KeyError as e:
raise ImproperlyConfigured(
"URL route '%s' uses invalid converter %s." % (original_route, e)
)
converters[parameter] = converter
parts.append('(?P<' + parameter + '>' + converter.regex + ')')
if is_endpoint:
parts.append('$')
return ''.join(parts), converters
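# Conversion sketch (route hypothetical): a path() route becomes an anchored
# regex plus the converters used to cast the captured values (exact slash
# escaping varies with the Python version's re.escape()).
#
#   _route_to_regex('articles/<int:year>/', is_endpoint=True)
#   # -> ('^articles/(?P<year>[0-9]+)/$',
#   #     {'year': <django.urls.converters.IntConverter object>})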
class RoutePattern(CheckURLMixin):
regex = LocaleRegexDescriptor('_route')
def __init__(self, route, name=None, is_endpoint=False):
self._route = route
self._regex_dict = {}
self._is_endpoint = is_endpoint
self.name = name
self.converters = _route_to_regex(str(route), is_endpoint)[1]
def match(self, path):
match = self.regex.search(path)
if match:
# RoutePattern doesn't allow non-named groups so args are ignored.
kwargs = match.groupdict()
for key, value in kwargs.items():
converter = self.converters[key]
try:
kwargs[key] = converter.to_python(value)
except ValueError:
return None
return path[match.end():], (), kwargs
return None
def check(self):
warnings = self._check_pattern_startswith_slash()
route = self._route
if '(?P<' in route or route.startswith('^') or route.endswith('$'):
warnings.append(Warning(
"Your URL pattern {} has a route that contains '(?P<', begins "
"with a '^', or ends with a '$'. This was likely an oversight "
"when migrating to django.urls.path().".format(self.describe()),
id='2_0.W001',
))
return warnings
def _compile(self, route):
return re.compile(_route_to_regex(route, self._is_endpoint)[0])
def __str__(self):
return str(self._route)
class LocalePrefixPattern:
def __init__(self, prefix_default_language=True):
self.prefix_default_language = prefix_default_language
self.converters = {}
@property
def regex(self):
# This is only used by reverse() and cached in _reverse_dict.
return re.compile(self.language_prefix)
@property
def language_prefix(self):
language_code = get_language() or settings.LANGUAGE_CODE
if language_code == settings.LANGUAGE_CODE and not self.prefix_default_language:
return ''
else:
return '%s/' % language_code
def match(self, path):
language_prefix = self.language_prefix
if path.startswith(language_prefix):
return path[len(language_prefix):], (), {}
return None
def check(self):
return []
def describe(self):
return "'{}'".format(self)
def __str__(self):
return self.language_prefix
class URLPattern:
def __init__(self, pattern, callback, default_args=None, name=None):
self.pattern = pattern
self.callback = callback # the view
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.pattern.describe())
def check(self):
warnings = self._check_pattern_name()
warnings.extend(self.pattern.check())
return warnings
def _check_pattern_name(self):
"""
Check that the pattern name does not contain a colon.
"""
if self.pattern.name is not None and ":" in self.pattern.name:
warning = Warning(
"Your URL pattern {} has a name including a ':'. Remove the colon, to "
"avoid ambiguous namespace references.".format(self.pattern.describe()),
id="urls.W003",
)
return [warning]
else:
return []
def resolve(self, path):
match = self.pattern.match(path)
if match:
new_path, args, kwargs = match
# Pass any extra_kwargs as **kwargs.
kwargs.update(self.default_args)
return ResolverMatch(self.callback, args, kwargs, self.pattern.name, route=str(self.pattern))
@cached_property
def lookup_str(self):
"""
A string that identifies the view (e.g. 'path.to.view_function' or
'path.to.ClassBasedView').
"""
callback = self.callback
if isinstance(callback, functools.partial):
callback = callback.func
if not hasattr(callback, '__name__'):
return callback.__module__ + "." + callback.__class__.__name__
return callback.__module__ + "." + callback.__qualname__
class URLResolver:
def __init__(self, pattern, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
self.pattern = pattern
# urlconf_name is the dotted Python path to the module defining
# urlpatterns. It may also be an object with an urlpatterns attribute
# or urlpatterns itself.
self.urlconf_name = urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
# set of dotted paths to all functions and classes that are used in
# urlpatterns
self._callback_strs = set()
self._populated = False
self._local = threading.local()
def __repr__(self):
if isinstance(self.urlconf_name, list) and self.urlconf_name:
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return '<%s %s (%s:%s) %s>' % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.pattern.describe(),
)
def check(self):
messages = []
for pattern in self.url_patterns:
messages.extend(check_resolver(pattern))
messages.extend(self._check_custom_error_handlers())
return messages or self.pattern.check()
def _check_custom_error_handlers(self):
messages = []
# All handlers take (request, exception) arguments except handler500
# which takes (request).
for status_code, num_parameters in [(400, 2), (403, 2), (404, 2), (500, 1)]:
handler, param_dict = self.resolve_error_handler(status_code)
signature = inspect.signature(handler)
args = [None] * num_parameters
try:
signature.bind(*args)
except TypeError:
msg = (
"The custom handler{status_code} view '{path}' does not "
"take the correct number of arguments ({args})."
).format(
status_code=status_code,
path=handler.__module__ + '.' + handler.__qualname__,
args='request, exception' if num_parameters == 2 else 'request',
)
messages.append(Error(msg, id='urls.E007'))
return messages
def _populate(self):
# Short-circuit if called recursively in this thread to prevent
# infinite recursion. Concurrent threads may call this at the same
# time and will need to continue, so set 'populating' on a
# thread-local variable.
if getattr(self._local, 'populating', False):
return
try:
self._local.populating = True
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for url_pattern in reversed(self.url_patterns):
p_pattern = url_pattern.pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(url_pattern, URLPattern):
self._callback_strs.add(url_pattern.lookup_str)
bits = normalize(url_pattern.pattern.regex.pattern)
lookups.appendlist(
url_pattern.callback,
(bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters)
)
if url_pattern.name is not None:
lookups.appendlist(
url_pattern.name,
(bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters)
)
else: # url_pattern is a URLResolver.
url_pattern._populate()
if url_pattern.app_name:
apps.setdefault(url_pattern.app_name, []).append(url_pattern.namespace)
namespaces[url_pattern.namespace] = (p_pattern, url_pattern)
else:
for name in url_pattern.reverse_dict:
for matches, pat, defaults, converters in url_pattern.reverse_dict.getlist(name):
new_matches = normalize(p_pattern + pat)
lookups.appendlist(
name,
(
new_matches,
p_pattern + pat,
{**defaults, **url_pattern.default_kwargs},
{**self.pattern.converters, **url_pattern.pattern.converters, **converters}
)
)
for namespace, (prefix, sub_pattern) in url_pattern.namespace_dict.items():
current_converters = url_pattern.pattern.converters
sub_pattern.pattern.converters.update(current_converters)
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in url_pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
self._callback_strs.update(url_pattern._callback_strs)
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
self._reverse_dict[language_code] = lookups
self._populated = True
finally:
self._local.populating = False
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
@staticmethod
def _join_route(route1, route2):
"""Join two routes, without the starting ^ in the second route."""
if not route1:
return route2
if route2.startswith('^'):
route2 = route2[1:]
return route1 + route2
def _is_callback(self, name):
if not self._populated:
self._populate()
return name in self._callback_strs
def resolve(self, path):
path = str(path) # path may be a reverse_lazy object
tried = []
match = self.pattern.match(path)
if match:
new_path, args, kwargs = match
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
sub_tried = e.args[0].get('tried')
if sub_tried is not None:
tried.extend([pattern] + t for t in sub_tried)
else:
tried.append([pattern])
else:
if sub_match:
# Merge captured arguments in match with submatch
sub_match_dict = {**kwargs, **self.default_kwargs}
# Update the sub_match_dict with the kwargs from the sub_match.
sub_match_dict.update(sub_match.kwargs)
# If there are *any* named groups, ignore all non-named groups.
# Otherwise, pass all non-named arguments as positional arguments.
sub_match_args = sub_match.args
if not sub_match_dict:
sub_match_args = args + sub_match.args
current_route = '' if isinstance(pattern, URLPattern) else str(pattern.pattern)
return ResolverMatch(
sub_match.func,
sub_match_args,
sub_match_dict,
sub_match.url_name,
[self.app_name] + sub_match.app_names,
[self.namespace] + sub_match.namespaces,
self._join_route(current_route, sub_match.route),
)
tried.append([pattern])
raise Resolver404({'tried': tried, 'path': new_path})
raise Resolver404({'path': path})
@cached_property
def urlconf_module(self):
if isinstance(self.urlconf_name, str):
return import_module(self.urlconf_name)
else:
return self.urlconf_name
@cached_property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
msg = (
"The included URLconf '{name}' does not appear to have any "
"patterns in it. If you see valid patterns in the file then "
"the issue is probably caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
return patterns
def resolve_error_handler(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use lazy import, since
# django.conf.urls imports this file.
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
def reverse(self, lookup_view, *args, **kwargs):
return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
if not self._populated:
self._populate()
possibilities = self.reverse_dict.getlist(lookup_view)
for possibility, pattern, defaults, converters in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params):
continue
candidate_subs = dict(zip(params, args))
else:
if set(kwargs).symmetric_difference(params).difference(defaults):
continue
if any(kwargs.get(k, v) != v for k, v in defaults.items()):
continue
candidate_subs = kwargs
# Convert the candidate subs to text using Converter.to_url().
text_candidate_subs = {}
for k, v in candidate_subs.items():
if k in converters:
text_candidate_subs[k] = converters[k].to_url(v)
else:
text_candidate_subs[k] = str(v)
# WSGI provides decoded URLs, without %xx escapes, and the URL
# resolver operates on such URLs. First substitute arguments
# without quoting to build a decoded URL and look for a match.
# Then, if we have a match, redo the substitution with quoted
# arguments in order to return a properly encoded URL.
candidate_pat = _prefix.replace('%', '%%') + result
if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % text_candidate_subs):
# safe characters from `pchar` definition of RFC 3986
url = quote(candidate_pat % text_candidate_subs, safe=RFC3986_SUBDELIMS + '/~:@')
# Don't allow construction of scheme relative urls.
return escape_leading_slashes(url)
# lookup_view can be URL name or callable, but callables are not
# friendly in error messages.
m = getattr(lookup_view, '__module__', None)
n = getattr(lookup_view, '__name__', None)
if m is not None and n is not None:
lookup_view_s = "%s.%s" % (m, n)
else:
lookup_view_s = lookup_view
patterns = [pattern for (_, pattern, _, _) in possibilities]
if patterns:
if args:
arg_msg = "arguments '%s'" % (args,)
elif kwargs:
arg_msg = "keyword arguments '%s'" % (kwargs,)
else:
arg_msg = "no arguments"
msg = (
"Reverse for '%s' with %s not found. %d pattern(s) tried: %s" %
(lookup_view_s, arg_msg, len(patterns), patterns)
)
else:
msg = (
"Reverse for '%(view)s' not found. '%(view)s' is not "
"a valid view function or pattern name." % {'view': lookup_view_s}
)
raise NoReverseMatch(msg)
|
3840ca8335d38b102a0498bc43e247a2620c458e1315141299e7fc034245f3eb | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils.text import capfirst, get_text_list
from django.utils.translation import gettext, gettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS',
'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet',
'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Construct and return a model instance from the bound ``form``'s
``cleaned_data``, but do not save the returned instance to the database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
        if (not f.editable or isinstance(f, models.AutoField) or
                f.name not in cleaned_data):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Leave defaults for fields that aren't in POST data, except for
# checkbox inputs because they don't appear in POST data if not checked.
if (f.has_default() and
form[f.name].field.widget.value_omitted_from_data(form.data, form.files, form.add_prefix(f.name))):
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Return a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
    ``fields`` is an optional list of field names. If provided, return only
    the named fields.
    ``exclude`` is an optional list of field names. If provided, exclude the
    named fields from the returned dict, even if they are listed in the
    ``fields`` argument.
"""
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
data[f.name] = f.value_from_object(instance)
return data
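# Usage sketch (the model and form are hypothetical): seed a form with an
# instance's current data.
#
#   author = Author.objects.get(pk=1)
#   initial = model_to_dict(author, fields=['name', 'alias'])
#   form = AuthorForm(initial=initial)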
def apply_limit_choices_to_to_formfield(formfield):
"""Apply limit_choices_to to the formfield's queryset if needed."""
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None, *, apply_limit_choices_to=True):
"""
Return an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, return only the
named fields.
``exclude`` is an optional list of field names. If provided, exclude the
named fields from the returned fields, even if they are listed in the
``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
``apply_limit_choices_to`` is a boolean indicating if limit_choices_to
should be applied to a field's queryset.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_private_fields = [f for f in opts.private_fields if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
if (fields is not None and f.name in fields and
(exclude is None or f.name not in exclude)):
raise FieldError(
"'%s' cannot be specified for %s model form as it is a non-editable field" % (
f.name, model.__name__)
)
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs['form_class'] = field_classes[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
if apply_limit_choices_to:
apply_limit_choices_to_to_formfield(formfield)
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
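# Usage sketch (model hypothetical): build form fields directly, e.g. to
# override a single widget without declaring a full ModelForm subclass
# (SelectDateWidget here is django.forms.SelectDateWidget).
#
#   fields = fields_for_model(
#       Author,
#       fields=['name', 'birth_date'],
#       widgets={'birth_date': SelectDateWidget()},
#   )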
class ModelFormOptions:
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
self.field_classes = getattr(options, 'field_classes', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
base_formfield_callback = None
for b in bases:
if hasattr(b, 'Meta') and hasattr(b.Meta, 'formfield_callback'):
base_formfield_callback = b.Meta.formfield_callback
break
formfield_callback = attrs.pop('formfield_callback', base_formfield_callback)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, str) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(
opts.model, opts.fields, opts.exclude, opts.widgets,
formfield_callback, opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages, opts.field_classes,
# limit_choices_to will be applied during ModelForm.__init__().
apply_limit_choices_to=False,
)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = {k for k, v in fields.items() if not v}
missing_fields = none_model_fields.difference(new_class.declared_fields)
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None, use_required_attribute=None,
renderer=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super().__init__(
data, files, auto_id, prefix, object_data, error_class,
label_suffix, empty_permitted, use_required_attribute=use_required_attribute,
renderer=renderer,
)
for formfield in self.fields.values():
apply_limit_choices_to_to_formfield(formfield)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, exclude several types of fields from model
validation. See tickets #12507, #12521, #12553.
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors:
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
# Allow the model generated by construct_instance() to raise
# ValidationError and have them handled in the same way as others.
if hasattr(errors, 'error_dict'):
error_dict = errors.error_dict
else:
error_dict = {NON_FIELD_ERRORS: errors}
for field, messages in error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
try:
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
except ValidationError as e:
self._update_errors(e)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Call the instance's validate_unique() method and update the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def _save_m2m(self):
"""
Save the many-to-many fields and generic relations for this form.
"""
cleaned_data = self.cleaned_data
exclude = self._meta.exclude
fields = self._meta.fields
opts = self.instance._meta
        # Note that for historical reasons we also want to include
        # private_fields here. (GenericRelation was previously a fake
        # m2m field.)
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(self.instance, cleaned_data[f.name])
def save(self, commit=True):
"""
Save this form's self.instance object if commit=True. Otherwise, add
a save_m2m() method to the form which can be called after the instance
is saved manually at a later time. Return the model instance.
"""
if self.errors:
raise ValueError(
"The %s could not be %s because the data didn't validate." % (
self.instance._meta.object_name,
'created' if self.instance._state.adding else 'changed',
)
)
if commit:
# If committing, save the instance and the m2m data immediately.
self.instance.save()
self._save_m2m()
else:
# If not committing, add a method to the form to allow deferred
# saving of m2m data.
self.save_m2m = self._save_m2m
return self.instance
save.alters_data = True
class ModelForm(BaseModelForm, metaclass=ModelFormMetaclass):
pass
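# Declaration sketch (model hypothetical): the metaclass reads Meta and
# generates base_fields from the model's fields.
#
#   class AuthorForm(ModelForm):
#       class Meta:
#           model = Author
#           fields = ['name', 'alias']
#
#   form = AuthorForm(request.POST, instance=author)
#   if form.is_valid():
#       author = form.save()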
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Return a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, include only
the named fields in the returned fields. If omitted or '__all__', use all
fields.
``exclude`` is an optional list of field names. If provided, exclude the
named fields from the returned fields, even if they are listed in the
``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
if field_classes is not None:
attrs['field_classes'] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
bases = (form.Meta,) if hasattr(form, 'Meta') else ()
Meta = type('Meta', bases, attrs)
if formfield_callback:
Meta.formfield_callback = staticmethod(formfield_callback)
# Give this new form class a reasonable name.
class_name = model.__name__ + 'Form'
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
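# Factory sketch (model hypothetical): build an equivalent form class
# without writing a declarative Meta.
#
#   AuthorForm = modelform_factory(Author, fields=['name', 'alias'])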
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
# Set of fields that must be unique among forms of this set.
unique_fields = set()
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, *, initial=None, **kwargs):
self.queryset = queryset
self.initial_extra = initial
super().__init__(**{'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix, **kwargs})
def initial_form_count(self):
"""Return the number of forms that are required in this FormSet."""
if not self.is_bound:
return len(self.get_queryset())
return super().initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.remote_field is not None:
field = field.remote_field.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
pk_required = i < self.initial_form_count()
if pk_required:
if self.is_bound:
pk_key = '%s-%s' % (self.add_prefix(i), self.model._meta.pk.name)
try:
pk = self.data[pk_key]
except KeyError:
# The primary key is missing. The user may have tampered
# with POST data.
pass
else:
to_python = self._get_to_python(self.model._meta.pk)
try:
pk = to_python(pk)
except ValidationError:
# The primary key exists but is an invalid value. The
# user may have tampered with POST data.
pass
else:
kwargs['instance'] = self._existing_object(pk)
else:
kwargs['instance'] = self.get_queryset()[i]
elif self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
form = super()._construct_form(i, **kwargs)
if pk_required:
form.fields[self.model._meta.pk.name].required = True
return form
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Save and return a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Save and return an existing model instance for the given form."""
return form.save(commit=commit)
def delete_existing(self, obj, commit=True):
"""Deletes an existing model instance."""
if commit:
obj.delete()
def save(self, commit=True):
"""
Save model instances for every form, adding and changing instances
as necessary, and return the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
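    # A sketch of the deferred-save pattern that save(commit=False) enables
    # (illustrative only; ``AuthorFormSet`` and ``request`` are hypothetical):
    #
    #     formset = AuthorFormSet(request.POST)
    #     if formset.is_valid():
    #         for instance in formset.save(commit=False):
    #             instance.modified_by = request.user
    #             instance.save()
    #         formset.save_m2m()  # Only defined after save(commit=False).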
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks.update(unique_checks)
all_date_checks.update(date_checks)
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# Get the data for the set of fields that must be unique among the forms.
row_data = (
field if field in self.unique_fields else form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data
)
# Reduce Model instances to their primary key values
row_data = tuple(
d._get_pk_val() if hasattr(d, '_get_pk_val')
# Prevent "unhashable type: list" errors later on.
else tuple(d) if isinstance(d, list)
else d for d in row_data
)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None and
form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return gettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return gettext("Please correct the duplicate data for %(field)s, which must be unique.") % {
"field": get_text_list(unique_check, _("and")),
}
def get_date_error_message(self, date_check):
return gettext(
"Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s."
) % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': str(date_check[1]),
}
def get_form_error(self):
return gettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
# If the pk is None, it means either:
# 1. The object is an unexpected empty model, created by invalid
# POST data such as an object outside the formset's queryset.
# 2. The object was already deleted from the database.
if obj.pk is None:
continue
if form in forms_to_delete:
self.deleted_objects.append(obj)
self.delete_existing(obj, commit=commit)
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
        # data back. Generally, pk.editable should be False, but for some
        # reason, auto_created pk fields and AutoFields have editable=True,
        # so check for that as well.
def pk_is_not_editable(pk):
return (
(not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or (
pk.remote_field and pk.remote_field.parent_link and
pk_is_not_editable(pk.remote_field.model._meta.pk)
)
)
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, (ForeignKey, OneToOneField)):
qs = pk.remote_field.model._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super().add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""Return a FormSet class for the given Django model class."""
meta = getattr(form, 'Meta', None)
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts,
error_messages=error_messages, field_classes=field_classes)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
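# Example usage of modelformset_factory (an illustrative sketch; ``Author`` is
# a hypothetical model):
#
#     AuthorFormSet = modelformset_factory(Author, fields=['name'], extra=2)
#     formset = AuthorFormSet(queryset=Author.objects.filter(name__startswith='O'))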
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
self.unique_fields = {self.fk.name}
super().__init__(data, files, prefix=prefix, queryset=qs, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if self.form._meta.fields and self.fk.name not in self.form._meta.fields:
if isinstance(self.form._meta.fields, tuple):
self.form._meta.fields = list(self.form._meta.fields)
self.form._meta.fields.append(self.fk.name)
def initial_form_count(self):
if self.save_as_new:
return 0
return super().initial_form_count()
def _construct_form(self, i, **kwargs):
form = super()._construct_form(i, **kwargs)
if self.save_as_new:
mutable = getattr(form.data, '_mutable', None)
# Allow modifying an immutable QueryDict.
if mutable is not None:
form.data._mutable = True
            # Remove the primary key from the form's data; we are only
            # creating new instances.
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
if mutable is not None:
form.data._mutable = mutable
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
fk_value = getattr(self.instance, self.fk.remote_field.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
return super().save_new(form, commit=commit)
def add_fields(self, form, index):
super().add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
# The InlineForeignKeyField assumes that the foreign key relation is
# based on the parent model's pk. If this isn't the case, set to_field
# to correctly resolve the initial form value.
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
kwargs['to_field'] = self.fk.remote_field.field_name
# If we're adding a new object, ignore a parent's auto-generated key
# as it will be regenerated on the save request.
if self.instance._state.adding:
if kwargs.get('to_field') is not None:
to_field = self.instance._meta.get_field(kwargs['to_field'])
else:
to_field = self.instance._meta.pk
if to_field.has_default():
setattr(self.instance, to_field.attname, None)
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super().get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Find and return the ForeignKey from model to parent if there is one
(return None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, raise an exception if there isn't a ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.remote_field.model != parent_model and
fk.remote_field.model not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label)
)
elif not fks_to_parent:
raise ValueError(
"'%s' has no field named '%s'." % (model._meta.label, fk_name)
)
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey) and (
f.remote_field.model == parent_model or
f.remote_field.model in parent_model._meta.get_parent_list()
)
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif not fks_to_parent:
if can_fail:
return
raise ValueError(
"'%s' has no ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
else:
raise ValueError(
"'%s' has more than one ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Return an ``InlineFormSet`` for the given kwargs.
``fk_name`` must be provided if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
'field_classes': field_classes,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
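# Example usage of inlineformset_factory (an illustrative sketch; ``Author``
# and ``Book`` are hypothetical models, with Book holding a ForeignKey to
# Author):
#
#     BookFormSet = inlineformset_factory(Author, Book, fields=['title'])
#     author = Author.objects.get(name='Orwell')
#     formset = BookFormSet(request.POST or None, instance=author)
#     if formset.is_valid():
#         formset.save()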
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
    A basic integer field that deals with validating that the given value
    matches the given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline value did not match the parent instance.'),
}
def __init__(self, parent_instance, *args, pk_field=False, to_field=None, **kwargs):
self.parent_instance = parent_instance
self.pk_field = pk_field
self.to_field = to_field
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super().__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
            # If there is no value, fall back to the parent instance.
return self.parent_instance
        # Ensure that we compare values of the same type.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if str(value) != str(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator:
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
queryset = self.queryset
# Can't use iterator() when queryset uses prefetch_related()
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
for obj in queryset:
yield self.choice(obj)
def __len__(self):
# count() adds a query but uses less memory since the QuerySet results
# won't be cached. In most cases, the choices will only be iterated on,
# and __len__() won't be called.
return self.queryset.count() + (1 if self.field.empty_label is not None else 0)
def __bool__(self):
return self.field.empty_label is not None or self.queryset.exists()
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
iterator = ModelChoiceIterator
def __init__(self, queryset, *, empty_label="---------",
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
**kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(
self, required=required, widget=widget, label=label,
initial=initial, help_text=help_text, **kwargs
)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Return ``limit_choices_to`` for this form field.
If it is a callable, invoke it and return the result.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
if self.queryset is not None:
result.queryset = self.queryset.all()
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = None if queryset is None else queryset.all()
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
    # This method will be used by ModelChoiceIterator to create the object
    # labels. Override it to customize the label.
def label_from_instance(self, obj):
"""
Convert objects into strings and generate the labels for the choices
presented by this object. Subclasses can override this method to
customize the display of the choices.
"""
return str(obj)
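    # A sketch of the intended override (illustrative only; ``get_full_name``
    # is a hypothetical model method):
    #
    #     class AuthorChoiceField(ModelChoiceField):
    #         def label_from_instance(self, obj):
    #             return obj.get_full_name()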
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return self.iterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super().prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
if self.disabled:
return False
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return str(self.prepare_value(initial_value)) != str(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value.')
}
def __init__(self, queryset, **kwargs):
super().__init__(queryset, empty_label=None, **kwargs)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
value = self.prepare_value(value)
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
        # Since this overrides the inherited ModelChoiceField.clean(), run
        # custom validators here.
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, return a QuerySet of the
corresponding objects. Raise a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
        # Deduplicate given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = {str(getattr(o, key)) for o in qs}
for val in value:
if str(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, str) and
not hasattr(value, '_meta')):
prepare_value = super().prepare_value
return [prepare_value(v) for v in value]
return super().prepare_value(value)
def has_changed(self, initial, data):
if self.disabled:
return False
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = {str(value) for value in self.prepare_value(initial)}
data_set = {str(value) for value in data}
return data_set != initial_set
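# Example usage of ModelMultipleChoiceField (an illustrative sketch;
# ``Author`` and ``selected_authors`` are hypothetical):
#
#     from django import forms
#
#     class BookForm(forms.Form):
#         authors = ModelMultipleChoiceField(queryset=Author.objects.all())
#
#     form = BookForm(data={'authors': [str(a.pk) for a in selected_authors]})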
def modelform_defines_fields(form_class):
return hasattr(form_class, '_meta') and (
form_class._meta.fields is not None or
form_class._meta.exclude is not None
)
47dc34328422b4ac027d7ca56c05363b8271cc48fa30a02dfda7710580dfc9f4
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import BooleanField, IntegerField
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.utils.functional import cached_property
from django.utils.html import html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _, ngettext
__all__ = ('BaseFormSet', 'formset_factory', 'all_valid')
# special field names
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MIN_NUM_FORM_COUNT = 'MIN_NUM_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'
# default minimum number of forms in a formset
DEFAULT_MIN_NUM = 0
# default maximum number of forms in a formset, to prevent memory exhaustion
DEFAULT_MAX_NUM = 1000
class ManagementForm(Form):
"""
Keep track of how many form instances are displayed on the page. If adding
new forms via JavaScript, you should increment the count field of this form
as well.
"""
def __init__(self, *args, **kwargs):
self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
# MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of
# the management form, but only for the convenience of client-side
# code. The POST value of them returned from the client is not checked.
self.base_fields[MIN_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
super().__init__(*args, **kwargs)
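# For a bound formset using the default 'form' prefix, the management data is
# expected in POST keys like these (illustrative values):
#
#     data = {
#         'form-TOTAL_FORMS': '2',
#         'form-INITIAL_FORMS': '0',
#         'form-MIN_NUM_FORMS': '0',
#         'form-MAX_NUM_FORMS': '1000',
#         'form-0-title': 'First article',
#         'form-1-title': 'Second article',
#     }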
@html_safe
class BaseFormSet:
"""
A collection of instances of the same Form class.
"""
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, form_kwargs=None):
self.is_bound = data is not None or files is not None
self.prefix = prefix or self.get_default_prefix()
self.auto_id = auto_id
self.data = data or {}
self.files = files or {}
self.initial = initial
self.form_kwargs = form_kwargs or {}
self.error_class = error_class
self._errors = None
self._non_form_errors = None
def __str__(self):
return self.as_table()
def __iter__(self):
"""Yield the forms in the order they should be rendered."""
return iter(self.forms)
def __getitem__(self, index):
"""Return the form at the given index, based on the rendering order."""
return self.forms[index]
def __len__(self):
return len(self.forms)
def __bool__(self):
"""
Return True since all formsets have a management form which is not
included in the length.
"""
return True
@cached_property
def management_form(self):
"""Return the ManagementForm instance for this FormSet."""
if self.is_bound:
form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
if not form.is_valid():
raise ValidationError(
_('ManagementForm data is missing or has been tampered with'),
code='missing_management_form',
)
else:
form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MIN_NUM_FORM_COUNT: self.min_num,
MAX_NUM_FORM_COUNT: self.max_num
})
return form
def total_form_count(self):
"""Return the total number of forms in this FormSet."""
if self.is_bound:
# return absolute_max if it is lower than the actual total form
# count in the data; this is DoS protection to prevent clients
# from forcing the server to instantiate arbitrary numbers of
# forms
return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max)
else:
initial_forms = self.initial_form_count()
total_forms = max(initial_forms, self.min_num) + self.extra
# Allow all existing related objects/inlines to be displayed,
# but don't allow extra beyond max_num.
if initial_forms > self.max_num >= 0:
total_forms = initial_forms
elif total_forms > self.max_num >= 0:
total_forms = self.max_num
return total_forms
def initial_form_count(self):
"""Return the number of forms that are required in this FormSet."""
if self.is_bound:
return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
else:
# Use the length of the initial data if it's there, 0 otherwise.
initial_forms = len(self.initial) if self.initial else 0
return initial_forms
@cached_property
def forms(self):
"""Instantiate forms at first property access."""
# DoS protection is included in total_form_count()
forms = [self._construct_form(i, **self.get_form_kwargs(i))
for i in range(self.total_form_count())]
return forms
def get_form_kwargs(self, index):
"""
Return additional keyword arguments for each individual formset form.
index will be None if the form being constructed is a new empty
form.
"""
return self.form_kwargs.copy()
def _construct_form(self, i, **kwargs):
"""Instantiate and return the i-th form instance in a formset."""
defaults = {
'auto_id': self.auto_id,
'prefix': self.add_prefix(i),
'error_class': self.error_class,
# Don't render the HTML 'required' attribute as it may cause
# incorrect validation for extra, optional, and deleted
# forms in the formset.
'use_required_attribute': False,
}
if self.is_bound:
defaults['data'] = self.data
defaults['files'] = self.files
if self.initial and 'initial' not in kwargs:
try:
defaults['initial'] = self.initial[i]
except IndexError:
pass
# Allow extra forms to be empty, unless they're part of
# the minimum forms.
if i >= self.initial_form_count() and i >= self.min_num:
defaults['empty_permitted'] = True
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, i)
return form
@property
def initial_forms(self):
"""Return a list of all the initial forms in this formset."""
return self.forms[:self.initial_form_count()]
@property
def extra_forms(self):
"""Return a list of all the extra forms in this formset."""
return self.forms[self.initial_form_count():]
@property
def empty_form(self):
form = self.form(
auto_id=self.auto_id,
prefix=self.add_prefix('__prefix__'),
empty_permitted=True,
use_required_attribute=False,
**self.get_form_kwargs(None)
)
self.add_fields(form, None)
return form
@property
def cleaned_data(self):
"""
Return a list of form.cleaned_data dicts for every form in self.forms.
"""
if not self.is_valid():
raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
return [form.cleaned_data for form in self.forms]
@property
def deleted_forms(self):
"""Return a list of forms that have been marked for deletion."""
if not self.is_valid() or not self.can_delete:
return []
        # Construct _deleted_form_indexes, which is just a list of the form
        # indexes that have had their deletion widget set to True.
if not hasattr(self, '_deleted_form_indexes'):
self._deleted_form_indexes = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
if self._should_delete_form(form):
self._deleted_form_indexes.append(i)
return [self.forms[i] for i in self._deleted_form_indexes]
@property
def ordered_forms(self):
"""
        Return a list of forms in the order specified by the incoming data.
Raise an AttributeError if ordering is not allowed.
"""
if not self.is_valid() or not self.can_order:
raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
# Construct _ordering, which is a list of (form_index, order_field_value)
# tuples. After constructing this list, we'll sort it by order_field_value
# so we have a way to get to the form indexes in the order specified
# by the form data.
if not hasattr(self, '_ordering'):
self._ordering = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
                # don't add forms marked for deletion to the ordering
if self.can_delete and self._should_delete_form(form):
continue
self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
# After we're done populating self._ordering, sort it.
            # A sort key that orders things numerically ascending, but places
            # None below anything else. Allowing None as a comparison value
            # means we can leave ordering fields blank.
def compare_ordering_key(k):
if k[1] is None:
return (1, 0) # +infinity, larger than any number
return (0, k[1])
self._ordering.sort(key=compare_ordering_key)
        # Return a list of forms in the order specified by the form data.
return [self.forms[i[0]] for i in self._ordering]
@classmethod
def get_default_prefix(cls):
return 'form'
def non_form_errors(self):
"""
Return an ErrorList of errors that aren't associated with a particular
form -- i.e., from formset.clean(). Return an empty ErrorList if there
are none.
"""
if self._non_form_errors is None:
self.full_clean()
return self._non_form_errors
@property
def errors(self):
"""Return a list of form.errors for every form in self.forms."""
if self._errors is None:
self.full_clean()
return self._errors
def total_error_count(self):
"""Return the number of errors across all forms in the formset."""
        return len(self.non_form_errors()) + sum(
            len(form_errors) for form_errors in self.errors
        )
def _should_delete_form(self, form):
"""Return whether or not the form was marked for deletion."""
return form.cleaned_data.get(DELETION_FIELD_NAME, False)
def is_valid(self):
"""Return True if every form in self.forms is valid."""
if not self.is_bound:
return False
# We loop over every form.errors here rather than short circuiting on the
# first failure to make sure validation gets triggered for every form.
forms_valid = True
# This triggers a full clean.
self.errors
for i in range(0, self.total_form_count()):
form = self.forms[i]
if self.can_delete and self._should_delete_form(form):
# This form is going to be deleted so any of its errors
# shouldn't cause the entire formset to be invalid.
continue
forms_valid &= form.is_valid()
return forms_valid and not self.non_form_errors()
def full_clean(self):
"""
Clean all of self.data and populate self._errors and
self._non_form_errors.
"""
self._errors = []
self._non_form_errors = self.error_class()
empty_forms_count = 0
if not self.is_bound: # Stop further processing.
return
for i in range(0, self.total_form_count()):
form = self.forms[i]
# Empty forms are unchanged forms beyond those with initial data.
if not form.has_changed() and i >= self.initial_form_count():
empty_forms_count += 1
# Accessing errors calls full_clean() if necessary.
# _should_delete_form() requires cleaned_data.
form_errors = form.errors
if self.can_delete and self._should_delete_form(form):
continue
self._errors.append(form_errors)
try:
            if ((self.validate_max and
                    self.total_form_count() - len(self.deleted_forms) > self.max_num) or
                    self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max):
raise ValidationError(ngettext(
"Please submit %d or fewer forms.",
"Please submit %d or fewer forms.", self.max_num) % self.max_num,
code='too_many_forms',
)
if (self.validate_min and
self.total_form_count() - len(self.deleted_forms) - empty_forms_count < self.min_num):
raise ValidationError(ngettext(
"Please submit %d or more forms.",
"Please submit %d or more forms.", self.min_num) % self.min_num,
code='too_few_forms')
# Give self.clean() a chance to do cross-form validation.
self.clean()
except ValidationError as e:
self._non_form_errors = self.error_class(e.error_list)
def clean(self):
"""
Hook for doing any extra formset-wide cleaning after Form.clean() has
been called on every form. Any ValidationError raised by this method
will not be associated with a particular form; it will be accessible
via formset.non_form_errors()
"""
pass
def has_changed(self):
"""Return True if data in any form differs from initial."""
return any(form.has_changed() for form in self)
def add_fields(self, form, index):
"""A hook for adding extra fields on to each form instance."""
if self.can_order:
# Only pre-fill the ordering field for initial forms.
if index is not None and index < self.initial_form_count():
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False)
else:
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
if self.can_delete:
form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)
def add_prefix(self, index):
return '%s-%s' % (self.prefix, index)
def is_multipart(self):
"""
Return True if the formset needs to be multipart, i.e. it
has FileInput, or False otherwise.
"""
if self.forms:
return self.forms[0].is_multipart()
else:
return self.empty_form.is_multipart()
@property
def media(self):
# All the forms on a FormSet are the same, so you only need to
# interrogate the first form for media.
if self.forms:
return self.forms[0].media
else:
return self.empty_form.media
def as_table(self):
"Return this formset rendered as HTML <tr>s -- excluding the <table></table>."
# XXX: there is no semantic division between forms here, there
# probably should be. It might make sense to render each form as a
# table row with each field as a td.
forms = ' '.join(form.as_table() for form in self)
return mark_safe(str(self.management_form) + '\n' + forms)
def as_p(self):
"Return this formset rendered as HTML <p>s."
forms = ' '.join(form.as_p() for form in self)
return mark_safe(str(self.management_form) + '\n' + forms)
def as_ul(self):
"Return this formset rendered as HTML <li>s."
forms = ' '.join(form.as_ul() for form in self)
return mark_safe(str(self.management_form) + '\n' + forms)
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
can_delete=False, max_num=None, validate_max=False,
min_num=None, validate_min=False):
"""Return a FormSet for the given form class."""
if min_num is None:
min_num = DEFAULT_MIN_NUM
if max_num is None:
max_num = DEFAULT_MAX_NUM
    # Hard limit on the number of forms instantiated, to prevent
    # memory-exhaustion attacks. The limit is simply max_num + DEFAULT_MAX_NUM
    # (which is 2 * DEFAULT_MAX_NUM if max_num is None in the first place).
absolute_max = max_num + DEFAULT_MAX_NUM
attrs = {
'form': form,
'extra': extra,
'can_order': can_order,
'can_delete': can_delete,
'min_num': min_num,
'max_num': max_num,
'absolute_max': absolute_max,
'validate_min': validate_min,
'validate_max': validate_max,
}
return type(form.__name__ + 'FormSet', (formset,), attrs)
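# Example usage of formset_factory (an illustrative sketch; ``ArticleForm`` is
# a hypothetical Form subclass):
#
#     ArticleFormSet = formset_factory(ArticleForm, extra=2, max_num=10)
#     formset = ArticleFormSet(initial=[{'title': 'Django is now open source'}])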
def all_valid(formsets):
"""Validate every formset and return True if all are valid."""
valid = True
for formset in formsets:
valid &= formset.is_valid()
return valid
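# Example usage of all_valid when a view edits several formsets at once (an
# illustrative sketch; both formset classes and ``request`` are hypothetical):
#
#     book_formset = BookFormSet(request.POST, instance=author)
#     review_formset = ReviewFormSet(request.POST, instance=author)
#     if all_valid([book_formset, review_formset]):
#         book_formset.save()
#         review_formset.save()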
7f2fdcc42900a18db533c7b7b02b7cf96f49d422c4f46454bb8d294d9395a4ad
"""
HTML Widget classes
"""
import copy
import datetime
import re
import warnings
from itertools import chain
from django.conf import settings
from django.forms.utils import to_current_timezone
from django.templatetags.static import static
from django.utils import datetime_safe, formats
from django.utils.dates import MONTHS
from django.utils.formats import get_format
from django.utils.html import format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from .renderers import get_default_renderer
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
class MediaOrderConflictWarning(RuntimeWarning):
pass
@html_safe
class Media:
def __init__(self, media=None, css=None, js=None):
if media is not None:
css = getattr(media, 'css', {})
js = getattr(media, 'js', [])
else:
if css is None:
css = {}
if js is None:
js = []
self._css = css
self._js = js
def __repr__(self):
return 'Media(css=%r, js=%r)' % (self._css, self._js)
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES)))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css)
return chain.from_iterable([
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet">',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media)
def absolute_path(self, path):
"""
Given a relative or absolute path to a static asset, return an absolute
path. An absolute path will be returned unchanged while a relative path
will be passed to django.templatetags.static.static().
"""
if path.startswith(('http://', 'https://', '/')):
return path
return static(path)
def __getitem__(self, name):
"""Return a Media object that only contains media of the given type."""
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
@staticmethod
def merge(list_1, list_2):
"""
Merge two lists while trying to keep the relative order of the elements.
Warn if the lists have the same two elements in a different relative
order.
For static assets it can be important to have them included in the DOM
in a certain order. In JavaScript you may not be able to reference a
global or in CSS you might want to override a style.
"""
# Start with a copy of list_1.
combined_list = list(list_1)
last_insert_index = len(list_1)
# Walk list_2 in reverse, inserting each element into combined_list if
# it doesn't already exist.
for path in reversed(list_2):
try:
# Does path already exist in the list?
index = combined_list.index(path)
except ValueError:
# Add path to combined_list since it doesn't exist.
combined_list.insert(last_insert_index, path)
else:
if index > last_insert_index:
warnings.warn(
'Detected duplicate Media files in an opposite order:\n'
'%s\n%s' % (combined_list[last_insert_index], combined_list[index]),
MediaOrderConflictWarning,
)
# path already exists in the list. Update last_insert_index so
# that the following elements are inserted in front of this one.
last_insert_index = index
return combined_list
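    # A sketch of the merge behavior (illustrative only):
    #
    #     >>> Media.merge(['a.js', 'b.js'], ['b.js', 'c.js'])
    #     ['a.js', 'b.js', 'c.js']
    #
    # Merging ['b.js', 'a.js'] with ['a.js', 'b.js'] keeps both files but
    # emits a MediaOrderConflictWarning because their relative order conflicts.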
def __add__(self, other):
combined = Media()
combined._js = self.merge(self._js, other._js)
combined._css = {
medium: self.merge(self._css.get(medium, []), other._css.get(medium, []))
for medium in self._css.keys() | other._css.keys()
}
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
return Media(definition)
return base
return property(_media)
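# A sketch of a Media definition that media_property() picks up (illustrative
# only; the asset paths are hypothetical):
#
#     class CalendarWidget(TextInput):
#         class Media:
#             css = {'all': ('pretty.css',)}
#             js = ('animations.js', 'actions.js')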
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = super(MediaDefiningClass, mcs).__new__(mcs, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
class Widget(metaclass=MediaDefiningClass):
    # Determines whether this widget needs a multipart form.
    needs_multipart_form = False
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
self.attrs = {} if attrs is None else attrs.copy()
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None):
context = self.get_context(name, value, attrs)
yield context['widget']
def format_value(self, value):
"""
Return a value as it should appear when rendered in a template.
"""
if value == '' or value is None:
return None
if self.is_localized:
return formats.localize_input(value)
return str(value)
def get_context(self, name, value, attrs):
context = {}
context['widget'] = {
'name': name,
'is_hidden': self.is_hidden,
'required': self.is_required,
'value': self.format_value(value),
'attrs': self.build_attrs(self.attrs, attrs),
'template_name': self.template_name,
}
return context
def render(self, name, value, attrs=None, renderer=None):
"""Render the widget as an HTML string."""
context = self.get_context(name, value, attrs)
return self._render(self.template_name, context, renderer)
def _render(self, template_name, context, renderer=None):
if renderer is None:
renderer = get_default_renderer()
return mark_safe(renderer.render(template_name, context))
def build_attrs(self, base_attrs, extra_attrs=None):
"""Build an attribute dictionary."""
return {**base_attrs, **(extra_attrs or {})}
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, return the value
of this widget or None if it's not provided.
"""
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in data
def id_for_label(self, id_):
"""
Return the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Return None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
def use_required_attribute(self, initial):
return not self.is_hidden
class Input(Widget):
"""
Base class for all <input> widgets.
"""
input_type = None # Subclasses must define this.
template_name = 'django/forms/widgets/input.html'
def __init__(self, attrs=None):
if attrs is not None:
attrs = attrs.copy()
self.input_type = attrs.pop('type', self.input_type)
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['type'] = self.input_type
return context
class TextInput(Input):
input_type = 'text'
template_name = 'django/forms/widgets/text.html'
class NumberInput(Input):
input_type = 'number'
template_name = 'django/forms/widgets/number.html'
class EmailInput(Input):
input_type = 'email'
template_name = 'django/forms/widgets/email.html'
class URLInput(Input):
input_type = 'url'
template_name = 'django/forms/widgets/url.html'
class PasswordInput(Input):
input_type = 'password'
template_name = 'django/forms/widgets/password.html'
def __init__(self, attrs=None, render_value=False):
super().__init__(attrs)
self.render_value = render_value
def get_context(self, name, value, attrs):
if not self.render_value:
value = None
return super().get_context(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
template_name = 'django/forms/widgets/hidden.html'
class MultipleHiddenInput(HiddenInput):
"""
Handle <input type="hidden"> for fields that have a list
of values.
"""
template_name = 'django/forms/widgets/multiple_hidden.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
final_attrs = context['widget']['attrs']
id_ = context['widget']['attrs'].get('id')
subwidgets = []
for index, value_ in enumerate(context['widget']['value']):
widget_attrs = final_attrs.copy()
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
widget_attrs['id'] = '%s_%s' % (id_, index)
widget = HiddenInput()
widget.is_required = self.is_required
subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def format_value(self, value):
return [] if value is None else value
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
template_name = 'django/forms/widgets/file.html'
def format_value(self, value):
"""File input never renders a value."""
return
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in files
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
clear_checkbox_label = _('Clear')
initial_text = _('Currently')
input_text = _('Change')
template_name = 'django/forms/widgets/clearable_file_input.html'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
Return whether value is considered to be initial value.
"""
return bool(value and getattr(value, 'url', False))
def format_value(self, value):
"""
Return the file object if it has a defined url attribute.
"""
if self.is_initial(value):
return value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
context['widget'].update({
'checkbox_name': checkbox_name,
'checkbox_id': checkbox_id,
'is_initial': self.is_initial(value),
'input_text': self.input_text,
'initial_text': self.initial_text,
'clear_checkbox_label': self.clear_checkbox_label,
})
return context
def value_from_datadict(self, data, files, name):
upload = super().value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
def use_required_attribute(self, initial):
return super().use_required_attribute(initial) and not initial
def value_omitted_from_data(self, data, files, name):
return (
super().value_omitted_from_data(data, files, name) and
self.clear_checkbox_name(name) not in data
)
class Textarea(Widget):
template_name = 'django/forms/widgets/textarea.html'
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super().__init__(default_attrs)
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super().__init__(attrs)
self.format = format or None
def format_value(self, value):
return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
template_name = 'django/forms/widgets/date.html'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/datetime.html'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/time.html'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Input):
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox.html'
def __init__(self, attrs=None, check_test=None):
super().__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def format_value(self, value):
"""Only return the 'value' attribute if value isn't empty."""
if value is True or value is False or value is None or value == '':
return
return str(value)
def get_context(self, name, value, attrs):
if self.check_test(value):
if attrs is None:
attrs = {}
attrs['checked'] = True
return super().get_context(name, value, attrs)
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, str):
value = values.get(value.lower(), value)
return bool(value)
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
class ChoiceWidget(Widget):
allow_multiple_selected = False
input_type = None
template_name = None
option_template_name = None
add_id_index = True
checked_attribute = {'checked': True}
option_inherits_attrs = True
def __init__(self, attrs=None, choices=()):
super().__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None):
"""
Yield all "subwidgets" of this widget. Used to enable iterating
options from a BoundField for choice widgets.
"""
value = self.format_value(value)
yield from self.options(name, value, attrs)
def options(self, name, value, attrs=None):
"""Yield a flat list of options for this widgets."""
for group in self.optgroups(name, value, attrs):
yield from group[1]
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
groups = []
has_selected = False
for index, (option_value, option_label) in enumerate(self.choices):
if option_value is None:
option_value = ''
subgroup = []
if isinstance(option_label, (list, tuple)):
group_name = option_value
subindex = 0
choices = option_label
else:
group_name = None
subindex = None
choices = [(option_value, option_label)]
groups.append((group_name, subgroup, index))
for subvalue, sublabel in choices:
selected = (
str(subvalue) in value and
(not has_selected or self.allow_multiple_selected)
)
has_selected |= selected
subgroup.append(self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
if subindex is not None:
subindex += 1
return groups
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
index = str(index) if subindex is None else "%s_%s" % (index, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
option_attrs['id'] = self.id_for_label(option_attrs['id'], index)
return {
'name': name,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
'wrap_label': True,
}
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs)
return context
def id_for_label(self, id_, index='0'):
"""
Use an incremented id for each option where the main widget
references the zero index.
"""
if id_ and self.add_id_index:
id_ = '%s_%s' % (id_, index)
return id_
def value_from_datadict(self, data, files, name):
getter = data.get
if self.allow_multiple_selected:
try:
getter = data.getlist
except AttributeError:
pass
return getter(name)
def format_value(self, value):
"""Return selected values as a list."""
if value is None and self.allow_multiple_selected:
return []
if not isinstance(value, (tuple, list)):
value = [value]
return [str(v) if v is not None else '' for v in value]
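# The choices structure that optgroups() understands, sketched with
# illustrative values: a plain (value, label) pair renders as an <option>,
# while a (group_name, [(value, label), ...]) pair renders as an <optgroup>:
#
#     choices = [
#         ('unknown', 'Unknown'),
#         ('Audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]),
#     ]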
class Select(ChoiceWidget):
input_type = 'select'
template_name = 'django/forms/widgets/select.html'
option_template_name = 'django/forms/widgets/select_option.html'
add_id_index = False
checked_attribute = {'selected': True}
option_inherits_attrs = False
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.allow_multiple_selected:
context['widget']['attrs']['multiple'] = True
return context
@staticmethod
def _choice_has_empty_value(choice):
"""Return True if the choice's value is empty string or None."""
value, _ = choice
return value is None or value == ''
def use_required_attribute(self, initial):
"""
Don't render 'required' if the first <option> has a value, as that's
invalid HTML.
"""
use_required_attribute = super().use_required_attribute(initial)
# 'required' is always okay for <select multiple>.
if self.allow_multiple_selected:
return use_required_attribute
first_choice = next(iter(self.choices), None)
return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice)
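# Illustrative sketch (not part of Django's source): the optgroups()
# structure a plain Select produces. The group name is None for ungrouped
# choices, and each option dict carries the selected flag computed above.
#
#   >>> widget = Select(choices=[('1', 'One'), ('2', 'Two')])
#   >>> groups = widget.optgroups('num', widget.format_value('1'))
#   >>> [(name, [o['value'] for o in opts]) for name, opts, index in groups]
#   [(None, ['1']), (None, ['2'])]
#   >>> groups[0][1][0]['selected']
#   True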
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (
('unknown', _('Unknown')),
('true', _('Yes')),
('false', _('No')),
)
super().__init__(attrs, choices)
def format_value(self, value):
try:
return {
True: 'true', False: 'false',
'true': 'true', 'false': 'false',
# For backwards compatibility with Django < 2.2.
'2': 'true', '3': 'false',
}[value]
except KeyError:
return 'unknown'
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {
True: True,
'True': True,
'False': False,
False: False,
'true': True,
'false': False,
# For backwards compatibility with Django < 2.2.
'2': True,
'3': False,
}.get(value)
class SelectMultiple(Select):
allow_multiple_selected = True
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def value_omitted_from_data(self, data, files, name):
# An unselected <select multiple> doesn't appear in POST data, so it's
# never known if the value is actually omitted.
return False
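# Illustrative sketch (not part of Django's source): SelectMultiple prefers
# data.getlist() so repeated keys all survive, while a plain dict (which has
# no getlist()) degrades to single-value access. Assumes configured settings
# for the QueryDict constructor.
#
#   >>> from django.http import QueryDict
#   >>> widget = SelectMultiple()
#   >>> widget.value_from_datadict(QueryDict('tags=a&tags=b'), {}, 'tags')
#   ['a', 'b']
#   >>> widget.value_from_datadict({'tags': 'a'}, {}, 'tags')
#   'a'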
class RadioSelect(ChoiceWidget):
input_type = 'radio'
template_name = 'django/forms/widgets/radio.html'
option_template_name = 'django/forms/widgets/radio_option.html'
class CheckboxSelectMultiple(ChoiceWidget):
allow_multiple_selected = True
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox_select.html'
option_template_name = 'django/forms/widgets/checkbox_option.html'
def use_required_attribute(self, initial):
# Don't use the 'required' attribute because browser validation would
# require all checkboxes to be checked instead of at least one.
return False
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
def id_for_label(self, id_, index=None):
""""
Don't include for="field_0" in <label> because clicking such a label
would toggle the first checkbox.
"""
if index is None:
return ''
return super().id_for_label(id_, index)
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
In addition to the values added by Widget.get_context(), this widget
adds a list of subwidgets to the context as widget['subwidgets'].
These can be looped over and rendered like normal widgets.
You'll probably want to use this class with MultiValueField.
"""
template_name = 'django/forms/widgets/multiwidget.html'
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super().__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = context['widget']['attrs']
input_type = final_attrs.pop('type', None)
id_ = final_attrs.get('id')
subwidgets = []
for i, widget in enumerate(self.widgets):
if input_type is not None:
widget.input_type = input_type
widget_name = '%s_%s' % (name, i)
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
widget_attrs = final_attrs.copy()
widget_attrs['id'] = '%s_%s' % (id_, i)
else:
widget_attrs = final_attrs
subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def id_for_label(self, id_):
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def value_omitted_from_data(self, data, files, name):
return all(
widget.value_omitted_from_data(data, files, name + '_%s' % i)
for i, widget in enumerate(self.widgets)
)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"""
Media for a multiwidget is the combination of all media of the
subwidgets.
"""
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super().__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
"""
A widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
template_name = 'django/forms/widgets/splitdatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
widgets = (
DateInput(
attrs=attrs if date_attrs is None else date_attrs,
format=date_format,
),
TimeInput(
attrs=attrs if time_attrs is None else time_attrs,
format=time_format,
),
)
super().__init__(widgets)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time()]
return [None, None]
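# Illustrative sketch (not part of Django's source): decompress() splits one
# datetime into the [date, time] pair rendered by the two subwidgets.
# Assumes configured settings; with USE_TZ enabled the value is first
# converted to the current time zone.
#
#   >>> SplitDateTimeWidget().decompress(datetime.datetime(2019, 5, 1, 9, 30))
#   [datetime.date(2019, 5, 1), datetime.time(9, 30)]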
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A widget that splits datetime input into two <input type="hidden"> inputs.
"""
template_name = 'django/forms/widgets/splithiddendatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
super().__init__(attrs, date_format, time_format, date_attrs, time_attrs)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = ('', '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
template_name = 'django/forms/widgets/select_date.html'
input_type = 'select'
select_widget = Select
date_re = re.compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
            if len(empty_label) != 3:
                raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = ('', empty_label[0])
self.month_none_value = ('', empty_label[1])
self.day_none_value = ('', empty_label[2])
else:
if empty_label is not None:
self.none_value = ('', empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
date_context = {}
year_choices = [(i, str(i)) for i in self.years]
if not self.is_required:
year_choices.insert(0, self.year_none_value)
year_name = self.year_field % name
date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context(
name=year_name,
value=context['widget']['value']['year'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % year_name},
)
month_choices = list(self.months.items())
if not self.is_required:
month_choices.insert(0, self.month_none_value)
month_name = self.month_field % name
date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context(
name=month_name,
value=context['widget']['value']['month'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % month_name},
)
day_choices = [(i, i) for i in range(1, 32)]
if not self.is_required:
day_choices.insert(0, self.day_none_value)
day_name = self.day_field % name
        date_context['day'] = self.select_widget(attrs, choices=day_choices).get_context(
name=day_name,
value=context['widget']['value']['day'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % day_name},
)
subwidgets = []
for field in self._parse_date_fmt():
subwidgets.append(date_context[field]['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def format_value(self, value):
"""
Return a dict containing the year, month, and day of the current value.
Use dict instead of a datetime to allow invalid dates such as February
31 to display correctly.
"""
year, month, day = None, None, None
if isinstance(value, (datetime.date, datetime.datetime)):
year, month, day = value.year, value.month, value.day
elif isinstance(value, str):
match = self.date_re.match(value)
if match:
# Convert any zeros in the date to empty strings to match the
# empty option value.
year, month, day = [int(val) or '' for val in match.groups()]
elif settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
d = datetime.datetime.strptime(value, input_format)
except ValueError:
pass
else:
year, month, day = d.year, d.month, d.day
return {'year': year, 'month': month, 'day': day}
@staticmethod
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == '':
return None
if y is not None and m is not None and d is not None:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
pass
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
# Return pseudo-ISO dates with zeros for any unselected values,
# e.g. '2017-0-23'.
return '%s-%s-%s' % (y or 0, m or 0, d or 0)
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return not any(
('{}_{}'.format(name, interval) in data)
for interval in ('year', 'month', 'day')
)
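# Illustrative sketch (not part of Django's source): value_from_datadict()
# reassembles the three <select> values. With USE_L10N disabled it returns a
# pseudo-ISO string, substituting 0 for unselected parts. Assumes configured
# settings.
#
#   >>> widget = SelectDateWidget()
#   >>> data = {'bday_year': '1999', 'bday_month': '12', 'bday_day': '31'}
#   >>> widget.value_from_datadict(data, {}, 'bday')
#   '1999-12-31'
#   >>> widget.value_from_datadict({'bday_year': '1999', 'bday_month': '', 'bday_day': ''}, {}, 'bday')
#   '1999-0-0'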
|
3a84af0c8ced58c89fa1a1b0b4c603af7c6b15623dbf40b0f507bd79f39c73de | import datetime
import json
import mimetypes
import os
import re
import sys
import time
from email.header import Header
from http.client import responses
from urllib.parse import quote, urlparse
from django.conf import settings
from django.core import signals, signing
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.http.cookie import SimpleCookie
from django.utils import timezone
from django.utils.encoding import iri_to_uri
from django.utils.http import http_date
_charset_from_content_type_re = re.compile(r';\s*charset=(?P<charset>[^\s;]+)', re.I)
class BadHeaderError(ValueError):
pass
class HttpResponseBase:
"""
An HTTP response base class with dictionary-accessed headers.
This class doesn't handle content. It should not be used directly.
Use the HttpResponse and StreamingHttpResponse subclasses instead.
"""
status_code = 200
def __init__(self, content_type=None, status=None, reason=None, charset=None):
# _headers is a mapping of the lowercase name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._closable_objects = []
# This parameter is set by the handler. It's necessary to preserve the
# historical behavior of request_finished.
self._handler_class = None
self.cookies = SimpleCookie()
self.closed = False
if status is not None:
try:
self.status_code = int(status)
except (ValueError, TypeError):
raise TypeError('HTTP status code must be an integer.')
if not 100 <= self.status_code <= 599:
raise ValueError('HTTP status code must be an integer from 100 to 599.')
self._reason_phrase = reason
self._charset = charset
if content_type is None:
content_type = 'text/html; charset=%s' % self.charset
self['Content-Type'] = content_type
@property
def reason_phrase(self):
if self._reason_phrase is not None:
return self._reason_phrase
# Leave self._reason_phrase unset in order to use the default
# reason phrase for status code.
return responses.get(self.status_code, 'Unknown Status Code')
@reason_phrase.setter
def reason_phrase(self, value):
self._reason_phrase = value
@property
def charset(self):
if self._charset is not None:
return self._charset
content_type = self.get('Content-Type', '')
matched = _charset_from_content_type_re.search(content_type)
if matched:
# Extract the charset and strip its double quotes
return matched.group('charset').replace('"', '')
return settings.DEFAULT_CHARSET
@charset.setter
def charset(self, value):
self._charset = value
def serialize_headers(self):
"""HTTP headers as a bytestring."""
def to_bytes(val, encoding):
return val if isinstance(val, bytes) else val.encode(encoding)
headers = [
(to_bytes(key, 'ascii') + b': ' + to_bytes(value, 'latin-1'))
for key, value in self._headers.values()
]
return b'\r\n'.join(headers)
__bytes__ = serialize_headers
@property
def _content_type_for_repr(self):
return ', "%s"' % self['Content-Type'] if 'Content-Type' in self else ''
def _convert_to_charset(self, value, charset, mime_encode=False):
"""
Convert headers key/value to ascii/latin-1 native strings.
`charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
`value` can't be represented in the given charset, apply MIME-encoding.
"""
if not isinstance(value, (bytes, str)):
value = str(value)
if ((isinstance(value, bytes) and (b'\n' in value or b'\r' in value)) or
isinstance(value, str) and ('\n' in value or '\r' in value)):
raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
try:
if isinstance(value, str):
# Ensure string is valid in given charset
value.encode(charset)
else:
# Convert bytestring using given charset
value = value.decode(charset)
except UnicodeError as e:
if mime_encode:
value = Header(value, 'utf-8', maxlinelen=sys.maxsize).encode()
else:
e.reason += ', HTTP response headers must be in %s format' % charset
raise
return value
def __setitem__(self, header, value):
header = self._convert_to_charset(header, 'ascii')
value = self._convert_to_charset(value, 'latin-1', mime_encode=True)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
self._headers.pop(header.lower(), False)
def __getitem__(self, header):
return self._headers[header.lower()][1]
def has_header(self, header):
"""Case-insensitive check for a header."""
return header.lower() in self._headers
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate=None):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False, samesite=None):
"""
Set a cookie.
``expires`` can be:
- a string in the correct format,
- a naive ``datetime.datetime`` object in UTC,
- an aware ``datetime.datetime`` object in any time zone.
If it is a ``datetime.datetime`` object then calculate ``max_age``.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
if timezone.is_aware(expires):
expires = timezone.make_naive(expires, timezone.utc)
                delta = expires - datetime.datetime.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
else:
self.cookies[key]['expires'] = ''
if max_age is not None:
self.cookies[key]['max-age'] = max_age
            # IE requires expires, so set it if it hasn't been already.
if not expires:
self.cookies[key]['expires'] = http_date(time.time() + max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
if samesite:
if samesite.lower() not in ('lax', 'strict'):
raise ValueError('samesite must be "lax" or "strict".')
self.cookies[key]['samesite'] = samesite
def setdefault(self, key, value):
"""Set a header unless it has already been set."""
if key not in self:
self[key] = value
def set_signed_cookie(self, key, value, salt='', **kwargs):
value = signing.get_cookie_signer(salt=key + salt).sign(value)
return self.set_cookie(key, value, **kwargs)
def delete_cookie(self, key, path='/', domain=None):
# Most browsers ignore the Set-Cookie header if the cookie name starts
# with __Host- or __Secure- and the cookie doesn't use the secure flag.
secure = key.startswith(('__Secure-', '__Host-'))
self.set_cookie(
key, max_age=0, path=path, domain=domain, secure=secure,
expires='Thu, 01 Jan 1970 00:00:00 GMT',
)
# Common methods used by subclasses
def make_bytes(self, value):
"""Turn a value into a bytestring encoded in the output charset."""
# Per PEP 3333, this response body must be bytes. To avoid returning
# an instance of a subclass, this function returns `bytes(value)`.
# This doesn't make a copy when `value` already contains bytes.
# Handle string types -- we can't rely on force_bytes here because:
# - Python attempts str conversion first
# - when self._charset != 'utf-8' it re-encodes the content
if isinstance(value, bytes):
return bytes(value)
if isinstance(value, str):
return bytes(value.encode(self.charset))
# Handle non-string types.
return str(value).encode(self.charset)
# These methods partially implement the file-like object interface.
# See https://docs.python.org/library/io.html#io.IOBase
# The WSGI server must call this method upon completion of the request.
# See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
def close(self):
for closable in self._closable_objects:
try:
closable.close()
except Exception:
pass
self.closed = True
signals.request_finished.send(sender=self._handler_class)
def write(self, content):
raise IOError("This %s instance is not writable" % self.__class__.__name__)
def flush(self):
pass
def tell(self):
raise IOError("This %s instance cannot tell its position" % self.__class__.__name__)
# These methods partially implement a stream-like object interface.
# See https://docs.python.org/library/io.html#io.IOBase
def readable(self):
return False
def seekable(self):
return False
def writable(self):
return False
def writelines(self, lines):
raise IOError("This %s instance is not writable" % self.__class__.__name__)
class HttpResponse(HttpResponseBase):
"""
An HTTP response class with a string as content.
    This content can be read, appended to, or replaced.
"""
streaming = False
def __init__(self, content=b'', *args, **kwargs):
super().__init__(*args, **kwargs)
# Content is a bytestring. See the `content` property methods.
self.content = content
def __repr__(self):
return '<%(cls)s status_code=%(status_code)d%(content_type)s>' % {
'cls': self.__class__.__name__,
'status_code': self.status_code,
'content_type': self._content_type_for_repr,
}
def serialize(self):
"""Full HTTP message, including headers, as a bytestring."""
return self.serialize_headers() + b'\r\n\r\n' + self.content
__bytes__ = serialize
@property
def content(self):
return b''.join(self._container)
@content.setter
def content(self, value):
# Consume iterators upon assignment to allow repeated iteration.
if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):
content = b''.join(self.make_bytes(chunk) for chunk in value)
if hasattr(value, 'close'):
try:
value.close()
except Exception:
pass
else:
content = self.make_bytes(value)
# Create a list of properly encoded bytestrings to support write().
self._container = [content]
def __iter__(self):
return iter(self._container)
def write(self, content):
self._container.append(self.make_bytes(content))
def tell(self):
return len(self.content)
def getvalue(self):
return self.content
def writable(self):
return True
def writelines(self, lines):
for line in lines:
self.write(line)
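# Illustrative sketch (not part of Django's source): the content setter
# consumes iterators so the body can be iterated repeatedly, and write()
# appends to the container. Assumes configured settings.
#
#   >>> response = HttpResponse()
#   >>> response.content = (chunk for chunk in [b'hello', b' '])
#   >>> response.write('world')
#   >>> response.content
#   b'hello world'
#   >>> response.tell()
#   11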
class StreamingHttpResponse(HttpResponseBase):
"""
A streaming HTTP response class with an iterator as content.
This should only be iterated once, when the response is streamed to the
client. However, it can be appended to or replaced with a new iterator
that wraps the original content (or yields entirely new content).
"""
streaming = True
def __init__(self, streaming_content=(), *args, **kwargs):
super().__init__(*args, **kwargs)
# `streaming_content` should be an iterable of bytestrings.
# See the `streaming_content` property methods.
self.streaming_content = streaming_content
@property
def content(self):
raise AttributeError(
"This %s instance has no `content` attribute. Use "
"`streaming_content` instead." % self.__class__.__name__
)
@property
def streaming_content(self):
return map(self.make_bytes, self._iterator)
@streaming_content.setter
def streaming_content(self, value):
self._set_streaming_content(value)
def _set_streaming_content(self, value):
# Ensure we can never iterate on "value" more than once.
self._iterator = iter(value)
if hasattr(value, 'close'):
self._closable_objects.append(value)
def __iter__(self):
return self.streaming_content
def getvalue(self):
return b''.join(self.streaming_content)
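# Illustrative sketch (not part of Django's source): streaming content is a
# one-shot iterator, so a second pass yields nothing; this is why the class
# docstring warns against iterating more than once. Assumes configured
# settings.
#
#   >>> response = StreamingHttpResponse(iter([b'a', b'b']))
#   >>> b''.join(response)
#   b'ab'
#   >>> b''.join(response)
#   b''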
class FileResponse(StreamingHttpResponse):
"""
A streaming HTTP response class optimized for files.
"""
block_size = 4096
def __init__(self, *args, as_attachment=False, filename='', **kwargs):
self.as_attachment = as_attachment
self.filename = filename
super().__init__(*args, **kwargs)
def _set_streaming_content(self, value):
if not hasattr(value, 'read'):
self.file_to_stream = None
return super()._set_streaming_content(value)
self.file_to_stream = filelike = value
if hasattr(filelike, 'close'):
self._closable_objects.append(filelike)
value = iter(lambda: filelike.read(self.block_size), b'')
self.set_headers(filelike)
super()._set_streaming_content(value)
def set_headers(self, filelike):
"""
Set some common response headers (Content-Length, Content-Type, and
Content-Disposition) based on the `filelike` response content.
"""
encoding_map = {
'bzip2': 'application/x-bzip',
'gzip': 'application/gzip',
'xz': 'application/x-xz',
}
filename = getattr(filelike, 'name', None)
filename = filename if (isinstance(filename, str) and filename) else self.filename
if os.path.isabs(filename):
self['Content-Length'] = os.path.getsize(filelike.name)
elif hasattr(filelike, 'getbuffer'):
self['Content-Length'] = filelike.getbuffer().nbytes
if self.get('Content-Type', '').startswith('text/html'):
if filename:
content_type, encoding = mimetypes.guess_type(filename)
# Encoding isn't set to prevent browsers from automatically
# uncompressing files.
content_type = encoding_map.get(encoding, content_type)
self['Content-Type'] = content_type or 'application/octet-stream'
else:
self['Content-Type'] = 'application/octet-stream'
if self.as_attachment:
filename = self.filename or os.path.basename(filename)
if filename:
try:
filename.encode('ascii')
file_expr = 'filename="{}"'.format(filename)
except UnicodeEncodeError:
file_expr = "filename*=utf-8''{}".format(quote(filename))
self['Content-Disposition'] = 'attachment; {}'.format(file_expr)
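# Illustrative sketch (not part of Django's source): set_headers() derives
# Content-Length from an in-memory buffer, guesses Content-Type from the
# explicit filename, and builds Content-Disposition for attachments.
# Assumes configured settings.
#
#   >>> from io import BytesIO
#   >>> response = FileResponse(BytesIO(b'%PDF-1'), as_attachment=True, filename='report.pdf')
#   >>> response['Content-Length'], response['Content-Type']
#   ('6', 'application/pdf')
#   >>> response['Content-Disposition']
#   'attachment; filename="report.pdf"'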
class HttpResponseRedirectBase(HttpResponse):
allowed_schemes = ['http', 'https', 'ftp']
def __init__(self, redirect_to, *args, **kwargs):
super().__init__(*args, **kwargs)
self['Location'] = iri_to_uri(redirect_to)
parsed = urlparse(str(redirect_to))
if parsed.scheme and parsed.scheme not in self.allowed_schemes:
raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
url = property(lambda self: self['Location'])
def __repr__(self):
return '<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">' % {
'cls': self.__class__.__name__,
'status_code': self.status_code,
'content_type': self._content_type_for_repr,
'url': self.url,
}
class HttpResponseRedirect(HttpResponseRedirectBase):
status_code = 302
class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
status_code = 301
class HttpResponseNotModified(HttpResponse):
status_code = 304
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
del self['content-type']
@HttpResponse.content.setter
def content(self, value):
if value:
raise AttributeError("You cannot set content to a 304 (Not Modified) response")
self._container = []
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods, *args, **kwargs):
super().__init__(*args, **kwargs)
self['Allow'] = ', '.join(permitted_methods)
def __repr__(self):
return '<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>' % {
'cls': self.__class__.__name__,
'status_code': self.status_code,
'content_type': self._content_type_for_repr,
'methods': self['Allow'],
}
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
class Http404(Exception):
pass
class JsonResponse(HttpResponse):
"""
An HTTP response class that consumes data to be serialized to JSON.
:param data: Data to be dumped into json. By default only ``dict`` objects
are allowed to be passed due to a security flaw before EcmaScript 5. See
the ``safe`` parameter for more information.
:param encoder: Should be a json encoder class. Defaults to
``django.core.serializers.json.DjangoJSONEncoder``.
:param safe: Controls if only ``dict`` objects may be serialized. Defaults
to ``True``.
:param json_dumps_params: A dictionary of kwargs passed to json.dumps().
"""
def __init__(self, data, encoder=DjangoJSONEncoder, safe=True,
json_dumps_params=None, **kwargs):
if safe and not isinstance(data, dict):
raise TypeError(
'In order to allow non-dict objects to be serialized set the '
'safe parameter to False.'
)
if json_dumps_params is None:
json_dumps_params = {}
kwargs.setdefault('content_type', 'application/json')
data = json.dumps(data, cls=encoder, **json_dumps_params)
super().__init__(content=data, **kwargs)
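# Illustrative sketch (not part of Django's source): dicts serialize by
# default; any other top-level type requires safe=False. Assumes configured
# settings.
#
#   >>> JsonResponse({'answer': 42}).content
#   b'{"answer": 42}'
#   >>> JsonResponse([1, 2], safe=False).content
#   b'[1, 2]'
#   >>> JsonResponse([1, 2])
#   Traceback (most recent call last):
#       ...
#   TypeError: In order to allow non-dict objects to be serialized set the safe parameter to False.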
|
3efc84a4c995a046c8ad8cad1d5abac328ad655696f587b084f3f06a10500fff | import copy
import re
from io import BytesIO
from itertools import chain
from urllib.parse import quote, urlencode, urljoin, urlsplit
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost, ImproperlyConfigured, RequestDataTooBig,
)
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils.datastructures import (
CaseInsensitiveMapping, ImmutableList, MultiValueDict,
)
from django.utils.encoding import escape_uri_path, iri_to_uri
from django.utils.functional import cached_property
from django.utils.http import is_same_domain, limited_parse_qsl
RAISE_ERROR = object()
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
pass
class RawPostDataException(Exception):
"""
You cannot access raw_post_data from a request that has
multipart/* POST data if it has been accessed via POST,
    FILES, etc.
"""
pass
class HttpRequest:
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET = QueryDict(mutable=True)
self.POST = QueryDict(mutable=True)
self.COOKIES = {}
self.META = {}
self.FILES = MultiValueDict()
self.path = ''
self.path_info = ''
self.method = None
self.resolver_match = None
self.content_type = None
self.content_params = None
def __repr__(self):
if self.method is None or not self.get_full_path():
return '<%s>' % self.__class__.__name__
return '<%s: %s %r>' % (self.__class__.__name__, self.method, self.get_full_path())
@cached_property
def headers(self):
return HttpHeaders(self.META)
def _get_raw_host(self):
"""
Return the HTTP host using the environment or request headers. Skip
allowed hosts protection, so may return an insecure host.
"""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = self.get_port()
if server_port != ('443' if self.is_secure() else '80'):
host = '%s:%s' % (host, server_port)
return host
def get_host(self):
"""Return the HTTP host using the environment or request headers."""
host = self._get_raw_host()
# Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
allowed_hosts = settings.ALLOWED_HOSTS
if settings.DEBUG and not allowed_hosts:
allowed_hosts = ['localhost', '127.0.0.1', '[::1]']
domain, port = split_domain_port(host)
if domain and validate_host(domain, allowed_hosts):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += " You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += " The domain name provided is not valid according to RFC 1034/1035."
raise DisallowedHost(msg)
def get_port(self):
"""Return the port number for the request as a string."""
if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META:
port = self.META['HTTP_X_FORWARDED_PORT']
else:
port = self.META['SERVER_PORT']
return str(port)
def get_full_path(self, force_append_slash=False):
return self._get_full_path(self.path, force_append_slash)
def get_full_path_info(self, force_append_slash=False):
return self._get_full_path(self.path_info, force_append_slash)
def _get_full_path(self, path, force_append_slash):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s%s' % (
escape_uri_path(path),
'/' if force_append_slash and not path.endswith('/') else '',
('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
"""
Attempt to return a signed cookie. If the signature fails or the
cookie has expired, raise an exception, unless the `default` argument
is provided, in which case return that value.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
def get_raw_uri(self):
"""
Return an absolute URI from variables available in this request. Skip
allowed hosts protection, so may return insecure URI.
"""
return '{scheme}://{host}{path}'.format(
scheme=self.scheme,
host=self._get_raw_host(),
path=self.get_full_path(),
)
def build_absolute_uri(self, location=None):
"""
Build an absolute URI from the location and the variables available in
this request. If no ``location`` is specified, build the absolute URI
using request.get_full_path(). If the location is absolute, convert it
to an RFC 3987 compliant URI and return it. If location is relative or
is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base
URL constructed from the request variables.
"""
if location is None:
# Make it an absolute url (but schemeless and domainless) for the
# edge case that the path starts with '//'.
location = '//%s' % self.get_full_path()
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
# Handle the simple, most common case. If the location is absolute
# and a scheme or host (netloc) isn't provided, skip an expensive
# urljoin() as long as no path segments are '.' or '..'.
if (bits.path.startswith('/') and not bits.scheme and not bits.netloc and
'/./' not in bits.path and '/../' not in bits.path):
# If location starts with '//' but has no netloc, reuse the
# schema and netloc from the current request. Strip the double
# slashes and continue as if it wasn't specified.
if location.startswith('//'):
location = location[2:]
location = self._current_scheme_host + location
else:
# Join the constructed URL with the provided location, which
# allows the provided location to apply query strings to the
# base path.
location = urljoin(self._current_scheme_host + self.path, location)
return iri_to_uri(location)
@cached_property
def _current_scheme_host(self):
return '{}://{}'.format(self.scheme, self.get_host())
def _get_scheme(self):
"""
Hook for subclasses like WSGIRequest to implement. Return 'http' by
default.
"""
return 'http'
@property
def scheme(self):
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
)
if self.META.get(header) == value:
return 'https'
return self._get_scheme()
def is_secure(self):
return self.scheme == 'https'
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Set the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, remove and recreate it on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, 'GET'):
del self.GET
if hasattr(self, '_post'):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Return a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning="You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, '_body'):
if self._read_started:
raise RawPostDataException("You cannot access body after reading from request's data stream")
# Limit the maximum request data size that will be handled in-memory.
if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
try:
self._body = self.read()
except IOError as e:
raise UnreadablePostError(*e.args) from e
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict()
self._files = MultiValueDict()
def _load_post_and_files(self):
"""Populate self._post and self._files if the content-type is a form type"""
if self.method != 'POST':
self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_body'):
self._mark_post_parse_error()
return
if self.content_type == 'multipart/form-data':
if hasattr(self, '_body'):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except MultiPartParserError:
# An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
                # self.POST, set self._post and self._files to prevent
# attempts to parse POST data again.
self._mark_post_parse_error()
raise
elif self.content_type == 'application/x-www-form-urlencoded':
self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
else:
self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
def close(self):
if hasattr(self, '_files'):
for f in chain.from_iterable(l[1] for l in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except IOError as e:
raise UnreadablePostError(*e.args) from e
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except IOError as e:
raise UnreadablePostError(*e.args) from e
def __iter__(self):
return iter(self.readline, b'')
def readlines(self):
return list(self)
class HttpHeaders(CaseInsensitiveMapping):
HTTP_PREFIX = 'HTTP_'
# PEP 333 gives two headers which aren't prepended with HTTP_.
UNPREFIXED_HEADERS = {'CONTENT_TYPE', 'CONTENT_LENGTH'}
def __init__(self, environ):
headers = {}
for header, value in environ.items():
name = self.parse_header_name(header)
if name:
headers[name] = value
super().__init__(headers)
@classmethod
def parse_header_name(cls, header):
if header.startswith(cls.HTTP_PREFIX):
header = header[len(cls.HTTP_PREFIX):]
elif header not in cls.UNPREFIXED_HEADERS:
return None
return header.replace('_', '-').title()
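# Illustrative sketch (not part of Django's source): parse_header_name()
# maps WSGI environ keys to HTTP header spellings and drops keys that
# aren't headers.
#
#   >>> HttpHeaders.parse_header_name('HTTP_ACCEPT_ENCODING')
#   'Accept-Encoding'
#   >>> HttpHeaders.parse_header_name('CONTENT_TYPE')
#   'Content-Type'
#   >>> HttpHeaders.parse_header_name('SERVER_NAME') is None
#   True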
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict which represents a query string.
A QueryDict can be used to represent GET or POST data. It subclasses
MultiValueDict since keys in such data can be repeated, for instance
in the data from a form with a <select multiple> field.
By default QueryDicts are immutable, though the copy() method
will always return a mutable copy.
Both keys and values set on this class are converted from the given encoding
(DEFAULT_CHARSET by default) to str.
"""
    # These are both reset in __init__, but are specified here at the class
    # level so that unpickling will have valid values.
_mutable = True
_encoding = None
def __init__(self, query_string=None, mutable=False, encoding=None):
super().__init__()
self.encoding = encoding or settings.DEFAULT_CHARSET
query_string = query_string or ''
parse_qsl_kwargs = {
'keep_blank_values': True,
'fields_limit': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
'encoding': self.encoding,
}
if isinstance(query_string, bytes):
# query_string normally contains URL-encoded data, a subset of ASCII.
try:
query_string = query_string.decode(self.encoding)
except UnicodeDecodeError:
# ... but some user agents are misbehaving :-(
query_string = query_string.decode('iso-8859-1')
for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs):
self.appendlist(key, value)
self._mutable = mutable
@classmethod
def fromkeys(cls, iterable, value='', mutable=False, encoding=None):
"""
Return a new QueryDict with keys (may be repeated) from an iterable and
values from value.
"""
q = cls('', mutable=True, encoding=encoding)
for key in iterable:
q.appendlist(key, value)
if not mutable:
q._mutable = False
return q
@property
def encoding(self):
if self._encoding is None:
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super().__setitem__(key, value)
def __delitem__(self, key):
self._assert_mutable()
super().__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in self.lists():
result.setlist(key, value)
return result
def __deepcopy__(self, memo):
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in self.lists():
result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
super().setlist(key, list_)
def setlistdefault(self, key, default_list=None):
self._assert_mutable()
return super().setlistdefault(key, default_list)
def appendlist(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super().appendlist(key, value)
def pop(self, key, *args):
self._assert_mutable()
return super().pop(key, *args)
def popitem(self):
self._assert_mutable()
return super().popitem()
def clear(self):
self._assert_mutable()
super().clear()
def setdefault(self, key, default=None):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
default = bytes_to_text(default, self.encoding)
return super().setdefault(key, default)
def copy(self):
"""Return a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Return an encoded string of all query string arguments.
`safe` specifies characters which don't require quoting, for example::
>>> q = QueryDict(mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
safe = safe.encode(self.encoding)
            def encode(k, v):
                return '%s=%s' % (quote(k, safe), quote(v, safe))
else:
def encode(k, v):
return urlencode({k: v})
for k, list_ in self.lists():
output.extend(
encode(k.encode(self.encoding), str(v).encode(self.encoding))
for v in list_
)
return '&'.join(output)
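# Illustrative sketch (not part of Django's source): QueryDicts are
# immutable unless created with mutable=True; copy() always returns a
# mutable deep copy. Assumes configured settings for the default encoding.
#
#   >>> q = QueryDict('a=1&a=2')
#   >>> q.getlist('a')
#   ['1', '2']
#   >>> q['a'] = '3'
#   Traceback (most recent call last):
#       ...
#   AttributeError: This QueryDict instance is immutable
#   >>> q.copy()['a'] = '3'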
# It's neither necessary nor appropriate to use
# django.utils.encoding.force_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
"""
Convert bytes objects to strings, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Return any non-bytes objects without change.
"""
if isinstance(s, bytes):
return str(s, encoding, 'replace')
else:
return s
def split_domain_port(host):
"""
Return a (domain, port) tuple from a given host.
Returned domain is lowercased. If the host is invalid, the domain will be
empty.
"""
host = host.lower()
if not host_validation_re.match(host):
return '', ''
if host[-1] == ']':
# It's an IPv6 address without a port.
return host, ''
bits = host.rsplit(':', 1)
domain, port = bits if len(bits) == 2 else (bits[0], '')
# Remove a trailing dot (if present) from the domain.
domain = domain[:-1] if domain.endswith('.') else domain
return domain, port
def validate_host(host, allowed_hosts):
"""
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lowercased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
"""
return any(pattern == '*' or is_same_domain(host, pattern) for pattern in allowed_hosts)
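# Illustrative sketch (not part of Django's source): the host helpers above
# in action. A leading dot in an allowed-host pattern matches the domain and
# all of its subdomains.
#
#   >>> split_domain_port('www.example.com:8000')
#   ('www.example.com', '8000')
#   >>> split_domain_port('[::1]')
#   ('[::1]', '')
#   >>> validate_host('sub.example.com', ['.example.com'])
#   True
#   >>> validate_host('evil.com', ['.example.com'])
#   False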
|
04e6c7833dd8c2135a8c541779f36fb7dc5c1826f966e6f5217c16f3ecfa9d5d | """
LANG_INFO is a dictionary structure to provide meta information about languages.
About name_local: capitalize it as if your language name were appearing
inside a sentence in your language.
The 'fallback' key can be used to specify special fallback logic that doesn't
follow the traditional 'fr-ca' -> 'fr' pattern.
"""
LANG_INFO = {
'af': {
'bidi': False,
'code': 'af',
'name': 'Afrikaans',
'name_local': 'Afrikaans',
},
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': 'العربيّة',
},
'ast': {
'bidi': False,
'code': 'ast',
'name': 'Asturian',
'name_local': 'asturianu',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': 'Azərbaycanca',
},
'be': {
'bidi': False,
'code': 'be',
'name': 'Belarusian',
'name_local': 'беларуская',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': 'български',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': 'বাংলা',
},
'br': {
'bidi': False,
'code': 'br',
'name': 'Breton',
'name_local': 'brezhoneg',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': 'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': 'català',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': 'česky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': 'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': 'dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': 'Deutsch',
},
'dsb': {
'bidi': False,
'code': 'dsb',
'name': 'Lower Sorbian',
'name_local': 'dolnoserbski',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': 'Ελληνικά',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': 'English',
},
'en-au': {
'bidi': False,
'code': 'en-au',
'name': 'Australian English',
'name_local': 'Australian English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': 'British English',
},
'eo': {
'bidi': False,
'code': 'eo',
'name': 'Esperanto',
'name_local': 'Esperanto',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': 'español',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': 'español de Argentina',
},
'es-co': {
'bidi': False,
'code': 'es-co',
'name': 'Colombian Spanish',
'name_local': 'español de Colombia',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': 'español de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': 'español de Nicaragua',
},
'es-ve': {
'bidi': False,
'code': 'es-ve',
'name': 'Venezuelan Spanish',
'name_local': 'español de Venezuela',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': 'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': 'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': 'فارسی',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': 'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': 'français',
},
'fy': {
'bidi': False,
'code': 'fy',
'name': 'Frisian',
'name_local': 'frysk',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': 'Gaeilge',
},
'gd': {
'bidi': False,
'code': 'gd',
'name': 'Scottish Gaelic',
'name_local': 'Gàidhlig',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': 'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': 'עברית',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': 'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': 'Hrvatski',
},
'hsb': {
'bidi': False,
'code': 'hsb',
'name': 'Upper Sorbian',
'name_local': 'hornjoserbsce',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': 'Magyar',
},
'hy': {
'bidi': False,
'code': 'hy',
'name': 'Armenian',
'name_local': 'հայերեն',
},
'ia': {
'bidi': False,
'code': 'ia',
'name': 'Interlingua',
'name_local': 'Interlingua',
},
'io': {
'bidi': False,
'code': 'io',
'name': 'Ido',
'name_local': 'ido',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': 'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': 'Íslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': 'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': '日本語',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': 'ქართული',
},
'kab': {
'bidi': False,
'code': 'kab',
'name': 'Kabyle',
'name_local': 'taqbaylit',
},
'kk': {
'bidi': False,
'code': 'kk',
'name': 'Kazakh',
'name_local': 'Қазақ',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': 'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': 'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': '한국어',
},
'lb': {
'bidi': False,
'code': 'lb',
'name': 'Luxembourgish',
'name_local': 'Lëtzebuergesch',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': 'Lietuviškai',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': 'latviešu',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': 'Македонски',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': 'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': 'Mongolian',
},
'mr': {
'bidi': False,
'code': 'mr',
'name': 'Marathi',
'name_local': 'मराठी',
},
'my': {
'bidi': False,
'code': 'my',
'name': 'Burmese',
'name_local': 'မြန်မာဘာသာ',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': 'norsk (bokmål)',
},
'ne': {
'bidi': False,
'code': 'ne',
'name': 'Nepali',
'name_local': 'नेपाली',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': 'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': 'norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': 'norsk',
},
'os': {
'bidi': False,
'code': 'os',
'name': 'Ossetic',
'name_local': 'Ирон',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': 'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': 'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': 'Português',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': 'Português Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': 'Română',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': 'Русский',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': 'Slovensky',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': 'Slovenščina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': 'shqip',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': 'српски',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': 'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': 'svenska',
},
'sw': {
'bidi': False,
'code': 'sw',
'name': 'Swahili',
'name_local': 'Kiswahili',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': 'தமிழ்',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': 'తెలుగు',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': 'ภาษาไทย',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': 'Türkçe',
},
'tt': {
'bidi': False,
'code': 'tt',
'name': 'Tatar',
'name_local': 'Татарча',
},
'udm': {
'bidi': False,
'code': 'udm',
'name': 'Udmurt',
'name_local': 'Удмурт',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': 'Українська',
},
'ur': {
'bidi': True,
'code': 'ur',
'name': 'Urdu',
'name_local': 'اردو',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': 'Tiếng Việt',
},
'zh-cn': {
'fallback': ['zh-hans'],
},
'zh-hans': {
'bidi': False,
'code': 'zh-hans',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hant': {
'bidi': False,
'code': 'zh-hant',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
'zh-hk': {
'fallback': ['zh-hant'],
},
'zh-mo': {
'fallback': ['zh-hant'],
},
'zh-my': {
'fallback': ['zh-hans'],
},
'zh-sg': {
'fallback': ['zh-hans'],
},
'zh-tw': {
'fallback': ['zh-hant'],
},
}
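# Illustrative sketch (not part of Django's source): resolving a code that
# only has a 'fallback' entry, roughly what
# django.utils.translation.get_language_info() does for codes like 'zh-cn'.
#
#   >>> info = LANG_INFO['zh-cn']
#   >>> while 'name' not in info:
#   ...     info = LANG_INFO[info['fallback'][0]]
#   >>> info['code'], info['name_local']
#   ('zh-hans', '简体中文')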
|
2f6bb4ab06e7044dc9ab88cf9b8afdcc00e659d58e5e0a396faa95904bb594c2 | """Translation helper functions."""
import functools
import gettext as gettext_module
import os
import re
import sys
import warnings
from collections import OrderedDict
from threading import local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils.safestring import SafeData, mark_safe
from . import LANGUAGE_SESSION_KEY, to_language, to_locale
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_re = re.compile(
r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$',
re.IGNORECASE
)
language_code_prefix_re = re.compile(r'^/(\w+([@-]\w+)?)(/|$)')
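# Illustrative sketch (not part of Django's source): what these patterns
# accept. language_code_re validates codes like 'pt-br' (hyphenated, with an
# optional @variant), and language_code_prefix_re pulls the code out of a
# URL path prefix.
#
#   >>> bool(language_code_re.match('pt-br'))
#   True
#   >>> bool(language_code_re.match('pt_BR'))
#   False
#   >>> language_code_prefix_re.match('/en-us/admin/').group(1)
#   'en-us'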
@receiver(setting_changed)
def reset_cache(**kwargs):
"""
Reset global state when LANGUAGES setting has been changed, as some
languages should no longer be accepted.
"""
if kwargs['setting'] in ('LANGUAGES', 'LANGUAGE_CODE'):
check_for_language.cache_clear()
get_languages.cache_clear()
get_supported_language_variant.cache_clear()
class DjangoTranslation(gettext_module.GNUTranslations):
"""
Set up the GNUTranslations context with regard to output charset.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
domain = 'django'
def __init__(self, language, domain=None, localedirs=None):
"""Create a GNUTranslations() using many locale directories"""
gettext_module.GNUTranslations.__init__(self)
if domain is not None:
self.domain = domain
self.__language = language
self.__to_language = to_language(language)
self.__locale = to_locale(language)
self._catalog = None
# If a language doesn't have a catalog, use the Germanic default for
# pluralization: anything except one is pluralized.
self.plural = lambda n: int(n != 1)
if self.domain == 'django':
if localedirs is not None:
# A module-level cache is used for caching 'django' translations
warnings.warn("localedirs is ignored when domain is 'django'.", RuntimeWarning)
localedirs = None
self._init_translation_catalog()
if localedirs:
for localedir in localedirs:
translation = self._new_gnu_trans(localedir)
self.merge(translation)
else:
self._add_installed_apps_translations()
self._add_local_translations()
if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and self._catalog is None:
# default lang should have at least one translation file available.
raise IOError("No translation files found for default language %s." % settings.LANGUAGE_CODE)
self._add_fallback(localedirs)
if self._catalog is None:
# No catalogs found for this language, set an empty catalog.
self._catalog = {}
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def _new_gnu_trans(self, localedir, use_null_fallback=True):
"""
Return a mergeable gettext.GNUTranslations instance.
A convenience wrapper. By default gettext uses 'fallback=False'.
The parameter is named `use_null_fallback` to avoid confusion with any
other references to 'fallback'.
"""
return gettext_module.translation(
domain=self.domain,
localedir=localedir,
languages=[self.__locale],
fallback=use_null_fallback,
)
def _init_translation_catalog(self):
"""Create a base catalog using global django translations."""
settingsfile = sys.modules[settings.__module__].__file__
localedir = os.path.join(os.path.dirname(settingsfile), 'locale')
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_installed_apps_translations(self):
"""Merge translations from each installed app."""
try:
app_configs = reversed(list(apps.get_app_configs()))
except AppRegistryNotReady:
raise AppRegistryNotReady(
"The translation infrastructure cannot be initialized before the "
"apps registry is ready. Check that you don't make non-lazy "
"gettext calls at import time.")
for app_config in app_configs:
localedir = os.path.join(app_config.path, 'locale')
if os.path.exists(localedir):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_local_translations(self):
"""Merge translations defined in LOCALE_PATHS."""
for localedir in reversed(settings.LOCALE_PATHS):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_fallback(self, localedirs=None):
"""Set the GNUTranslations() fallback with the default language."""
# Don't set a fallback for the default language or any English variant
# (as it's empty, so it'll ALWAYS fall back to the default language)
if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):
return
if self.domain == 'django':
# Get from cache
default_translation = translation(settings.LANGUAGE_CODE)
else:
default_translation = DjangoTranslation(
settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs
)
self.add_fallback(default_translation)
def merge(self, other):
"""Merge another translation into this catalog."""
if not getattr(other, '_catalog', None):
return # NullTranslations() has no _catalog
if self._catalog is None:
# Take plural and _info from first catalog found (generally Django's).
self.plural = other.plural
self._info = other._info.copy()
self._catalog = other._catalog.copy()
else:
self._catalog.update(other._catalog)
if other._fallback:
self.add_fallback(other._fallback)
def language(self):
"""Return the translation language."""
return self.__language
def to_language(self):
"""Return the translation language name."""
return self.__to_language
def translation(language):
"""
Return a translation object in the default 'django' domain.
"""
global _translations
if language not in _translations:
_translations[language] = DjangoTranslation(language)
return _translations[language]
def activate(language):
"""
Fetch the translation object for a given language and install it as the
current translation object for the current thread.
"""
if not language:
return
_active.value = translation(language)
def deactivate():
"""
Uninstall the active translation object so that further _() calls resolve
to the default translation object.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Make the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
_active.value.to_language = lambda *args: None
def get_language():
"""Return the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Return selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
lang = get_language()
if lang is None:
return False
else:
base_lang = lang.split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Return the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return _default
def gettext(message):
"""
Translate the 'message' string. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
if eol_message:
_default = _default or translation(settings.LANGUAGE_CODE)
translation_object = getattr(_active, "value", _default)
result = translation_object.gettext(eol_message)
else:
# Return an empty value of the corresponding type if an empty message
# is given, instead of metadata, which is the default gettext behavior.
result = type(message)('')
if isinstance(message, SafeData):
return mark_safe(result)
return result
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = gettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
elif isinstance(message, SafeData):
result = mark_safe(result)
return result
def gettext_noop(message):
"""
Mark strings for translation but don't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Return a string of the translation of either the singular or plural,
based on the number.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ngettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ngettext(singular, plural, number)
return result
def all_locale_paths():
"""
Return a list of paths to user-provided language files.
"""
globalpath = os.path.join(
os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
app_paths = []
for app_config in apps.get_app_configs():
locale_path = os.path.join(app_config.path, 'locale')
if os.path.exists(locale_path):
app_paths.append(locale_path)
return [globalpath, *settings.LOCALE_PATHS, *app_paths]
@functools.lru_cache(maxsize=1000)
def check_for_language(lang_code):
"""
Check whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available.
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
# First, a quick check to make sure lang_code is well-formed (#21458)
if lang_code is None or not language_code_re.search(lang_code):
return False
return any(
gettext_module.find('django', path, [to_locale(lang_code)]) is not None
for path in all_locale_paths()
)
@functools.lru_cache()
def get_languages():
"""
Cache of settings.LANGUAGES in an OrderedDict for easy lookups by key.
"""
return OrderedDict(settings.LANGUAGES)
@functools.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
"""
Return the language code that's listed in supported languages, possibly
selecting a more generic variant. Raise LookupError if nothing is found.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
except KeyError:
pass
generic_lang_code = lang_code.split('-')[0]
possible_lang_codes.append(generic_lang_code)
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + '-'):
return supported_code
raise LookupError(lang_code)
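# An illustrative behavior sketch for get_supported_language_variant(),
# assuming a hypothetical configuration where LANGUAGES contains 'fr' and
# 'fr-fr' and their catalogs are available:
#
#   get_supported_language_variant('fr-fr')  --> 'fr-fr'  (exact match)
#   get_supported_language_variant('fr-ca')  --> 'fr'     (generic variant)
#
# If only 'fr-fr' were configured, 'fr-ca' would still resolve to 'fr-fr'
# through the country-specific loop above when strict=False; with
# strict=True it raises LookupError instead.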
def get_language_from_path(path, strict=False):
"""
Return the language code if there's a valid language code found in `path`.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
"""
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyze the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
if check_path:
lang_code = get_language_from_path(request.path_info)
if lang_code is not None:
return lang_code
supported_lang_codes = get_languages()
if hasattr(request, 'session'):
lang_code = request.session.get(LANGUAGE_SESSION_KEY)
if lang_code is not None and lang_code in supported_lang_codes and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
if not language_code_re.search(accept_lang):
continue
try:
return get_supported_language_variant(accept_lang)
except LookupError:
continue
try:
return get_supported_language_variant(settings.LANGUAGE_CODE)
except LookupError:
return settings.LANGUAGE_CODE
@functools.lru_cache(maxsize=1000)
def parse_accept_lang_header(lang_string):
"""
Parse the lang_string, which is the body of an HTTP Accept-Language
header, and return a tuple of (lang, q-value), ordered by 'q' values.
Return an empty tuple if there are any format errors in lang_string.
"""
result = []
pieces = accept_language_re.split(lang_string.lower())
if pieces[-1]:
return ()
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i:i + 3]
if first:
return ()
if priority:
priority = float(priority)
else:
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return tuple(result)
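# A minimal usage sketch for parse_accept_lang_header() (illustrative): the
# parser is pure string handling, so it can be exercised directly. Pairs
# come back sorted by descending q-value; a missing q defaults to 1.0 and
# language codes are lowercased.
def _example_parse_accept_lang_header():
    assert parse_accept_lang_header('en-AU;q=0.8, es-419, *;q=0.5') == (
        ('es-419', 1.0), ('en-au', 0.8), ('*', 0.5),
    )
    # Any format error makes the whole header parse as empty.
    assert parse_accept_lang_header('de;q=high') == ()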
|
7634360f6b63f7f402ac759476a34577f15ac9f58c32bca6a13e5cddbe905ad7 | """
Internationalization support.
"""
import re
from contextlib import ContextDecorator
from django.utils.autoreload import autoreload_started, file_changed
from django.utils.functional import lazy
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_language', 'to_locale', 'templatize',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans:
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect on which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
from django.utils.translation.reloader import watch_for_translation_changes, translation_file_changed
autoreload_started.connect(watch_for_translation_changes, dispatch_uid='translation_file_changed')
file_changed.connect(translation_file_changed, dispatch_uid='translation_file_changed')
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no longer needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
# An alias since Django 2.0
ugettext = gettext
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
# An alias since Django 2.0
ungettext = ngettext
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = ugettext_lazy = lazy(gettext, str)
pgettext_lazy = lazy(pgettext, str)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, int):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
original_kwargs = kwargs.copy()
class NumberAwareString(resultclass):
def __bool__(self):
return bool(kwargs['singular'])
def _get_number_value(self, values):
try:
return values[number]
except KeyError:
raise KeyError(
"Your dictionary lacks key '%s\'. Please provide "
"it, because it is required to determine whether "
"string is singular or plural." % number
)
def _translate(self, number_value):
kwargs['number'] = number_value
return func(**kwargs)
def format(self, *args, **kwargs):
number_value = self._get_number_value(kwargs) if kwargs and number else args[0]
return self._translate(number_value).format(*args, **kwargs)
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
number_value = self._get_number_value(rhs)
else:
number_value = rhs
translated = self._translate(number_value)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number.
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
proxy.__reduce__ = lambda: (_lazy_number_unpickle, (func, resultclass, number, original_kwargs))
return proxy
def _lazy_number_unpickle(func, resultclass, number, kwargs):
return lazy_number(func, resultclass, number=number, **kwargs)
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
# An alias since Django 2.0
ungettext_lazy = ngettext_lazy
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, str, context=context, singular=singular, plural=plural, number=number)
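# A minimal sketch of the number-as-key behavior implemented above
# (illustrative): when `number` is a string, plural selection is deferred
# until the lazy string is interpolated with a dict containing that key.
# The expected values assume no catalog overrides the source strings
# (e.g. USE_I18N = False).
def _example_ngettext_lazy():
    message = ngettext_lazy('%(num)d apple', '%(num)d apples', 'num')
    assert message % {'num': 1} == '1 apple'
    assert message % {'num': 2} == '2 apples'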
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.old_language is None:
deactivate_all()
elif self.deactivate:
deactivate()
else:
activate(self.old_language)
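# Usage sketch for override (illustrative): it works as a context manager
# and, via ContextDecorator, as a decorator. Assumes a 'fr' translation is
# available in the current configuration.
def _example_override():
    with override('fr'):
        pass  # gettext() calls here resolve against French catalogs.
    # The previous language is restored on exit. override(None) instead
    # deactivates translation entirely inside the block.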
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_language(locale):
"""Turn a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower() + '-' + locale[p + 1:].lower()
else:
return locale.lower()
def to_locale(language):
"""Turn a language name (en-us) into a locale name (en_US)."""
language, _, country = language.lower().partition('-')
if not country:
return language
# A language with > 2 characters after the dash only has its first
# character after the dash capitalized; e.g. sr-latn becomes sr_Latn.
# A language with 2 characters after the dash has both characters
# capitalized; e.g. en-us becomes en_US.
country, _, tail = country.partition('-')
country = country.title() if len(country) > 2 else country.upper()
if tail:
country += '-' + tail
return language + '_' + country
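# A few concrete round-trips for the two converters above (illustrative):
def _example_language_locale_conversions():
    assert to_locale('en-us') == 'en_US'
    assert to_locale('sr-latn') == 'sr_Latn'  # >2 chars: title-cased
    assert to_language('en_US') == 'en-us'
    assert to_language('sr_Latn') == 'sr-latn'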
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def get_supported_language_variant(lang_code, *, strict=False):
return _trans.get_supported_language_variant(lang_code, strict)
def templatize(src, **kwargs):
from .template import templatize
return templatize(src, **kwargs)
def deactivate_all():
return _trans.deactivate_all()
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if 'fallback' in lang_info and 'name' not in lang_info:
info = get_language_info(lang_info['fallback'][0])
else:
info = lang_info
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
info = LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
if info:
info['name_translated'] = gettext_lazy(info['name'])
return info
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
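# Example (illustrative): leading/trailing whitespace is stripped, and each
# newline, with the whitespace around it, collapses to a single space.
def _example_trim_whitespace():
    assert trim_whitespace('  Hello\n   world  ') == 'Hello world'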
|
313c2ae9b8e30182c8ad11a152d60673fc2aac67f33f48852be1ffafc83d4042 | from django.apps.registry import apps as global_apps
from django.db import migrations, router
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor:
"""
End-to-end migration execution - load migrations and run them up or down
to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, return a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def _create_project_state(self, with_applied_migrations=False):
"""
Create a project state including all the applications without
migrations and applied migrations if with_applied_migrations=True.
"""
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
if with_applied_migrations:
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
applied_migrations = {
self.loader.graph.nodes[key] for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
for migration, _ in full_plan:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
return state
def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False):
"""
Migrate the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
"""
# The django_migrations table must be present to record applied
# migrations.
self.recorder.ensure_schema()
if plan is None:
plan = self.migration_plan(targets)
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
all_forwards = all(not backwards for mig, backwards in plan)
all_backwards = all(backwards for mig, backwards in plan)
if not plan:
if state is None:
# The resulting state should include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
elif all_forwards == all_backwards:
# This should only happen if there's a mixed plan
raise InvalidMigrationPlan(
"Migration plans with both forwards and backwards migrations "
"are not supported. Please split your migration process into "
"separate plans of only forwards OR backwards migrations.",
plan
)
elif all_forwards:
if state is None:
# The resulting state should still include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
else:
# No need to check for `elif all_backwards` here, as that condition
# would always evaluate to true.
state = self._migrate_all_backwards(plan, full_plan, fake=fake)
self.check_replacements()
return state
def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial):
"""
Take a list of 2-tuples of the form (migration instance, False) and
apply them in the order they occur in the full_plan.
"""
migrations_to_run = {m[0] for m in plan}
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from these sets so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
migrations_to_run.remove(migration)
return state
def _migrate_all_backwards(self, plan, full_plan, fake):
"""
Take a list of 2-tuples of the form (migration instance, True) and
unapply them in the reverse of the order in which they occur in the full_plan.
Since unapplying a migration requires the project state prior to that
migration, Django will compute the migration states before each of them
in a first run over the plan and then unapply them in a second run over
the plan.
"""
migrations_to_run = {m[0] for m in plan}
# Holds all migration states prior to the migrations being unapplied
states = {}
state = self._create_project_state()
applied_migrations = {
self.loader.graph.nodes[key] for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
if self.progress_callback:
self.progress_callback("render_start")
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
state.apps # Render all -- performance critical
# The state before this migration
states[migration] = state
# The old state stays as-is; we continue with the new state
state = migration.mutate_state(state, preserve=True)
migrations_to_run.remove(migration)
elif migration in applied_migrations:
# Only mutate the state if the migration is actually applied
# to make sure the resulting state doesn't include changes
# from unrelated migrations.
migration.mutate_state(state, preserve=False)
if self.progress_callback:
self.progress_callback("render_success")
for migration, _ in plan:
self.unapply_migration(states[migration], migration, fake=fake)
applied_migrations.remove(migration)
# Generate the post migration state by starting from the state before
# the last migration is unapplied and mutating it to include all the
# remaining applied migrations.
last_unapplied_migration = plan[-1][0]
state = states[last_unapplied_migration]
for index, (migration, _) in enumerate(full_plan):
if migration == last_unapplied_migration:
for migration, _ in full_plan[index:]:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
break
return state
def collect_sql(self, plan):
"""
Take a migration plan and return a list of collected SQL statements
that represent the best-efforts version of that plan.
"""
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""Run a migration forwards."""
migration_recorded = False
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:
state = migration.apply(state, schema_editor)
self.record_migration(migration)
migration_recorded = True
if not migration_recorded:
self.record_migration(migration)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def record_migration(self, migration):
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
def unapply_migration(self, state, migration, fake=False):
"""Run a migration backwards."""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
Do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, to correctly handle the case
when a new squash migration is pushed to a deployment that already had
all its replaced migrations applied. In this case no new migration will
be applied, but the applied state of the squashed migration must be
maintained.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Test whether a migration has been implicitly applied - that the
tables or columns it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel and AddField).
"""
def should_skip_detecting_model(migration, model):
"""
No need to detect tables for proxy models, unmanaged models, or
models that can't be migrated on the current database.
"""
return (
model._meta.proxy or not model._meta.managed or not
router.allow_migrate(
self.connection.alias, migration.app_label,
model_name=model._meta.model_name,
)
)
if migration.initial is None:
# Bail if the migration isn't the first one in its app
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
with self.connection.cursor() as cursor:
existing_table_names = self.connection.introspection.table_names(cursor)
# Make sure all create model and add field operations are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
if model._meta.db_table not in existing_table_names:
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
table = model._meta.db_table
field = model._meta.get_field(operation.name)
# Handle implicit many-to-many tables created by AddField.
if field.many_to_many:
if field.remote_field.through._meta.db_table not in existing_table_names:
return False, project_state
else:
found_add_field_migration = True
continue
column_names = [
column.name for column in
self.connection.introspection.get_table_description(self.connection.cursor(), table)
]
if field.column not in column_names:
return False, project_state
found_add_field_migration = True
# If we get this far and we found at least one CreateModel or AddField migration,
# the migration is considered implicitly applied.
return (found_create_model_migration or found_add_field_migration), after_state
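# A minimal driving sketch (illustrative; assumes a configured Django
# project and database connection). 'myapp' is a hypothetical app label.
def _example_run_executor():
    from django.db import connection
    executor = MigrationExecutor(connection)
    # Plan forward to the latest migrations of one app and apply them.
    targets = executor.loader.graph.leaf_nodes('myapp')
    plan = executor.migration_plan(targets)  # [(Migration, backwards?), ...]
    executor.migrate(targets, plan=plan)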
|
1e87aeab62005ac0c6dccde987deaf9ae9fa7f35d289fb8599457447518c2a43 | import copy
from collections import OrderedDict
from contextlib import contextmanager
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.conf import settings
from django.db import models
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
from .exceptions import InvalidBasesError
def _get_app_label_and_model_name(model, app_label=''):
if isinstance(model, str):
split = model.split('.', 1)
return tuple(split) if len(split) == 2 else (app_label, split[0])
else:
return model._meta.app_label, model._meta.model_name
def _get_related_models(m):
"""Return all models that have a direct relationship to the given model."""
related_models = [
subclass for subclass in m.__subclasses__()
if issubclass(subclass, models.Model)
]
related_fields_models = set()
for f in m._meta.get_fields(include_parents=True, include_hidden=True):
if f.is_relation and f.related_model is not None and not isinstance(f.related_model, str):
related_fields_models.add(f.model)
related_models.append(f.related_model)
# Reverse accessors of foreign keys to proxy models are attached to their
# concrete proxied model.
opts = m._meta
if opts.proxy and m in related_fields_models:
related_models.append(opts.concrete_model)
return related_models
def get_related_models_tuples(model):
"""
Return a set of (app_label, model_name) tuples for all related
models for the given model.
"""
return {
(rel_mod._meta.app_label, rel_mod._meta.model_name)
for rel_mod in _get_related_models(model)
}
def get_related_models_recursive(model):
"""
Return all models that have a direct or indirect relationship
to the given model.
Relationships are either defined by explicit relational fields, like
ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
model (a superclass is related to its subclasses, but not vice versa). Note,
however, that a model inheriting from a concrete model is also related to
its superclass through the implicit *_ptr OneToOneField on the subclass.
"""
seen = set()
queue = _get_related_models(model)
for rel_mod in queue:
rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name
if (rel_app_label, rel_model_name) in seen:
continue
seen.add((rel_app_label, rel_model_name))
queue.extend(_get_related_models(rel_mod))
return seen - {(model._meta.app_label, model._meta.model_name)}
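# Illustrative sketch of the traversal above, with hypothetical models in a
# 'library' app where Book has a ForeignKey to Author and Novel subclasses
# Book: get_related_models_recursive(Author) would contain both
# ('library', 'book') (via the reverse FK) and ('library', 'novel') (via
# subclassing), while Author itself is excluded from its own result set.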
class ProjectState:
"""
Represent the entire project's overall state. This is the item that is
passed around - do it here rather than at the app level so that cross-app
FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
# Apps to include from main registry, usually unmigrated ones
self.real_apps = real_apps or []
self.is_delayed = False
def add_model(self, model_state):
app_label, model_name = model_state.app_label, model_state.name_lower
self.models[(app_label, model_name)] = model_state
if 'apps' in self.__dict__: # hasattr would cache the property
self.reload_model(app_label, model_name)
def remove_model(self, app_label, model_name):
del self.models[app_label, model_name]
if 'apps' in self.__dict__: # hasattr would cache the property
self.apps.unregister_model(app_label, model_name)
# Need to do this explicitly since unregister_model() doesn't clear
# the cache automatically (#24513)
self.apps.clear_cache()
def _find_reload_model(self, app_label, model_name, delay=False):
if delay:
self.is_delayed = True
related_models = set()
try:
old_model = self.apps.get_model(app_label, model_name)
except LookupError:
pass
else:
# Get all relations to and from the old model before reloading,
# as _meta.apps may change
if delay:
related_models = get_related_models_tuples(old_model)
else:
related_models = get_related_models_recursive(old_model)
# Get all outgoing references from the model to be rendered
model_state = self.models[(app_label, model_name)]
# Directly related models are the models pointed to by ForeignKeys,
# OneToOneFields, and ManyToManyFields.
direct_related_models = set()
for name, field in model_state.fields:
if field.is_relation:
if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT:
continue
rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label)
direct_related_models.add((rel_app_label, rel_model_name.lower()))
# For all directly related models, recursively get all related models.
related_models.update(direct_related_models)
for rel_app_label, rel_model_name in direct_related_models:
try:
rel_model = self.apps.get_model(rel_app_label, rel_model_name)
except LookupError:
pass
else:
if delay:
related_models.update(get_related_models_tuples(rel_model))
else:
related_models.update(get_related_models_recursive(rel_model))
# Include the model itself
related_models.add((app_label, model_name))
return related_models
def reload_model(self, app_label, model_name, delay=False):
if 'apps' in self.__dict__: # hasattr would cache the property
related_models = self._find_reload_model(app_label, model_name, delay)
self._reload(related_models)
def reload_models(self, models, delay=True):
if 'apps' in self.__dict__: # hasattr would cache the property
related_models = set()
for app_label, model_name in models:
related_models.update(self._find_reload_model(app_label, model_name, delay))
self._reload(related_models)
def _reload(self, related_models):
# Unregister all related models
with self.apps.bulk_update():
for rel_app_label, rel_model_name in related_models:
self.apps.unregister_model(rel_app_label, rel_model_name)
states_to_be_rendered = []
# Gather all model states of those models that will be re-rendered.
# This includes:
# 1. All related models of unmigrated apps
for model_state in self.apps.real_models:
if (model_state.app_label, model_state.name_lower) in related_models:
states_to_be_rendered.append(model_state)
# 2. All related models of migrated apps
for rel_app_label, rel_model_name in related_models:
try:
model_state = self.models[rel_app_label, rel_model_name]
except KeyError:
pass
else:
states_to_be_rendered.append(model_state)
# Render all models
self.apps.render_multiple(states_to_be_rendered)
def clone(self):
"""Return an exact copy of this ProjectState."""
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
)
if 'apps' in self.__dict__:
new_state.apps = self.apps.clone()
new_state.is_delayed = self.is_delayed
return new_state
def clear_delayed_apps_cache(self):
if self.is_delayed and 'apps' in self.__dict__:
del self.__dict__['apps']
@cached_property
def apps(self):
return StateApps(self.real_apps, self.models)
@property
def concrete_apps(self):
self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True)
return self.apps
@classmethod
def from_apps(cls, apps):
"""Take an Apps and return a ProjectState matching it."""
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name_lower)] = model_state
return cls(app_models)
def __eq__(self, other):
return self.models == other.models and set(self.real_apps) == set(other.real_apps)
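# A minimal construction sketch (illustrative; assumes Django is
# configured). The 'shop' app label and field are hypothetical.
def _example_project_state():
    state = ProjectState()
    state.add_model(ModelState(
        'shop', 'Product',
        fields=[('id', models.AutoField(primary_key=True))],
    ))
    # Rendering produces a fake model class bound to a StateApps registry.
    return state.apps.get_model('shop', 'Product')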
class AppConfigStub(AppConfig):
"""Stub of an AppConfig. Only provides a label and a dict of models."""
# Not used, but required by AppConfig.__init__
path = ''
def __init__(self, label):
self.label = label
# App-label and app-name are not the same thing, so technically passing
# in the label here is wrong. In practice, migrations don't care about
# the app name, but we need something unique, and the label works fine.
super().__init__(label, None)
def import_models(self):
self.models = self.apps.all_models[self.label]
class StateApps(Apps):
"""
Subclass of the global Apps registry class to better handle dynamic model
additions and removals.
"""
def __init__(self, real_apps, models, ignore_swappable=False):
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
# FKs/M2Ms from real apps are also not included as they just
# mess things up with partial states (due to lack of dependencies)
self.real_models = []
for app_label in real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
self.real_models.append(ModelState.from_model(model, exclude_rels=True))
# Populate the app registry with a stub for each application.
app_labels = {model_state.app_label for model_state in models.values()}
app_configs = [AppConfigStub(label) for label in sorted([*real_apps, *app_labels])]
super().__init__(app_configs)
# These locks get in the way of copying as implemented in clone(),
# which is called whenever Django duplicates a StateApps before
# updating it.
self._lock = None
self.ready_event = None
self.render_multiple([*models.values(), *self.real_models])
# There shouldn't be any operations pending at this point.
from django.core.checks.model_checks import _check_lazy_references
ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set()
errors = _check_lazy_references(self, ignore=ignore)
if errors:
raise ValueError("\n".join(error.msg for error in errors))
@contextmanager
def bulk_update(self):
# Avoid clearing each model's cache for each change. Instead, clear
# all caches when we're finished updating the model instances.
ready = self.ready
self.ready = False
try:
yield
finally:
self.ready = ready
self.clear_cache()
def render_multiple(self, model_states):
# We keep trying to render the models in a loop, ignoring invalid
# base errors, until the size of the unrendered models doesn't
# decrease by at least one, meaning there's a base dependency loop or a
# missing base.
if not model_states:
return
# Prevent all model caches from being expired on each render.
with self.bulk_update():
unrendered_models = model_states
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError(
"Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
"app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
"for more" % (new_unrendered_models, get_docs_version())
)
unrendered_models = new_unrendered_models
def clone(self):
"""Return a clone of this registry."""
clone = StateApps([], {})
clone.all_models = copy.deepcopy(self.all_models)
clone.app_configs = copy.deepcopy(self.app_configs)
# Set the pointer to the correct app registry.
for app_config in clone.app_configs.values():
app_config.apps = clone
# No need to actually clone them, they'll never change
clone.real_models = self.real_models
return clone
def register_model(self, app_label, model):
self.all_models[app_label][model._meta.model_name] = model
if app_label not in self.app_configs:
self.app_configs[app_label] = AppConfigStub(app_label)
self.app_configs[app_label].apps = self
self.app_configs[app_label].models = OrderedDict()
self.app_configs[app_label].models[model._meta.model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def unregister_model(self, app_label, model_name):
try:
del self.all_models[app_label][model_name]
del self.app_configs[app_label].models[model_name]
except KeyError:
pass
class ModelState:
"""
Represent a Django Model. Don't use the actual Model class as it's not
designed to have its options changed - instead, mutate this one and then
render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
self.app_label = app_label
self.name = name
self.fields = fields
self.options = options or {}
self.options.setdefault('indexes', [])
self.options.setdefault('constraints', [])
self.bases = bases or (models.Model,)
self.managers = managers or []
# Sanity-check that fields is NOT a dict. It must be ordered.
if isinstance(self.fields, dict):
raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
for name, field in fields:
# Sanity-check that fields are NOT already bound to a model.
if hasattr(field, 'model'):
raise ValueError(
'ModelState.fields cannot be bound to a model - "%s" is.' % name
)
# Sanity-check that relation fields are NOT referring to a model class.
if field.is_relation and hasattr(field.related_model, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.to" does. '
'Use a string reference instead.' % name
)
if field.many_to_many and hasattr(field.remote_field.through, '_meta'):
raise ValueError(
'ModelState.fields cannot refer to a model class - "%s.through" does. '
'Use a string reference instead.' % name
)
# Sanity-check that indexes have their name set.
for index in self.options['indexes']:
if not index.name:
raise ValueError(
"Indexes passed to ModelState require a name attribute. "
"%r doesn't have one." % index
)
@cached_property
def name_lower(self):
return self.name.lower()
@classmethod
def from_model(cls, model, exclude_rels=False):
"""Given a model, return a ModelState representing it."""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
if getattr(field, "remote_field", None) and exclude_rels:
continue
if isinstance(field, OrderWrt):
continue
name = field.name
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct field %s on %s: %s" % (
name,
model._meta.label,
e,
))
if not exclude_rels:
for field in model._meta.local_many_to_many:
name = field.name
try:
fields.append((name, field.clone()))
except TypeError as e:
raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
name,
model._meta.object_name,
e,
))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "index_together":
it = model._meta.original_attrs["index_together"]
options[name] = set(normalize_together(it))
elif name == "indexes":
indexes = [idx.clone() for idx in model._meta.indexes]
for index in indexes:
if not index.name:
index.set_name_with_model(model)
options['indexes'] = indexes
elif name == 'constraints':
options['constraints'] = [con.clone() for con in model._meta.constraints]
else:
options[name] = model._meta.original_attrs[name]
# If we're ignoring relationships, remove all field-listing model
# options (that option basically just means "make a stub model")
if exclude_rels:
for key in ["unique_together", "index_together", "order_with_respect_to"]:
if key in options:
del options[key]
# Private fields are ignored, so remove options that refer to them.
elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}:
del options['order_with_respect_to']
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
# We can't rely on __mro__ directly because we only want to flatten
# abstract models and not the whole tree. However by recursing on
# __bases__ we may end up with duplicates and ordering issues, we
# therefore discard any duplicates and reorder the bases according
# to their index in the MRO.
flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
# Make our record
bases = tuple(
(
base._meta.label_lower
if hasattr(base, "_meta") else
base
)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any((isinstance(base, str) or issubclass(base, models.Model)) for base in bases):
bases = (models.Model,)
managers = []
manager_names = set()
default_manager_shim = None
for manager in model._meta.managers:
if manager.name in manager_names:
# Skip overridden managers.
continue
elif manager.use_in_migrations:
# Copy managers usable in migrations.
new_manager = copy.copy(manager)
new_manager._set_creation_counter()
elif manager is model._base_manager or manager is model._default_manager:
# Shim custom managers used as default and base managers.
new_manager = models.Manager()
new_manager.model = manager.model
new_manager.name = manager.name
if manager is model._default_manager:
default_manager_shim = new_manager
else:
continue
manager_names.add(manager.name)
managers.append((manager.name, new_manager))
# Ignore a shimmed default manager called objects if it's the only one.
if managers == [('objects', default_manager_shim)]:
managers = []
# Construct the new ModelState
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
managers,
)
def construct_managers(self):
"""Deep-clone the managers using deconstruction."""
# Sort all managers by their creation counter
sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
for mgr_name, manager in sorted_managers:
as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
if as_manager:
qs_class = import_string(qs_path)
yield mgr_name, qs_class.as_manager()
else:
manager_class = import_string(manager_path)
yield mgr_name, manager_class(*args, **kwargs)
def clone(self):
"""Return an exact copy of this ModelState."""
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=list(self.fields),
# Since options are shallow-copied here, operations such as
# AddIndex must replace their option (e.g 'indexes') rather
# than mutating it.
options=dict(self.options),
bases=self.bases,
managers=list(self.managers),
)
def render(self, apps):
"""Create a Model object from our current state into the given apps."""
# First, make a Meta object
meta_contents = {'app_label': self.app_label, 'apps': apps, **self.options}
meta = type("Meta", (), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, str) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
# Turn fields into a dict for the body, add other bits
body = {name: field.clone() for name, field in self.fields}
body['Meta'] = meta
body['__module__'] = "__fake__"
# Restore managers
body.update(self.construct_managers())
# Then, make a Model object (apps.register_model is called in __new__)
return type(self.name, bases, body)
def get_field_by_name(self, name):
for fname, field in self.fields:
if fname == name:
return field
raise ValueError("No field called %s on model %s" % (name, self.name))
def get_index_by_name(self, name):
for index in self.options['indexes']:
if index.name == name:
return index
raise ValueError("No index named %s on model %s" % (name, self.name))
def get_constraint_by_name(self, name):
for constraint in self.options['constraints']:
if constraint.name == name:
return constraint
raise ValueError('No constraint named %s on model %s' % (name, self.name))
def __repr__(self):
return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name)
def __eq__(self, other):
return (
(self.app_label == other.app_label) and
(self.name == other.name) and
(len(self.fields) == len(other.fields)) and
all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:]))
for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
(self.options == other.options) and
(self.bases == other.bases) and
(self.managers == other.managers)
)
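# A round-trip sketch (illustrative; assumes a configured project): capture
# a real model class as a ModelState and clone it before mutating, since
# clones share Field instances and shallow-copied options.
def _example_model_state_roundtrip(model):
    model_state = ModelState.from_model(model)
    mutated = model_state.clone()
    mutated.options = {**mutated.options, 'verbose_name': 'renamed'}
    return model_state == mutated  # False once the options diverge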
|
06d21d93b028d48c35ef4027279a1b8a4671537f1bf8d49cafc23fee7d7bd0bc | import pkgutil
import sys
from importlib import import_module, reload
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from .exceptions import (
AmbiguityError, BadMigrationError, InconsistentMigrationHistory,
NodeNotFoundError,
)
MIGRATIONS_MODULE_NAME = 'migrations'
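# Sketch of the settings hook consumed below (illustrative; the app labels
# and dotted path are hypothetical): MIGRATION_MODULES remaps or disables
# an app's migrations package.
#
#   MIGRATION_MODULES = {
#       'shop': 'shop.db_migrations',  # custom migrations package
#       'legacyapp': None,             # treat the app as unmigrated
#   }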
class MigrationLoader:
"""
Load migration files from disk and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the Python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True, ignore_no_migrations=False):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
self.ignore_no_migrations = ignore_no_migrations
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
"""
Return the path to the migrations module for the specified app_label
and a boolean indicating if the module is specified in
settings.MIGRATION_MODULES.
"""
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label], True
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False
def load_disk(self):
"""Load the migrations from all INSTALLED_APPS from disk."""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
# Get the migrations module directory
module_name, explicit = self.migrations_module(app_config.label)
if module_name is None:
self.unmigrated_apps.add(app_config.label)
continue
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if ((explicit and self.ignore_no_migrations) or (
not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# Empty directories are namespaces.
# getattr() needed on PY36 and older (replace w/attribute access).
if getattr(module, '__file__', None) is None:
self.unmigrated_apps.add(app_config.label)
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
self.unmigrated_apps.add(app_config.label)
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
reload(module)
self.migrated_apps.add(app_config.label)
migration_names = {
name for _, name, is_pkg in pkgutil.iter_modules(module.__path__)
if not is_pkg and name[0] not in '_~'
}
# Load migrations
for migration_name in migration_names:
migration_path = '%s.%s' % (module_name, migration_name)
try:
migration_module = import_module(migration_path)
except ImportError as e:
if 'bad magic number' in str(e):
raise ImportError(
"Couldn't import %r as it appears to be a stale "
".pyc file." % migration_path
) from e
else:
raise
if not hasattr(migration_module, "Migration"):
raise BadMigrationError(
"Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
)
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
migration_name,
app_config.label,
)
def get_migration(self, app_label, name_prefix):
"""Return the named migration or raise NodeNotFoundError."""
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"""
        Return the single migration which matches the given app label and
        name prefix; raise AmbiguityError if more than one matches.
"""
# Do the search
results = []
for migration_app_label, migration_name in self.disk_migrations:
if migration_app_label == app_label and migration_name.startswith(name_prefix):
results.append((migration_app_label, migration_name))
if len(results) > 1:
raise AmbiguityError(
"There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
)
elif not results:
raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
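    # Illustrative sketch (hypothetical migration names): if 'shop' has disk
    # migrations 0001_initial and 0002_add_sku,
    # get_migration_by_prefix('shop', '0002') returns the 0002_add_sku
    # migration, get_migration_by_prefix('shop', '000') raises
    # AmbiguityError, and get_migration_by_prefix('shop', '0003') raises
    # KeyError.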
def check_key(self, key, current_app):
if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
return key
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they even have
# migrations.
if key[0] == current_app:
# Ignore __first__ references to the same app (#22325)
return
if key[0] in self.unmigrated_apps:
# This app isn't migrated, but something depends on it.
# The models will get auto-added into the state, though
# so we're fine.
return
if key[0] in self.migrated_apps:
try:
if key[1] == "__first__":
return self.graph.root_nodes(key[0])[0]
else: # "__latest__"
return self.graph.leaf_nodes(key[0])[0]
except IndexError:
if self.ignore_no_migrations:
return None
else:
raise ValueError("Dependency on app with no migrations: %s" % key[0])
raise ValueError("Dependency on unknown app: %s" % key[0])
def add_internal_dependencies(self, key, migration):
"""
Internal dependencies need to be added first to ensure `__first__`
dependencies find the correct root node.
"""
for parent in migration.dependencies:
# Ignore __first__ references to the same app.
if parent[0] == key[0] and parent[1] != '__first__':
self.graph.add_dependency(migration, key, parent, skip_validation=True)
def add_external_dependencies(self, key, migration):
for parent in migration.dependencies:
# Skip internal dependencies
if key[0] == parent[0]:
continue
parent = self.check_key(parent, key[0])
if parent is not None:
self.graph.add_dependency(migration, key, parent, skip_validation=True)
for child in migration.run_before:
child = self.check_key(child, key[0])
if child is not None:
self.graph.add_dependency(migration, child, key, skip_validation=True)
def build_graph(self):
"""
Build a migration dependency graph using both the disk and database.
        You'll need to rebuild the graph if you apply migrations. This isn't
        usually a problem, as migrations generally run in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
if self.connection is None:
self.applied_migrations = set()
else:
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# To start, populate the migration graph with nodes for ALL migrations
# and their dependencies. Also make note of replacing migrations at this step.
self.graph = MigrationGraph()
self.replacements = {}
for key, migration in self.disk_migrations.items():
self.graph.add_node(key, migration)
# Replacing migrations.
if migration.replaces:
self.replacements[key] = migration
for key, migration in self.disk_migrations.items():
# Internal (same app) dependencies.
self.add_internal_dependencies(key, migration)
# Add external dependencies now that the internal ones have been resolved.
for key, migration in self.disk_migrations.items():
self.add_external_dependencies(key, migration)
# Carry out replacements where possible.
for key, migration in self.replacements.items():
# Get applied status of each of this migration's replacement targets.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
# Ensure the replacing migration is only marked as applied if all of
# its replacement targets are.
if all(applied_statuses):
self.applied_migrations.add(key)
else:
self.applied_migrations.discard(key)
# A replacing migration can be used if either all or none of its
# replacement targets have been applied.
if all(applied_statuses) or (not any(applied_statuses)):
self.graph.remove_replaced_nodes(key, migration.replaces)
else:
# This replacing migration cannot be used because it is partially applied.
# Remove it from the graph and remap dependencies to it (#25945).
self.graph.remove_replacement_node(key, migration.replaces)
# Ensure the graph is consistent.
try:
self.graph.validate_consistency()
except NodeNotFoundError as exc:
# Check if the missing node could have been replaced by any squash
# migration but wasn't because the squash migration was partially
# applied before. In that case raise a more understandable exception
# (#23556).
# Get reverse replacements.
reverse_replacements = {}
for key, migration in self.replacements.items():
for replaced in migration.replaces:
reverse_replacements.setdefault(replaced, set()).add(key)
# Try to reraise exception with more detail.
if exc.node in reverse_replacements:
candidates = reverse_replacements.get(exc.node, set())
is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
if not is_replaced:
tries = ', '.join('%s.%s' % c for c in candidates)
raise NodeNotFoundError(
"Migration {0} depends on nonexistent node ('{1}', '{2}'). "
"Django tried to replace migration {1}.{2} with any of [{3}] "
"but wasn't able to because some of the replaced migrations "
"are already applied.".format(
exc.origin, exc.node[0], exc.node[1], tries
),
exc.node
) from exc
raise exc
self.graph.ensure_not_cyclic()
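    # Minimal usage sketch, assuming a configured default connection; not
    # part of the original module:
    #
    #     from django.db import connection
    #     loader = MigrationLoader(connection)
    #     loader.graph.leaf_nodes()       # e.g. [('blog', '0003_auto')]
    #     ('blog', '0001_initial') in loader.applied_migrations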
def check_consistent_history(self, connection):
"""
Raise InconsistentMigrationHistory if any applied migrations have
unapplied dependencies.
"""
recorder = MigrationRecorder(connection)
applied = recorder.applied_migrations()
for migration in applied:
# If the migration is unknown, skip it.
if migration not in self.graph.nodes:
continue
for parent in self.graph.node_map[migration].parents:
if parent not in applied:
# Skip unapplied squashed migrations that have all of their
# `replaces` applied.
if parent in self.replacements:
if all(m in applied for m in self.replacements[parent].replaces):
continue
raise InconsistentMigrationHistory(
"Migration {}.{} is applied before its dependency "
"{}.{} on database '{}'.".format(
migration[0], migration[1], parent[0], parent[1],
connection.alias,
)
)
def detect_conflicts(self):
"""
Look through the loaded graph and detect any conflicts - apps
with more than one leaf migration. Return a dict of the app labels
that conflict with the migration names that conflict.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
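    # Illustrative sketch (hypothetical names): if 'blog' has two leaf
    # migrations 0002_a and 0002_b (e.g. created on diverging branches),
    # detect_conflicts() returns {'blog': {'0002_a', '0002_b'}}.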
def project_state(self, nodes=None, at_end=True):
"""
Return a ProjectState object representing the most recent state
that the loaded migrations represent.
See graph.make_state() for the meaning of "nodes" and "at_end".
"""
return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
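    # Illustrative sketch: project_state() is how callers obtain historical
    # models, e.g. loader.project_state().apps.get_model('blog', 'Entry')
    # returns 'Entry' as the loaded migrations define it, not as the current
    # models.py does ('blog'/'Entry' are hypothetical names).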
|
a64d476f01b1d182931d7981fccde5c2a27de94e22b81fc2b2ee14572ffda994 | import os
import re
from importlib import import_module
from django import get_version
from django.apps import apps
# SettingsReference imported for backwards compatibility in Django 2.2.
from django.conf import SettingsReference # NOQA
from django.db import migrations
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.serializer import Serializer, serializer_factory
from django.utils.inspect import get_func_args
from django.utils.module_loading import module_dir
from django.utils.timezone import now
class OperationWriter:
def __init__(self, operation, indentation=2):
self.operation = operation
self.buff = []
self.indentation = indentation
def serialize(self):
def _write(_arg_name, _arg_value):
if (_arg_name in self.operation.serialization_expand_args and
isinstance(_arg_value, (list, tuple, dict))):
if isinstance(_arg_value, dict):
self.feed('%s={' % _arg_name)
self.indent()
for key, value in _arg_value.items():
key_string, key_imports = MigrationWriter.serialize(key)
arg_string, arg_imports = MigrationWriter.serialize(value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s: %s' % (key_string, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s: %s,' % (key_string, arg_string))
imports.update(key_imports)
imports.update(arg_imports)
self.unindent()
self.feed('},')
else:
self.feed('%s=[' % _arg_name)
self.indent()
for item in _arg_value:
arg_string, arg_imports = MigrationWriter.serialize(item)
args = arg_string.splitlines()
if len(args) > 1:
for arg in args[:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s,' % arg_string)
imports.update(arg_imports)
self.unindent()
self.feed('],')
else:
arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s=%s' % (_arg_name, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s=%s,' % (_arg_name, arg_string))
imports.update(arg_imports)
imports = set()
name, args, kwargs = self.operation.deconstruct()
operation_args = get_func_args(self.operation.__init__)
        # See if this operation is in django.db.migrations. If it is, we can
        # just use the fact that it's already imported; otherwise, we need to
        # add an import for the operation class.
if getattr(migrations, name, None) == self.operation.__class__:
self.feed('migrations.%s(' % name)
else:
imports.add('import %s' % (self.operation.__class__.__module__))
self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
self.indent()
for i, arg in enumerate(args):
arg_value = arg
arg_name = operation_args[i]
_write(arg_name, arg_value)
i = len(args)
# Only iterate over remaining arguments
for arg_name in operation_args[i:]:
if arg_name in kwargs: # Don't sort to maintain signature order
arg_value = kwargs[arg_name]
_write(arg_name, arg_value)
self.unindent()
self.feed('),')
return self.render(), imports
def indent(self):
self.indentation += 1
def unindent(self):
self.indentation -= 1
def feed(self, line):
self.buff.append(' ' * (self.indentation * 4) + line)
def render(self):
return '\n'.join(self.buff)
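# Illustrative sketch (hypothetical operation), not part of the original
# module: OperationWriter(op).serialize() for
# op = migrations.AddField('author', 'age', models.IntegerField(null=True))
# returns roughly the following rendered string, plus an import set along
# the lines of {'from django.db import models'}:
#
#         migrations.AddField(
#             model_name='author',
#             name='age',
#             field=models.IntegerField(null=True),
#         ),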
class MigrationWriter:
"""
    Take a Migration instance and produce the contents of the migration file
    from it.
"""
def __init__(self, migration, include_header=True):
self.migration = migration
self.include_header = include_header
self.needs_manual_porting = False
def as_string(self):
"""Return a string of the file contents."""
items = {
"replaces_str": "",
"initial_str": "",
}
imports = set()
# Deconstruct operations
operations = []
for operation in self.migration.operations:
operation_string, operation_imports = OperationWriter(operation).serialize()
imports.update(operation_imports)
operations.append(operation_string)
items["operations"] = "\n".join(operations) + "\n" if operations else ""
        # Format dependencies; render swappable dependencies as settings
        # references.
dependencies = []
for dependency in self.migration.dependencies:
if dependency[0] == "__setting__":
dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1])
imports.add("from django.conf import settings")
else:
dependencies.append(" %s," % self.serialize(dependency)[0])
items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
# Format imports nicely, swapping imports of functions from migration files
# for comments
migration_imports = set()
for line in list(imports):
if re.match(r"^import (.*)\.\d+[^\s]*$", line):
migration_imports.add(line.split("import")[1].strip())
imports.remove(line)
self.needs_manual_porting = True
# django.db.migrations is always used, but models import may not be.
# If models import exists, merge it with migrations import.
if "from django.db import models" in imports:
imports.discard("from django.db import models")
imports.add("from django.db import migrations, models")
else:
imports.add("from django.db import migrations")
# Sort imports by the package / module to be imported (the part after
# "from" in "from ... import ..." or after "import" in "import ...").
sorted_imports = sorted(imports, key=lambda i: i.split()[1])
items["imports"] = "\n".join(sorted_imports) + "\n" if imports else ""
if migration_imports:
items["imports"] += (
"\n\n# Functions from the following migrations need manual "
"copying.\n# Move them and any dependencies into this file, "
"then update the\n# RunPython operations to refer to the local "
"versions:\n# %s"
) % "\n# ".join(sorted(migration_imports))
# If there's a replaces, make a string for it
if self.migration.replaces:
items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0]
# Hinting that goes into comment
if self.include_header:
items['migration_header'] = MIGRATION_HEADER_TEMPLATE % {
'version': get_version(),
'timestamp': now().strftime("%Y-%m-%d %H:%M"),
}
else:
items['migration_header'] = ""
if self.migration.initial:
items['initial_str'] = "\n initial = True\n"
return MIGRATION_TEMPLATE % items
@property
def basedir(self):
migrations_package_name, _ = MigrationLoader.migrations_module(self.migration.app_label)
if migrations_package_name is None:
raise ValueError(
"Django can't create migrations for app '%s' because "
"migrations have been disabled via the MIGRATION_MODULES "
"setting." % self.migration.app_label
)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
except ImportError:
pass
else:
try:
return module_dir(migrations_module)
except ValueError:
pass
# Alright, see if it's a direct submodule of the app
app_config = apps.get_app_config(self.migration.app_label)
maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".")
if app_config.name == maybe_app_name:
return os.path.join(app_config.path, migrations_package_basename)
# In case of using MIGRATION_MODULES setting and the custom package
# doesn't exist, create one, starting from an existing package
existing_dirs, missing_dirs = migrations_package_name.split("."), []
while existing_dirs:
missing_dirs.insert(0, existing_dirs.pop(-1))
try:
base_module = import_module(".".join(existing_dirs))
except (ImportError, ValueError):
continue
else:
try:
base_dir = module_dir(base_module)
except ValueError:
continue
else:
break
else:
raise ValueError(
"Could not locate an appropriate location to create "
"migrations package %s. Make sure the toplevel "
"package exists and can be imported." %
migrations_package_name)
final_dir = os.path.join(base_dir, *missing_dirs)
if not os.path.isdir(final_dir):
os.makedirs(final_dir)
for missing_dir in missing_dirs:
base_dir = os.path.join(base_dir, missing_dir)
with open(os.path.join(base_dir, "__init__.py"), "w"):
pass
return final_dir
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
return os.path.join(self.basedir, self.filename)
@classmethod
def serialize(cls, value):
return serializer_factory(value).serialize()
@classmethod
def register_serializer(cls, type_, serializer):
Serializer.register(type_, serializer)
@classmethod
def unregister_serializer(cls, type_):
Serializer.unregister(type_)
MIGRATION_HEADER_TEMPLATE = """\
# Generated by Django %(version)s on %(timestamp)s
"""
MIGRATION_TEMPLATE = """\
%(migration_header)s%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s%(initial_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
|
d266ab2180eef5a4fddaacbbbdc7e4d9e3f7b2da1406142a65e4b18480884d4a | import functools
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import (
COMPILED_REGEX_TYPE, RegexObject, get_migration_name_timestamp,
)
from .topological_sort import stable_topological_sort
class MigrationAutodetector:
"""
Take a pair of ProjectStates and compare them to see what the first would
need doing to make it match the second (the second usually being the
project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Take a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
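    # Minimal usage sketch, assuming the imports shown; not part of the
    # original module:
    #
    #     from django.apps import apps
    #     from django.db.migrations.loader import MigrationLoader
    #     from django.db.migrations.state import ProjectState
    #
    #     loader = MigrationLoader(None, ignore_no_migrations=True)
    #     autodetector = MigrationAutodetector(
    #         loader.project_state(),
    #         ProjectState.from_apps(apps),
    #     )
    #     changes = autodetector.changes(graph=loader.graph)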
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {
key: self.deep_deconstruct(value)
for key, value in obj.items()
}
elif isinstance(obj, functools.partial):
return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords))
elif isinstance(obj, COMPILED_REGEX_TYPE):
return RegexObject(obj)
elif isinstance(obj, type):
# If this is a type that implements 'deconstruct' as an instance method,
# avoid treating this as being deconstructible itself - see #22951
return obj
elif hasattr(obj, 'deconstruct'):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{
key: self.deep_deconstruct(value)
for key, value in kwargs.items()
},
)
else:
return obj
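    # Illustrative sketch: deep_deconstruct(models.CharField(max_length=100))
    # returns roughly ('django.db.models.CharField', [], {'max_length': 100})
    # (the field's name element is stripped), so two fields compare as equal
    # exactly when their full deconstructions do.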
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to. Used for detecting renames (as,
of course, the related fields change during renames).
"""
fields_def = []
for name, field in sorted(fields):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
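    # Illustrative sketch (hypothetical model): for fields including
    # ForeignKey(to='blog.Author'), the returned definition drops both the
    # field names and the FK's 'to' target, so renaming Author (which
    # rewrites 'to' on every pointing FK) still yields an identical
    # definition, letting generate_renamed_models() detect the rename.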
def _detect_changes(self, convert_apps=None, graph=None):
"""
Return a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# Then go through that list, order it, and split into migrations to
# resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
self.altered_indexes = {}
self.altered_constraints = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_apps = self.from_state.concrete_apps
self.new_apps = self.to_state.apps
self.old_model_keys = set()
self.old_proxy_keys = set()
self.old_unmanaged_keys = set()
self.new_model_keys = set()
self.new_proxy_keys = set()
self.new_unmanaged_keys = set()
for al, mn in self.from_state.models:
model = self.old_apps.get_model(al, mn)
if not model._meta.managed:
self.old_unmanaged_keys.add((al, mn))
elif al not in self.from_state.real_apps:
if model._meta.proxy:
self.old_proxy_keys.add((al, mn))
else:
self.old_model_keys.add((al, mn))
for al, mn in self.to_state.models:
model = self.new_apps.get_model(al, mn)
if not model._meta.managed:
self.new_unmanaged_keys.add((al, mn))
elif (
al not in self.from_state.real_apps or
(convert_apps and al in convert_apps)
):
if model._meta.proxy:
self.new_proxy_keys.add((al, mn))
else:
self.new_model_keys.add((al, mn))
# Renames have to come first
self.generate_renamed_models()
# Prepare lists of fields and generate through model map
self._prepare_field_lists()
self._generate_through_model_map()
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
# Create the altered indexes and store them in self.altered_indexes.
# This avoids the same computation in generate_removed_indexes()
# and generate_added_indexes().
self.create_altered_indexes()
self.create_altered_constraints()
# Generate index removal operations before field is removed
self.generate_removed_constraints()
self.generate_removed_indexes()
# Generate field operations
self.generate_renamed_fields()
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_added_indexes()
self.generate_added_constraints()
self.generate_altered_db_table()
self.generate_altered_order_with_respect_to()
self._sort_migrations()
self._build_migration_list(graph)
self._optimize_migrations()
return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists and a list of the fields that used through models
in the old state so dependencies can be made from the through model
deletion to the field that uses it.
"""
self.kept_model_keys = self.old_model_keys & self.new_model_keys
self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys
self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys
self.through_users = {}
self.old_field_keys = {
(app_label, model_name, x)
for app_label, model_name in self.kept_model_keys
for x, y in self.from_state.models[
app_label,
self.renamed_models.get((app_label, model_name), model_name)
].fields
}
self.new_field_keys = {
(app_label, model_name, x)
for app_label, model_name in self.kept_model_keys
for x, y in self.to_state.models[app_label, model_name].fields
}
def _generate_through_model_map(self):
"""Through model map generation."""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and
not old_field.remote_field.through._meta.auto_created):
through_key = (
old_field.remote_field.through._meta.app_label,
old_field.remote_field.through._meta.model_name,
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
@staticmethod
def _resolve_dependency(dependency):
"""
Return the resolved dependency and a boolean denoting whether or not
it was swappable.
"""
if dependency[0] != '__setting__':
return dependency, False
resolved_app_label, resolved_object_name = getattr(settings, dependency[1]).split('.')
return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True
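    # Illustrative sketch: with AUTH_USER_MODEL = 'accounts.User',
    # _resolve_dependency(('__setting__', 'AUTH_USER_MODEL', None, True))
    # returns (('accounts', 'user', None, True), True); any non-swappable
    # dependency is returned unchanged, paired with False.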
def _build_migration_list(self, graph=None):
"""
Chop the lists of operations up into migrations with dependencies on
each other. Do this by going through an app's list of operations until
one is found that has an outgoing dependency that isn't in another
app's migration yet (hasn't been chopped off its list). Then chop off
the operations before it into a migration and move onto the next app.
        If the loop completes without doing anything, there's a circular
dependency (which _should_ be impossible as the operations are
all split at this point so they can't depend and be depended on).
"""
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
# Temporarily resolve the swappable dependency to
# prevent circular references. While keeping the
# dependency checks on the resolved model, add the
# swappable dependencies.
original_dep = dep
dep, is_swappable_dep = self._resolve_dependency(dep)
if dep[0] != app_label:
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(dep[0], []):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add((original_dep[0], original_dep[1]))
elif dep[0] in self.migrations:
operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
else:
# If we can't find the other app, we add a first/last dependency,
# but only if we've already been through once and checked everything
if chop_mode:
# If the app already exists, we add a dependency on the last migration,
# as we don't know which migration contains the target field.
# If it's not yet migrated or has no migrations, we use __first__
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
else:
operation_dependencies.add((dep[0], "__first__"))
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
del self.generated_operations[app_label][0]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
instance.dependencies = list(dependencies)
instance.operations = chopped
instance.initial = app_label not in self.existing_apps
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. Reordering may be needed so FKs work
nicely inside the same app.
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
# Resolve intra-app dependencies to handle circular
# references involving a swappable model.
dep = self._resolve_dependency(dep)[0]
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for migrations in self.migrations.values():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
def check_dependency(self, operation, dependency):
"""
Return True if the given operation depends on the given dependency,
False otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency,))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
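    # Illustrative sketch of the _auto_deps tuple format (hypothetical
    # labels; see check_dependency() above for the full dispatch):
    #   ('blog', 'author', None, True)      -> blog.Author must be created
    #   ('blog', 'author', None, False)     -> blog.Author must be deleted
    #   ('blog', 'author', 'name', True)    -> field Author.name was added
    #   ('blog', 'author', 'name', 'alter') -> field Author.name was altered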
def swappable_first_key(self, item):
"""
Place potential swappable models first in lists of created models (only
real way to solve #22783).
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
Find any renamed models, generate the operations for them, and remove
the old entry from the model lists. Must be run before other
model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = self.new_model_keys - self.old_model_keys
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = self.old_model_keys - self.new_model_keys
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(rem_model_state, model_state):
model_opts = self.new_apps.get_model(app_label, model_name)._meta
dependencies = []
for field in model_opts.get_fields():
if field.is_relation:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
),
dependencies=dependencies,
)
self.renamed_models[app_label, model_name] = rem_model_name
renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name)
self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % (
model_state.app_label,
model_state.name,
)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.add((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (these are optimized later, if
possible).
Defer any model options that refer to collections of fields that might
be deferred (e.g. unique_together, index_together).
"""
old_keys = self.old_model_keys | self.old_unmanaged_keys
added_models = self.new_model_keys - old_keys
added_unmanaged_models = self.new_unmanaged_keys - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
model_opts = self.new_apps.get_model(app_label, model_name)._meta
# Gather related fields
related_fields = {}
primary_key_rel = None
for field in model_opts.local_fields:
if field.remote_field:
if field.remote_field.model:
if field.primary_key:
primary_key_rel = field.remote_field.model
elif not field.remote_field.parent_link:
related_fields[field.name] = field
                    # through will be None on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
if (getattr(field.remote_field, "through", None) and
not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model_opts.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Are there indexes/unique|index_together to defer?
indexes = model_state.options.pop('indexes')
constraints = model_state.options.pop('constraints')
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append((
primary_key_rel._meta.app_label,
primary_key_rel._meta.object_name,
None,
True
))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[d for d in model_state.fields if d[0] not in related_fields],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_opts.managed:
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
dependencies = self._get_dependencies_for_foreign_key(field)
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
            # Generate other operations
related_dependencies = [
(app_label, model_name, name, True)
for name in sorted(related_fields)
]
related_dependencies.append((app_label, model_name, None, True))
for index in indexes:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
),
dependencies=related_dependencies,
)
for constraint in constraints:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
),
dependencies=related_dependencies,
)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies
)
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
]
)
# Fix relationships if the model changed from a proxy model to a
# concrete model.
if (app_label, model_name) in self.old_proxy_keys:
for related_object in model_opts.related_objects:
self.add_operation(
related_object.related_model._meta.app_label,
operations.AlterField(
model_name=related_object.related_model._meta.object_name,
name=related_object.field.name,
field=related_object.field,
),
dependencies=[(app_label, model_name, None, True)],
)
def generate_created_proxies(self):
"""
        Make CreateModel statements for proxy models. Use the same statements
        as for concrete models, since that way there's less code duplication,
        but for proxy models it's safe to skip all the pointless field stuff
        and just chuck out an operation.
"""
added = self.new_proxy_keys - self.old_proxy_keys
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (these are optimized later, if
possible).
Also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = self.new_model_keys | self.new_unmanaged_keys
deleted_models = self.old_model_keys - new_keys
deleted_unmanaged_models = self.old_unmanaged_keys - new_keys
all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
model = self.old_apps.get_model(app_label, model_name)
# Gather related fields
related_fields = {}
for field in model._meta.local_fields:
if field.remote_field:
if field.remote_field.model:
related_fields[field.name] = field
                    # through will be None on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
if (getattr(field.remote_field, "through", None) and
not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model._meta.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Generate option removal first
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
)
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
)
)
# Then remove each related field
for name in sorted(related_fields):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
)
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
for related_object in model._meta.related_objects:
related_object_app_label = related_object.related_model._meta.app_label
object_name = related_object.related_model._meta.object_name
field_name = related_object.field.name
dependencies.append((related_object_app_label, object_name, field_name, False))
if not related_object.many_to_many:
dependencies.append((related_object_app_label, object_name, field_name, "alter"))
for name in sorted(related_fields):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower))
if through_user:
dependencies.append((through_user[0], through_user[1], through_user[2], False))
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""Make DeleteModel options for proxy models."""
deleted = self.old_proxy_keys - self.new_proxy_keys
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""Work out renamed fields."""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
if rem_app_label == app_label and rem_model_name == model_name:
old_field = old_model_state.get_field_by_name(rem_field_name)
old_field_dec = self.deep_deconstruct(old_field)
if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
old_rel_to = old_field_dec[2]['to']
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
old_field.set_attributes_from_name(rem_field_name)
old_db_column = old_field.get_attname_column()[1]
if (old_field_dec == field_dec or (
# Was the field renamed and db_column equal to the
# old field's column added?
old_field_dec[0:2] == field_dec[0:2] and
dict(old_field_dec[2], db_column=old_db_column) == field_dec[2])):
if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
)
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[app_label, model_name, field_name] = rem_field_name
break
def generate_added_fields(self):
"""Make AddField operations."""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
        # Fields that are foreign keys or M2Ms depend on other models.
dependencies = []
if field.remote_field and field.remote_field.model:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
time_fields = (models.DateField, models.DateTimeField, models.TimeField)
preserve_default = (
field.null or field.has_default() or field.many_to_many or
(field.blank and field.empty_strings_allowed) or
(isinstance(field, time_fields) and field.auto_now)
)
if not preserve_default:
field = field.clone()
if isinstance(field, time_fields) and field.auto_now_add:
field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name)
else:
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""Make RemoveField operations."""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
"""
        Make AlterField operations, or possibly RemoveField/AddField if alter
        isn't possible.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys & self.new_field_keys):
# Did the field change?
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
rename_key = (
new_field.remote_field.model._meta.app_label,
new_field.remote_field.model._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.model = old_field.remote_field.model
# Handle ForeignKey which can only have a single to_field.
remote_field_name = getattr(new_field.remote_field, 'field_name', None)
if remote_field_name:
to_field_rename_key = rename_key + (remote_field_name,)
if to_field_rename_key in self.renamed_fields:
new_field.remote_field.field_name = old_field.remote_field.field_name
# Handle ForeignObjects which can have multiple from_fields/to_fields.
from_fields = getattr(new_field, 'from_fields', None)
if from_fields:
from_rename_key = (app_label, model_name)
new_field.from_fields = tuple([
self.renamed_fields.get(from_rename_key + (from_field,), from_field)
for from_field in from_fields
])
new_field.to_fields = tuple([
self.renamed_fields.get(rename_key + (to_field,), to_field)
for to_field in new_field.to_fields
])
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "through", None):
rename_key = (
new_field.remote_field.through._meta.app_label,
new_field.remote_field.through._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.through = old_field.remote_field.through
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
if old_field_dec != new_field_dec:
both_m2m = old_field.many_to_many and new_field.many_to_many
neither_m2m = not old_field.many_to_many and not new_field.many_to_many
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (old_field.null and not new_field.null and not new_field.has_default() and
not new_field.many_to_many):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
)
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def create_altered_indexes(self):
option_name = operations.AddIndex.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_indexes = old_model_state.options[option_name]
new_indexes = new_model_state.options[option_name]
add_idx = [idx for idx in new_indexes if idx not in old_indexes]
rem_idx = [idx for idx in old_indexes if idx not in new_indexes]
self.altered_indexes.update({
(app_label, model_name): {
'added_indexes': add_idx, 'removed_indexes': rem_idx,
}
})
def generate_added_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes['added_indexes']:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
)
)
def generate_removed_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes['removed_indexes']:
self.add_operation(
app_label,
operations.RemoveIndex(
model_name=model_name,
name=index.name,
)
)
def create_altered_constraints(self):
option_name = operations.AddConstraint.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_constraints = old_model_state.options[option_name]
new_constraints = new_model_state.options[option_name]
add_constraints = [c for c in new_constraints if c not in old_constraints]
rem_constraints = [c for c in old_constraints if c not in new_constraints]
self.altered_constraints.update({
(app_label, model_name): {
'added_constraints': add_constraints, 'removed_constraints': rem_constraints,
}
})
def generate_added_constraints(self):
for (app_label, model_name), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints['added_constraints']:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
)
)
def generate_removed_constraints(self):
for (app_label, model_name), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints['removed_constraints']:
self.add_operation(
app_label,
operations.RemoveConstraint(
model_name=model_name,
name=constraint.name,
)
)
def _get_dependencies_for_foreign_key(self, field):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True,
))
return dependencies
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name)
old_value = {
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
} if old_value else set()
new_value = new_model_state.options.get(option_name)
new_value = set(new_value) if new_value else set()
if old_value != new_value:
dependencies = []
for foo_togethers in new_value:
for field_name in foo_togethers:
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
if field.remote_field and field.remote_field.model:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
),
dependencies=dependencies,
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys, self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Work out if any non-schema-affecting options have changed and make an
operation to represent them in state changes (in case Python code in
migrations needs them).
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys,
self.kept_unmanaged_keys,
# unmanaged converted to managed
self.old_unmanaged_keys & self.new_model_keys,
# managed converted to unmanaged
self.old_model_keys & self.new_unmanaged_keys,
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = {
key: value for key, value in old_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
new_options = {
key: value for key, value in new_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to") !=
new_model_state.options.get("order_with_respect_to")):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Take a result from changes() and a MigrationGraph, and fix the names
and dependencies of the changes so they extend the graph from the leaf
nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
else:
new_name = "%04i_%s" % (
next_number,
migration_name or self.suggest_name(migration.operations)[:100],
)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for migrations in changes.values():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Take changes from arrange_for_graph() and set of app labels, and return
a modified set of changes which trims out as many migrations that are
not in app_labels as possible. Note that some other migrations may
still be present as they may be required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
required_apps.update(*[app_dependencies.get(app_label, ()) for app_label in required_apps])
# Remove all migrations that aren't needed
for app_label in list(changes):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggest a name for the migration they might
represent. Names are not guaranteed to be unique, but put some effort
into the fallback name to avoid VCS conflicts if possible.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif ops:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % get_migration_name_timestamp()
@classmethod
def parse_number(cls, name):
"""
Given a migration name, try to extract a number from the beginning of
it. If no number is found, return None.
"""
match = re.match(r'^\d+', name)
if match:
return int(match.group())
return None
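    # Illustrative: parse_number('0002_remove_author_age') returns 2;
    # parse_number('initial') returns None.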
|
dc33d47afb520d2b80f564ceca5c9cfbcced129f0318b69e9d7d29bb0578f343 | import builtins
import collections.abc
import datetime
import decimal
import enum
import functools
import math
import re
import types
import uuid
from collections import OrderedDict
from django.conf import SettingsReference
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils.functional import LazyObject, Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
class BaseSerializer:
def __init__(self, value):
self.value = value
def serialize(self):
raise NotImplementedError('Subclasses of BaseSerializer must implement the serialize() method.')
class BaseSequenceSerializer(BaseSerializer):
def _format(self):
raise NotImplementedError('Subclasses of BaseSequenceSerializer must implement the _format() method.')
def serialize(self):
imports = set()
strings = []
for item in self.value:
item_string, item_imports = serializer_factory(item).serialize()
imports.update(item_imports)
strings.append(item_string)
value = self._format()
return value % (", ".join(strings)), imports
class BaseSimpleSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), set()
class DateTimeSerializer(BaseSerializer):
"""For datetime.*, except datetime.datetime."""
def serialize(self):
return repr(self.value), {'import datetime'}
class DatetimeDatetimeSerializer(BaseSerializer):
"""For datetime.datetime."""
def serialize(self):
if self.value.tzinfo is not None and self.value.tzinfo != utc:
self.value = self.value.astimezone(utc)
imports = ["import datetime"]
if self.value.tzinfo is not None:
imports.append("from django.utils.timezone import utc")
return repr(self.value).replace('<UTC>', 'utc'), set(imports)
class DecimalSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), {"from decimal import Decimal"}
class DeconstructableSerializer(BaseSerializer):
@staticmethod
def serialize_deconstructed(path, args, kwargs):
name, imports = DeconstructableSerializer._serialize_path(path)
strings = []
for arg in args:
arg_string, arg_imports = serializer_factory(arg).serialize()
strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in sorted(kwargs.items()):
arg_string, arg_imports = serializer_factory(arg).serialize()
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(strings)), imports
@staticmethod
def _serialize_path(path):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = {"from django.db import models"}
name = "models.%s" % name
else:
imports = {"import %s" % module}
name = path
return name, imports
def serialize(self):
return self.serialize_deconstructed(*self.value.deconstruct())
class DictionarySerializer(BaseSerializer):
def serialize(self):
imports = set()
strings = []
for k, v in sorted(self.value.items()):
k_string, k_imports = serializer_factory(k).serialize()
v_string, v_imports = serializer_factory(v).serialize()
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
class EnumSerializer(BaseSerializer):
def serialize(self):
enum_class = self.value.__class__
module = enum_class.__module__
v_string, v_imports = serializer_factory(self.value.value).serialize()
imports = {'import %s' % module, *v_imports}
return "%s.%s(%s)" % (module, enum_class.__name__, v_string), imports
class FloatSerializer(BaseSimpleSerializer):
def serialize(self):
if math.isnan(self.value) or math.isinf(self.value):
return 'float("{}")'.format(self.value), set()
return super().serialize()
class FrozensetSerializer(BaseSequenceSerializer):
def _format(self):
return "frozenset([%s])"
class FunctionTypeSerializer(BaseSerializer):
def serialize(self):
if getattr(self.value, "__self__", None) and isinstance(self.value.__self__, type):
klass = self.value.__self__
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), {"import %s" % module}
# Further error checking
if self.value.__name__ == '<lambda>':
raise ValueError("Cannot serialize function: lambda")
if self.value.__module__ is None:
raise ValueError("Cannot serialize function %r: No module" % self.value)
module_name = self.value.__module__
if '<' not in self.value.__qualname__: # Qualname can include <locals>
return '%s.%s' % (module_name, self.value.__qualname__), {'import %s' % self.value.__module__}
raise ValueError(
'Could not find function %s in %s.\n' % (self.value.__name__, module_name)
)
class FunctoolsPartialSerializer(BaseSerializer):
def serialize(self):
# Serialize functools.partial() arguments
func_string, func_imports = serializer_factory(self.value.func).serialize()
args_string, args_imports = serializer_factory(self.value.args).serialize()
keywords_string, keywords_imports = serializer_factory(self.value.keywords).serialize()
# Add any imports needed by arguments
imports = {'import functools', *func_imports, *args_imports, *keywords_imports}
return (
'functools.%s(%s, *%s, **%s)' % (
self.value.__class__.__name__,
func_string,
args_string,
keywords_string,
),
imports,
)
class IterableSerializer(BaseSerializer):
def serialize(self):
imports = set()
strings = []
for item in self.value:
item_string, item_imports = serializer_factory(item).serialize()
imports.update(item_imports)
strings.append(item_string)
        # An empty iterable must serialize as "()" and a single-element one
        # as "(x,)" -- "(,)" is invalid syntax and "(x)" is not a tuple --
        # hence the special case for len(strings) == 1.
value = "(%s)" if len(strings) != 1 else "(%s,)"
return value % (", ".join(strings)), imports
class ModelFieldSerializer(DeconstructableSerializer):
def serialize(self):
attr_name, path, args, kwargs = self.value.deconstruct()
return self.serialize_deconstructed(path, args, kwargs)
class ModelManagerSerializer(DeconstructableSerializer):
def serialize(self):
as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct()
if as_manager:
name, imports = self._serialize_path(qs_path)
return "%s.as_manager()" % name, imports
else:
return self.serialize_deconstructed(manager_path, args, kwargs)
class OperationSerializer(BaseSerializer):
def serialize(self):
from django.db.migrations.writer import OperationWriter
string, imports = OperationWriter(self.value, indentation=0).serialize()
        # Nested operation; the trailing comma is handled by the outer
        # OperationWriter._write().
return string.rstrip(','), imports
class RegexSerializer(BaseSerializer):
def serialize(self):
regex_pattern, pattern_imports = serializer_factory(self.value.pattern).serialize()
# Turn off default implicit flags (e.g. re.U) because regexes with the
# same implicit and explicit flags aren't equal.
flags = self.value.flags ^ re.compile('').flags
regex_flags, flag_imports = serializer_factory(flags).serialize()
imports = {'import re', *pattern_imports, *flag_imports}
args = [regex_pattern]
if flags:
args.append(regex_flags)
return "re.compile(%s)" % ', '.join(args), imports
class SequenceSerializer(BaseSequenceSerializer):
def _format(self):
return "[%s]"
class SetSerializer(BaseSequenceSerializer):
def _format(self):
# Serialize as a set literal except when value is empty because {}
# is an empty dict.
return '{%s}' if self.value else 'set(%s)'
class SettingsReferenceSerializer(BaseSerializer):
def serialize(self):
return "settings.%s" % self.value.setting_name, {"from django.conf import settings"}
class TupleSerializer(BaseSequenceSerializer):
def _format(self):
        # An empty tuple must serialize as "()" and a single-element tuple
        # as "(x,)" -- "(,)" is invalid syntax and "(x)" is not a tuple.
return "(%s)" if len(self.value) != 1 else "(%s,)"
class TypeSerializer(BaseSerializer):
def serialize(self):
special_cases = [
(models.Model, "models.Model", []),
(type(None), 'type(None)', []),
]
for case, string, imports in special_cases:
if case is self.value:
return string, set(imports)
if hasattr(self.value, "__module__"):
module = self.value.__module__
if module == builtins.__name__:
return self.value.__name__, set()
else:
return "%s.%s" % (module, self.value.__name__), {"import %s" % module}
class UUIDSerializer(BaseSerializer):
def serialize(self):
return "uuid.%s" % repr(self.value), {"import uuid"}
class Serializer:
_registry = OrderedDict([
(frozenset, FrozensetSerializer),
(list, SequenceSerializer),
(set, SetSerializer),
(tuple, TupleSerializer),
(dict, DictionarySerializer),
(enum.Enum, EnumSerializer),
(datetime.datetime, DatetimeDatetimeSerializer),
((datetime.date, datetime.timedelta, datetime.time), DateTimeSerializer),
(SettingsReference, SettingsReferenceSerializer),
(float, FloatSerializer),
((bool, int, type(None), bytes, str), BaseSimpleSerializer),
(decimal.Decimal, DecimalSerializer),
((functools.partial, functools.partialmethod), FunctoolsPartialSerializer),
((types.FunctionType, types.BuiltinFunctionType, types.MethodType), FunctionTypeSerializer),
(collections.abc.Iterable, IterableSerializer),
((COMPILED_REGEX_TYPE, RegexObject), RegexSerializer),
(uuid.UUID, UUIDSerializer),
])
@classmethod
def register(cls, type_, serializer):
if not issubclass(serializer, BaseSerializer):
raise ValueError("'%s' must inherit from 'BaseSerializer'." % serializer.__name__)
cls._registry[type_] = serializer
@classmethod
def unregister(cls, type_):
cls._registry.pop(type_)
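# A minimal sketch of registering a custom serializer; PathSerializer and the
# pathlib support are hypothetical, not part of this module:
#
#     import pathlib
#
#     class PathSerializer(BaseSerializer):
#         def serialize(self):
#             return 'pathlib.%r' % self.value, {'import pathlib'}
#
#     Serializer.register(pathlib.PurePath, PathSerializer)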
def serializer_factory(value):
if isinstance(value, Promise):
value = str(value)
elif isinstance(value, LazyObject):
# The unwrapped value is returned as the first item of the arguments
# tuple.
value = value.__reduce__()[1][0]
if isinstance(value, models.Field):
return ModelFieldSerializer(value)
if isinstance(value, models.manager.BaseManager):
return ModelManagerSerializer(value)
if isinstance(value, Operation):
return OperationSerializer(value)
if isinstance(value, type):
return TypeSerializer(value)
# Anything that knows how to deconstruct itself.
if hasattr(value, 'deconstruct'):
return DeconstructableSerializer(value)
for type_, serializer_cls in Serializer._registry.items():
if isinstance(value, type_):
return serializer_cls(value)
raise ValueError(
"Cannot serialize: %r\nThere are some values Django cannot serialize into "
"migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
"topics/migrations/#migration-serializing" % (value, get_docs_version())
)
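# Illustrative round trip:
#     string, imports = serializer_factory(datetime.date(2019, 1, 1)).serialize()
#     # string == 'datetime.date(2019, 1, 1)', imports == {'import datetime'}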
|
1aa401c76018749ed1769af29da7a33157c4d155af5b846b47d40f15bd460882 | """
Classes to represent the definitions of aggregate functions.
"""
from django.core.exceptions import FieldError
from django.db.models.expressions import Case, Func, Star, When
from django.db.models.fields import IntegerField
from django.db.models.functions.mixins import (
FixDurationInputMixin, NumericOutputFieldMixin,
)
__all__ = [
'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance',
]
class Aggregate(Func):
template = '%(function)s(%(distinct)s%(expressions)s)'
contains_aggregate = True
name = None
filter_template = '%s FILTER (WHERE %%(filter)s)'
window_compatible = True
allow_distinct = False
def __init__(self, *expressions, distinct=False, filter=None, **extra):
if distinct and not self.allow_distinct:
raise TypeError("%s does not allow distinct." % self.__class__.__name__)
self.distinct = distinct
self.filter = filter
super().__init__(*expressions, **extra)
def get_source_fields(self):
# Don't return the filter expression since it's not a source field.
return [e._output_field_or_none for e in super().get_source_expressions()]
def get_source_expressions(self):
source_expressions = super().get_source_expressions()
if self.filter:
return source_expressions + [self.filter]
return source_expressions
def set_source_expressions(self, exprs):
self.filter = self.filter and exprs.pop()
return super().set_source_expressions(exprs)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# Aggregates are not allowed in UPDATE queries, so ignore for_save
c = super().resolve_expression(query, allow_joins, reuse, summarize)
c.filter = c.filter and c.filter.resolve_expression(query, allow_joins, reuse, summarize)
if not summarize:
# Call Aggregate.get_source_expressions() to avoid
# returning self.filter and including that in this loop.
expressions = super(Aggregate, c).get_source_expressions()
for index, expr in enumerate(expressions):
if expr.contains_aggregate:
before_resolved = self.get_source_expressions()[index]
name = before_resolved.name if hasattr(before_resolved, 'name') else repr(before_resolved)
raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (c.name, name, name))
return c
@property
def default_alias(self):
expressions = self.get_source_expressions()
if len(expressions) == 1 and hasattr(expressions[0], 'name'):
return '%s__%s' % (expressions[0].name, self.name.lower())
raise TypeError("Complex expressions require an alias")
def get_group_by_cols(self):
return []
def as_sql(self, compiler, connection, **extra_context):
extra_context['distinct'] = 'DISTINCT' if self.distinct else ''
if self.filter:
if connection.features.supports_aggregate_filter_clause:
filter_sql, filter_params = self.filter.as_sql(compiler, connection)
template = self.filter_template % extra_context.get('template', self.template)
sql, params = super().as_sql(
compiler, connection, template=template, filter=filter_sql,
**extra_context
)
return sql, params + filter_params
else:
copy = self.copy()
copy.filter = None
source_expressions = copy.get_source_expressions()
condition = When(self.filter, then=source_expressions[0])
copy.set_source_expressions([Case(condition)] + source_expressions[1:])
return super(Aggregate, copy).as_sql(compiler, connection, **extra_context)
return super().as_sql(compiler, connection, **extra_context)
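    # Illustrative: on a backend without FILTER support, an aggregate such as
    # Count('id', filter=Q(age__gt=30)) (field names hypothetical) is
    # emulated above as COUNT(CASE WHEN age > 30 THEN id ELSE NULL END).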
def _get_repr_options(self):
options = super()._get_repr_options()
if self.distinct:
options['distinct'] = self.distinct
if self.filter:
options['filter'] = self.filter
return options
class Avg(FixDurationInputMixin, NumericOutputFieldMixin, Aggregate):
function = 'AVG'
name = 'Avg'
class Count(Aggregate):
function = 'COUNT'
name = 'Count'
output_field = IntegerField()
allow_distinct = True
def __init__(self, expression, filter=None, **extra):
if expression == '*':
expression = Star()
if isinstance(expression, Star) and filter is not None:
raise ValueError('Star cannot be used with filter. Please specify a field.')
super().__init__(expression, filter=filter, **extra)
def convert_value(self, value, expression, connection):
return 0 if value is None else value
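    # Illustrative usage (Book/authors hypothetical):
    #     Book.objects.aggregate(n=Count('authors', distinct=True))
    # Count('*') compiles to COUNT(*) via Star, and convert_value() maps a
    # NULL result to 0 so the aggregate always returns an integer.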
class Max(Aggregate):
function = 'MAX'
name = 'Max'
class Min(Aggregate):
function = 'MIN'
name = 'Min'
class StdDev(NumericOutputFieldMixin, Aggregate):
name = 'StdDev'
def __init__(self, expression, sample=False, **extra):
self.function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
super().__init__(expression, **extra)
def _get_repr_options(self):
return {**super()._get_repr_options(), 'sample': self.function == 'STDDEV_SAMP'}
class Sum(FixDurationInputMixin, Aggregate):
function = 'SUM'
name = 'Sum'
class Variance(NumericOutputFieldMixin, Aggregate):
name = 'Variance'
def __init__(self, expression, sample=False, **extra):
self.function = 'VAR_SAMP' if sample else 'VAR_POP'
super().__init__(expression, **extra)
def _get_repr_options(self):
return {**super()._get_repr_options(), 'sample': self.function == 'VAR_SAMP'}
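# Illustrative: StdDev('price') and Variance('price') use the population
# forms STDDEV_POP/VAR_POP by default; sample=True selects
# STDDEV_SAMP/VAR_SAMP instead ('price' is a hypothetical field).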
|
eb2d4546fea37272974d26b4a3cae2417afdd479339291a2406974530ff3b7e7 | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from collections import OrderedDict, namedtuple
from functools import lru_cache
from itertools import chain
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, Expression, F, Value, When
from django.db.models.fields import AutoField
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, InvalidQuery, Q
from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
from django.db.utils import NotSupportedError
from django.utils import timezone
from django.utils.functional import cached_property, partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable:
def __init__(self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
self.chunk_size = chunk_size
class ModelIterable(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
related_populators = get_related_populators(klass_info, select, db)
known_related_objects = [
(field, related_objs, operator.attrgetter(*[
field.attname
if from_field == 'self' else
queryset.model._meta.get_field(from_field).attname
for from_field in field.from_fields
])) for field, related_objs in queryset._known_related_objects.items()
]
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model.
for field, rel_objs, rel_getter in known_related_objects:
# Avoid overwriting objects loaded by, e.g., select_related().
if field.is_cached(obj):
continue
rel_obj_id = rel_getter(obj)
try:
rel_obj = rel_objs[rel_obj_id]
except KeyError:
pass # May happen in qs1 | qs2 scenarios.
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
indexes = range(len(names))
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
yield {names[i]: row[i] for i in indexes}
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if queryset._fields:
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
fields = [*queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields)]
if fields != names:
# Reorder according to fields.
index_map = {name: idx for idx, name in enumerate(names)}
rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
return map(
rowfactory,
compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
)
return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
class NamedValuesListIterable(ValuesListIterable):
"""
Iterable returned by QuerySet.values_list(named=True) that yields a
namedtuple for each row.
"""
@staticmethod
@lru_cache()
def create_namedtuple_class(*names):
# Cache namedtuple() with @lru_cache() since it's too slow to be
# called for every QuerySet evaluation.
return namedtuple('Row', names)
def __iter__(self):
queryset = self.queryset
if queryset._fields:
names = queryset._fields
else:
query = queryset.query
names = [*query.extra_select, *query.values_select, *query.annotation_select]
tuple_class = self.create_namedtuple_class(*names)
new = tuple.__new__
for row in super().__iter__():
yield new(tuple_class, row)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
yield row[0]
class QuerySet:
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
        # Address the circular dependency between `QuerySet` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: get_version()}
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "…(remaining elements truncated)…"
return '<%s %r>' % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0]
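    # Illustrative: qs[:5] stays lazy and applies LIMIT 5; qs[10:20:2]
    # evaluates immediately because of the step; qs[0] fetches one row
    # via LIMIT 1.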
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._chain()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values('pk'))
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values('pk'))
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def _iterator(self, use_chunked_fetch, chunk_size):
yield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size)
def iterator(self, chunk_size=2000):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
if chunk_size <= 0:
raise ValueError('Chunk size must be strictly positive.')
use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS')
return self._iterator(use_chunked_fetch, chunk_size)
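    # Illustrative: stream a large table without populating the result cache
    # (Entry is a hypothetical model):
    #     for entry in Entry.objects.iterator(chunk_size=500):
    #         process(entry)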
def aggregate(self, *args, **kwargs):
"""
Return a dictionary containing the calculations (aggregation)
over the current queryset.
        If args is present, each expression is passed as a kwarg using
        the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions((*args, *kwargs.values()), method_name='aggregate')
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.chain()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs)
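    # Illustrative (Book/price hypothetical):
    #     Book.objects.aggregate(Avg('price'))           # {'price__avg': ...}
    #     Book.objects.aggregate(cheapest=Min('price'))  # {'cheapest': ...}
    # The first form relies on default_alias, which is why unnamed complex
    # expressions raise TypeError above.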
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Perform the query and return a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_ids_from_bulk_insert=True), so
        # you can't insert into the child tables which reference this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
        # Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size, ignore_conflicts=ignore_conflicts)
for obj_with_pk in objs_with_pk:
obj_with_pk._state.adding = False
obj_with_pk._state.db = self.db
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
ids = self._batched_insert(objs_without_pk, fields, batch_size, ignore_conflicts=ignore_conflicts)
if connection.features.can_return_ids_from_bulk_insert and not ignore_conflicts:
assert len(ids) == len(objs_without_pk)
for obj_without_pk, pk in zip(objs_without_pk, ids):
obj_without_pk.pk = pk
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
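    # Illustrative (Entry is a hypothetical model):
    #     Entry.objects.bulk_create([Entry(headline='a'), Entry(headline='b')])
    # On backends with can_return_ids_from_bulk_insert (e.g. PostgreSQL) the
    # new primary keys are assigned back to the created instances.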
def bulk_update(self, objs, fields, batch_size=None):
"""
Update the given fields in each of the given objects in the database.
"""
if batch_size is not None and batch_size < 0:
raise ValueError('Batch size must be a positive integer.')
if not fields:
raise ValueError('Field names must be given to bulk_update().')
objs = tuple(objs)
if any(obj.pk is None for obj in objs):
raise ValueError('All bulk_update() objects must have a primary key set.')
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError('bulk_update() can only be used with concrete fields.')
if any(f.primary_key for f in fields):
raise ValueError('bulk_update() cannot be used with primary key fields.')
if not objs:
return
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
max_batch_size = connections[self.db].ops.bulk_batch_size(['pk', 'pk'] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connections[self.db].features.requires_casted_case_in_updates
batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not isinstance(attr, Expression):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
self.filter(pk__in=pks).update(**update_kwargs)
bulk_update.alters_data = True
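    # Illustrative (Entry/rating hypothetical):
    #     entries = list(Entry.objects.all())
    #     for e in entries:
    #         e.rating += 1
    #     Entry.objects.bulk_update(entries, ['rating'], batch_size=100)
    # Each batch becomes a single UPDATE with one CASE WHEN pk=... THEN ...
    # per object and field.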
def get_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
return self._create_object_from_params(kwargs, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
self._for_write = True
with transaction.atomic(using=self.db):
try:
obj = self.select_for_update().get(**kwargs)
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
# Lock the row so that a concurrent update is blocked until
# after update_or_create() has performed its save.
obj, created = self._create_object_from_params(kwargs, params, lock=True)
if created:
return obj, created
for k, v in defaults.items():
setattr(obj, k, v() if callable(v) else v)
obj.save(using=self.db)
return obj, False
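    # Illustrative (Person is a hypothetical model):
    #     obj, created = Person.objects.update_or_create(
    #         username='jdoe', defaults={'first_name': 'John'},
    #     )
    # select_for_update() above blocks concurrent writers to the matched row
    # until the surrounding atomic block commits.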
def _create_object_from_params(self, lookup, params, lock=False):
"""
Try to create an object using passed params. Used by get_or_create()
and update_or_create().
"""
try:
with transaction.atomic(using=self.db):
params = {k: v() if callable(v) else v for k, v in params.items()}
obj = self.create(**params)
return obj, True
except IntegrityError as e:
try:
qs = self.select_for_update() if lock else self
return qs.get(**lookup), False
except self.model.DoesNotExist:
pass
raise e
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `params` for creating a model instance based on the given
kwargs; for use by get_or_create() and update_or_create().
"""
defaults = defaults or {}
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
property_names = self.model._meta._property_names
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
# It's okay to use a model's property if it has a setter.
if not (param in property_names and getattr(self.model, param).fset):
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'." % (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
))
return params
def _earliest(self, *fields):
"""
Return the earliest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if fields:
order_by = fields
else:
order_by = getattr(self.model._meta, 'get_latest_by')
if order_by and not isinstance(order_by, (tuple, list)):
order_by = (order_by,)
if order_by is None:
raise ValueError(
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta."
)
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._chain()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering(*order_by)
return obj.get()
def earliest(self, *fields):
return self._earliest(*fields)
def latest(self, *fields):
return self.reverse()._earliest(*fields)
def first(self):
"""Return the first object of a query or None if no match is found."""
for obj in (self if self.ordered else self.order_by('pk'))[:1]:
return obj
def last(self):
"""Return the last object of a query or None if no match is found."""
for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]:
return obj
def in_bulk(self, id_list=None, *, field_name='pk'):
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if field_name != 'pk' and not self.model._meta.get_field(field_name).unique:
raise ValueError("in_bulk()'s field_name must be a unique field but %r isn't." % field_name)
if id_list is not None:
if not id_list:
return {}
filter_key = '{}__in'.format(field_name)
batch_size = connections[self.db].features.max_query_params
id_list = tuple(id_list)
# If the database has a limit on the number of query parameters
# (e.g. SQLite), retrieve objects in batches if necessary.
if batch_size and batch_size < len(id_list):
qs = ()
for offset in range(0, len(id_list), batch_size):
batch = id_list[offset:offset + batch_size]
qs += tuple(self.filter(**{filter_key: batch}).order_by())
else:
qs = self.filter(**{filter_key: id_list}).order_by()
else:
qs = self._chain()
return {getattr(obj, field_name): obj for obj in qs}
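    # Illustrative: Blog.objects.in_bulk([1, 2]) -> {1: <Blog: 1>, 2: <Blog: 2>}
    # (Blog hypothetical); on backends with a parameter limit such as SQLite,
    # id_list is fetched in batches of at most max_query_params.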
def delete(self):
"""Delete the records in the current QuerySet."""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._chain()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Delete objects found from the given queryset in single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.chain(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query._annotations = None
with transaction.mark_for_rollback_on_error(using=self.db):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.chain(sql.UpdateQuery)
query.add_update_fields(values)
# Clear any annotations so that they won't be present in subqueries.
query._annotations = None
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def explain(self, *, format=None, **options):
return self.query.explain(using=self.db, format=format, **options)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
qs = RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
return qs
def _values(self, *fields, **expressions):
clone = self._chain()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False, named=False):
if flat and named:
raise TypeError("'flat' and 'named' can't be used together.")
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
field_names = {f for f in fields if not hasattr(f, 'resolve_expression')}
_fields = []
expressions = {}
counter = 1
for field in fields:
if hasattr(field, 'resolve_expression'):
field_id_prefix = getattr(field, 'default_alias', field.__class__.__name__.lower())
while True:
field_id = field_id_prefix + str(counter)
counter += 1
if field_id not in field_names:
break
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = (
NamedValuesListIterable if named
else FlatValuesListIterable if flat
else ValuesListIterable
)
return clone
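    # Illustrative (Entry is a hypothetical model):
    #     Entry.objects.values_list('id', 'headline')  # [(1, 'First'), ...]
    #     Entry.objects.values_list('id', flat=True)   # [1, 2, ...]
    #     Entry.objects.values_list('id', named=True)  # [Row(id=1), ...]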
def dates(self, field_name, kind, order='ASC'):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ('year', 'month', 'week', 'day'), \
"'kind' must be one of 'year', 'month', 'week', or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'), \
"'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._chain()
def filter(self, *args, **kwargs):
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._chain()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._chain()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
# If the query is an EmptyQuerySet, combine all nonempty querysets.
if isinstance(self, EmptyQuerySet):
qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
return qs[0]._combinator_query('union', *qs[1:], all=all) if qs else self
return self._combinator_query('union', *other_qs, all=all)
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query('intersection', *other_qs)
def difference(self, *other_qs):
# If the query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
return self._combinator_query('difference', *other_qs)
def select_for_update(self, nowait=False, skip_locked=False, of=()):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError('The nowait option cannot be used with skip_locked.')
obj = self._chain()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
obj.query.select_for_update_of = of
return obj
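    # Illustrative, inside transaction.atomic() (Entry/status hypothetical):
    #     Entry.objects.select_for_update(skip_locked=True).filter(status='new')
    # emits SELECT ... FOR UPDATE SKIP LOCKED on supporting backends.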
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._chain()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
"""
clone = self._chain()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
for lookup in lookups:
if isinstance(lookup, Prefetch):
lookup = lookup.prefetch_to
lookup = lookup.split(LOOKUP_SEP, 1)[0]
if lookup in self.query._filtered_relations:
raise ValueError('prefetch_related() is not supported with FilteredRelation.')
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
self._validate_values_are_expressions(args + tuple(kwargs.values()), method_name='annotate')
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError.
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except TypeError:
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._chain()
names = self._fields
if names is None:
names = set(chain.from_iterable(
(field.name, field.attname) if hasattr(field, 'attname') else (field.name,)
for field in self.model._meta.get_fields()
))
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
if isinstance(annotation, FilteredRelation):
clone.query.add_filtered_relation(annotation, alias)
else:
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
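    # Illustrative (Blog/entry hypothetical):
    #     Blog.objects.annotate(n_entries=Count('entry'))
    # Each returned Blog then carries an n_entries attribute; aggregate
    # annotations trigger the group_by handling above.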
def order_by(self, *field_names):
"""Return a new QuerySet instance with the ordering changed."""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._chain()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Return a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._chain()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""Add extra SQL fragments to the query."""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._chain()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
if not self.query.can_filter():
raise TypeError('Cannot reverse a query once a slice has been taken.')
clone = self._chain()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._chain()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
            # Can only pass None to defer(), not only(), as the reset option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
for field in fields:
field = field.split(LOOKUP_SEP, 1)[0]
if field in self.query._filtered_relations:
raise ValueError('only() is not supported with FilteredRelation.')
clone = self._chain()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._chain()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
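    # Usage sketch for the ordered property below (hedged; ``Entry`` is a
    # hypothetical model without a default Meta.ordering):
    #
    #     Entry.objects.all().ordered            # False
    #     Entry.objects.order_by('pk').ordered   # True
    #     Entry.objects.none().ordered           # True (empty queryset)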
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model (or is empty).
"""
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None, ignore_conflicts=False):
"""
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False):
"""
Helper method for bulk_create() to insert objs one batch at a time.
"""
if ignore_conflicts and not connections[self.db].features.supports_ignore_conflicts:
raise NotSupportedError('This database backend does not support ignoring conflicts.')
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
inserted_ids = []
bulk_return = connections[self.db].features.can_return_ids_from_bulk_insert
for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
if bulk_return and not ignore_conflicts:
inserted_id = self._insert(
item, fields=fields, using=self.db, return_id=True,
ignore_conflicts=ignore_conflicts,
)
if isinstance(inserted_id, list):
inserted_ids.extend(inserted_id)
else:
inserted_ids.append(inserted_id)
else:
self._insert(item, fields=fields, using=self.db, ignore_conflicts=ignore_conflicts)
return inserted_ids
def _chain(self, **kwargs):
"""
Return a copy of the current QuerySet that's ready for another
operation.
"""
obj = self._clone()
if obj._sticky_filter:
obj.query.filter_is_sticky = True
obj._sticky_filter = False
obj.__dict__.update(kwargs)
return obj
def _clone(self):
"""
Return a copy of the current QuerySet. A lightweight alternative
to deepcopy().
"""
c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints)
c._sticky_filter = self._sticky_filter
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c._iterable_class = self._iterable_class
c._fields = self._fields
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def resolve_expression(self, *args, **kwargs):
if self._fields and len(self._fields) > 1:
            # A values() queryset can be used as a nested query only if it is
            # set up to select a single field.
raise TypeError('Cannot use multi-field values as a filter value.')
query = self.query.resolve_expression(*args, **kwargs)
query._db = self._db
return query
resolve_expression.queryset_only = True
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
Check if this QuerySet has any filtering going on. This isn't
        equivalent to checking if all objects are present in results, for
example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(str(arg) for arg in values if not hasattr(arg, 'resolve_expression'))
if invalid_args:
raise TypeError(
'QuerySet.%s() received non-expression(s): %s.' % (
method_name,
', '.join(invalid_args),
)
)
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
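# A short illustration for EmptyQuerySet below (hedged; ``Author`` is a
# hypothetical model):
#
#     qs = Author.objects.none()
#     isinstance(qs, EmptyQuerySet)  # True, via InstanceCheckMeta
#     isinstance(qs, QuerySet)       # also True; none() returns a QuerySet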
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
    Marker class for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
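# Usage sketch for RawQuerySet below (hedged; ``Person`` is a hypothetical
# model whose table has a ``first`` column mapped to the ``first_name``
# field). RawQuerySet instances are normally created via Manager.raw():
#
#     people = Person.objects.raw(
#         'SELECT * FROM myapp_person WHERE last_name = %s',
#         params=['Lennon'],
#         translations={'first': 'first_name'},
#     )
#     for p in people:
#         print(p.first_name)
#
# The raw SQL must select the primary key column; iterator() raises
# InvalidQuery otherwise.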
class RawQuerySet:
"""
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
self._result_cache = None
self._prefetch_related_lookups = ()
self._prefetch_done = False
def resolve_model_init_order(self):
"""Resolve the init field names and value positions."""
converter = connections[self.db].introspection.identifier_converter
model_init_fields = [f for f in self.model._meta.fields if converter(f.column) in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def prefetch_related(self, *lookups):
"""Same as QuerySet.prefetch_related()"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def _prefetch_related_objects(self):
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def _clone(self):
"""Same as QuerySet._clone()"""
c = self.__class__(
self.raw_query, model=self.model, query=self.query, params=self.params,
translations=self.translations, using=self._db, hints=self._hints
)
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __iter__(self):
self._fetch_all()
return iter(self._result_cache)
def iterator(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
if self.model._meta.pk.attname not in model_init_names:
raise InvalidQuery('Raw query must include the primary key')
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
if converters:
query = compiler.apply_converters(query, converters)
for values in query:
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.chain(using=alias),
params=self.params, translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
# Ignore translations for nonexistent column names
try:
index = columns.index(query_name)
except ValueError:
pass
else:
columns[index] = model_name
return columns
@cached_property
def model_fields(self):
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.identifier_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and not issubclass(queryset._iterable_class, ModelIterable):
raise ValueError('Prefetch querysets cannot use values().')
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
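    # Usage sketch (hedged; ``Author``/``Book`` are hypothetical models with
    # a ``book_set`` reverse relation):
    #
    #     Author.objects.prefetch_related(
    #         Prefetch(
    #             'book_set',
    #             queryset=Book.objects.filter(published=True),
    #             to_attr='published_books',
    #         ),
    #     )
    #
    # With to_attr, prefetch_to is rewritten so the results are stored as a
    # list on ``author.published_books`` instead of in the related manager's
    # cache.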
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
# Prevent the QuerySet from being evaluated
obj_dict['queryset'] = self.queryset._chain(
_result_cache=[],
_prefetch_done=True,
)
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
return isinstance(other, Prefetch) and self.prefetch_to == other.prefetch_to
def __hash__(self):
return hash((self.__class__, self.prefetch_to))
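# For example (hedged sketch for normalize_prefetch_lookups() below):
#
#     normalize_prefetch_lookups(['book_set'], prefix='authors')
#
# returns a single Prefetch whose prefetch_through and prefetch_to are both
# 'authors__book_set'.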
def normalize_prefetch_lookups(lookups, prefix=None):
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
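# Usage sketch for prefetch_related_objects() below (hedged; ``Author`` is a
# hypothetical model). This is the function behind
# QuerySet.prefetch_related() and can also be called directly on an
# already-fetched list of instances:
#
#     authors = list(Author.objects.all())
#     prefetch_related_objects(authors, 'book_set')
#
# Afterwards, ``author.book_set.all()`` is served from the prefetch cache
# with no additional queries.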
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if not model_instances:
return # nothing to do
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
while all_lookups:
lookup = all_lookups.pop()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if not obj_list:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
            # We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to the first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(reversed(additional_lookups), prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extend(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
if through_attr in getattr(obj, '_prefetched_objects_cache', ()):
# If related objects have been prefetched, use the
# cache rather than the object's through_attr.
new_obj = list(obj._prefetched_objects_cache.get(through_attr))
else:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
    Return a 4-tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
        # A singly related object: the descriptor object itself has the
        # get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
is_fetched = to_attr in instance.__dict__
else:
is_fetched = hasattr(instance, to_attr)
else:
is_fetched = through_attr in instance._prefetched_objects_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache or field name to assign to,
# boolean that is True when the previous argument is a cache name vs a field name).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', ())
]
if additional_lookups:
        # Don't need to clone because the manager should have given us a fresh
        # instance, so we access an internal attribute instead of using the
        # public interface, for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
        # We assume that objects retrieved are homogeneous (which is the premise
        # of prefetch_related), so what applies to the first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
if as_attr:
# A to_attr has been given for the prefetch.
setattr(obj, to_attr, val)
elif is_descriptor:
# cache_name points to a field name in obj.
# This field is a descriptor for a related object.
setattr(obj, cache_name, val)
else:
# No to_attr has been given for this prefetch operation and the
# cache_name does not point to a descriptor. Store the value of
# the field in the object's field cache.
obj._state.fields_cache[cache_name] = val
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - local_setter, remote_setter: Methods to set cached values on
# the object being populated and on the remote object. Usually
# these are Field.set_cached_value() methods.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
attname_indexes = {select[idx][0].target.attname: idx for idx in select_fields}
model_init_attnames = (f.attname for f in klass_info['model']._meta.concrete_fields)
self.init_list = [attname for attname in model_init_attnames if attname in attname_indexes]
self.reorder_for_init = operator.itemgetter(*[attname_indexes[attname] for attname in self.init_list])
self.model_cls = klass_info['model']
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
self.local_setter = klass_info['local_setter']
self.remote_setter = klass_info['remote_setter']
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
self.local_setter(from_obj, obj)
if obj is not None:
self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
|
a2df96ef941d97407d70f77b0f6afdef12560c8cdcd7e74d45d79f7d86701cf8 | import copy
import inspect
from bisect import bisect
from collections import OrderedDict, defaultdict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.db import connections
from django.db.models import Manager
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.query_utils import PathInfo
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = ()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = (
'verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to',
'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable',
'auto_created', 'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name', 'required_db_features',
'required_db_vendor', 'base_manager_name', 'default_manager_name',
'indexes', 'constraints',
)
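# For example, normalize_together() below maps:
#
#     normalize_together(('first_name', 'last_name'))
#     # -> (('first_name', 'last_name'),)
#
#     normalize_together([['a', 'b'], ['c', 'd']])
#     # -> (('a', 'b'), ('c', 'd'))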
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = option_together[0]
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
class Options:
FORWARD_PROPERTIES = {
'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields',
'_forward_fields_map', 'managers', 'managers_map', 'base_manager',
'default_manager',
}
REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
default_apps = apps
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.local_fields = []
self.local_many_to_many = []
self.private_fields = []
self.local_managers = []
self.base_manager_name = None
self.default_manager_name = None
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.indexes = []
self.constraints = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete', 'view')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
        # at the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = OrderedDict()
self.auto_created = False
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = self.default_apps
self.default_related_name = None
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs))
else:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(iter(self.parents.values()))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
if not field.remote_field.parent_link:
raise ImproperlyConfigured(
'Add parent_link=True to %s.' % field,
)
else:
auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True)
model.add_to_class('id', auto)
def add_manager(self, manager):
self.local_managers.append(manager)
self._expire_cache()
def add_field(self, field, private=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if private:
self.private_fields.append(field)
elif field.is_relation and field.many_to_many:
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Do the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (self.app_label, self.model_name)
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, str):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
@property
def verbose_name_raw(self):
"""Return the untranslated verbose name."""
with override(None):
return str(self.verbose_name)
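    # For example (hedged sketch for the swapped property below): with
    # ``swappable = 'AUTH_USER_MODEL'`` declared on a model and
    # ``AUTH_USER_MODEL = 'myapp.CustomUser'`` in settings (a hypothetical
    # app), swapped returns 'myapp.CustomUser'; it returns None while the
    # setting still points at the model itself.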
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
return swapped_for
return None
@cached_property
def managers(self):
managers = []
seen_managers = set()
bases = (b for b in self.model.mro() if hasattr(b, '_meta'))
for depth, base in enumerate(bases):
for manager in base._meta.local_managers:
if manager.name in seen_managers:
continue
manager = copy.copy(manager)
manager.model = self.model
seen_managers.add(manager.name)
managers.append((depth, manager.creation_counter, manager))
return make_immutable_fields_list(
"managers",
(m[2] for m in sorted(managers)),
)
@cached_property
def managers_map(self):
return {manager.name: manager for manager in self.managers}
@cached_property
def base_manager(self):
base_manager_name = self.base_manager_name
if not base_manager_name:
# Get the first parent's base_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
if parent._base_manager.name != '_base_manager':
base_manager_name = parent._base_manager.name
break
if base_manager_name:
try:
return self.managers_map[base_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
base_manager_name,
)
)
manager = Manager()
manager.name = '_base_manager'
manager.model = self.model
manager.auto_created = True
return manager
@cached_property
def default_manager(self):
default_manager_name = self.default_manager_name
if not default_manager_name and not self.local_managers:
# Get the first parent's default_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
default_manager_name = parent._meta.default_manager_name
break
if default_manager_name:
try:
return self.managers_map[default_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
default_manager_name,
)
)
if self.managers:
return self.managers[0]
@cached_property
def fields(self):
"""
Return a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
        # For legacy reasons, the fields property should only contain forward
        # fields that are neither private nor of m2m cardinality. Therefore we
        # pass these three filters to the generator.
        # The third filter is a longwinded way of checking f.related_model - we
        # don't use that property directly because related_model is a cached
        # property, and all the models may not have been loaded yet; we don't
        # want to cache the string reference to the related_model.
def is_not_an_m2m_field(f):
return not (f.is_relation and f.many_to_many)
def is_not_a_generic_relation(f):
return not (f.is_relation and f.one_to_many)
def is_not_a_generic_foreign_key(f):
return not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False)
if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
        Return a list of all many-to-many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Return all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many)
)
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with a relation, this includes the *_id name too.
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with a relation, this includes the *_id name too.
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
def get_parent_list(self):
"""
Return all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
        Return the field on the current model which points to the given
        "ancestor". This is possibly an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Return None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In case of a proxied model, the first link in the chain to
                # the ancestor is the link to that parent.
return self.parents[parent] or parent_link
def get_path_to_parent(self, parent):
"""
Return a list of PathInfos containing the path from the current
model to the parent model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
# Skip the chain of proxy to the concrete proxied model.
proxied_model = self.concrete_model
path = []
opts = self
for int_model in self.get_base_chain(parent):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(
from_opts=final_field.model._meta,
to_opts=opts,
target_fields=targets,
join_field=final_field,
m2m=False,
direct=True,
filtered_relation=None,
))
return path
def get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent)
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
            # Abstract models' fields are copied to child models, hence we will
            # see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, str):
related_objects_graph[f.remote_field.model._meta.concrete_model._meta].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta.concrete_model._meta]
model._meta.__dict__['_relation_tree'] = related_objects
        # It seems it is possible that self is not in all_models, so guard
        # against that with a default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
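    # Usage sketch for get_fields() below (hedged; ``Author`` is a
    # hypothetical model):
    #
    #     [f.name for f in Author._meta.get_fields()]
    #
    # yields forward fields, reverse relations, and inherited fields, but
    # omits hidden reverse relations (related_name starting with '+') unless
    # include_hidden=True.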
def get_fields(self, include_parents=True, include_hidden=False):
"""
        Return a list of fields associated with the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = seen_models is None
if topmost_call:
seen_models = set()
seen_models.add(self.model)
        # Create a cache key composed of all arguments.
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
            # In order to avoid list manipulation, always return a shallow
            # copy of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if not getattr(obj, 'parent_link', False) or obj.model == self.concrete_model:
fields.append(obj)
if reverse and not self.proxy:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields += self.local_fields
fields += self.local_many_to_many
# Private fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the private fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields += self.private_fields
        # In order to avoid list manipulation, always return a shallow copy
        # of the results.
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
@cached_property
def _property_names(self):
"""Return a set of the names of the properties defined on the model."""
names = []
for name in dir(self.model):
attr = inspect.getattr_static(self.model, name)
if isinstance(attr, property):
names.append(name)
return frozenset(names)
|
dc77290eed8039e52186e7ad28da5c0aa8319739280f42488386f5635fdf2ce6 | import copy
import inspect
import warnings
from functools import partialmethod
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection,
connections, router, transaction,
)
from django.db.models.constants import LOOKUP_SEP
from django.db.models.constraints import CheckConstraint, UniqueConstraint
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields.related import (
ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation,
)
from django.db.models.manager import Manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.signals import (
class_prepared, post_init, post_save, pre_init, pre_save,
)
from django.db.models.utils import make_model_tuple
from django.utils.text import capfirst, get_text_list
from django.utils.translation import gettext_lazy as _
from django.utils.version import get_version
class Deferred:
def __repr__(self):
return '<Deferred field>'
def __str__(self):
return '<Deferred field>'
DEFERRED = Deferred()
def subclass_exception(name, bases, module, attached_to):
"""
Create exception subclass. Used by ModelBase below.
The exception is created in a way that allows it to be pickled, assuming
that the returned exception class will be added as an attribute to the
'attached_to' class.
"""
return type(name, bases, {
'__module__': module,
'__qualname__': '%s.%s' % (attached_to.__qualname__, name),
})
def _has_contribute_to_class(value):
# Only call contribute_to_class() if it's bound.
return not inspect.isclass(value) and hasattr(value, 'contribute_to_class')
class ModelBase(type):
"""Metaclass for all models."""
def __new__(cls, name, bases, attrs, **kwargs):
super_new = super().__new__
        # Ensure initialization is only performed for subclasses of Model
        # (excluding the Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_attrs = {'__module__': module}
classcell = attrs.pop('__classcell__', None)
if classcell is not None:
new_attrs['__classcell__'] = classcell
attr_meta = attrs.pop('Meta', None)
# Pass all attrs without a (Django-specific) contribute_to_class()
# method to type.__new__() so that they're properly initialized
# (i.e. __set_name__()).
for obj_name, obj in list(attrs.items()):
if not _has_contribute_to_class(obj):
new_attrs[obj_name] = attrs.pop(obj_name)
new_class = super_new(cls, name, bases, new_attrs, **kwargs)
abstract = getattr(attr_meta, 'abstract', False)
meta = attr_meta or getattr(new_class, 'Meta', None)
base_meta = getattr(new_class, '_meta', None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
new_class.add_to_class('_meta', Options(meta, app_label))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
'DoesNotExist',
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
'MultipleObjectsReturned',
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.private_fields
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is None:
base = parent
elif parent._meta.concrete_model is not base._meta.concrete_model:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Track fields inherited from base models.
inherited_attributes = set()
# Do the appropriate setup for any model parents.
for base in new_class.mro():
if base not in parents or not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
inherited_attributes.update(base.__dict__)
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
if not base._meta.abstract:
# Check for clashes between locally declared fields and those
# on the base classes.
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
inherited_attributes.add(field.name)
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
if attr_name in field_names:
raise FieldError(
"Auto-generated field '%s' in class %r for "
"parent_link to base class %r clashes with "
"declared field of the same name." % (
attr_name,
name,
base.__name__,
)
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
base_parents = base._meta.parents.copy()
# Add fields from abstract base class if it wasn't overridden.
for field in parent_fields:
if (field.name not in field_names and
field.name not in new_class.__dict__ and
field.name not in inherited_attributes):
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Replace parent links defined on this base by the new
# field. It will be appropriately resolved if required.
if field.one_to_one:
for parent, parent_link in base_parents.items():
if field == parent_link:
base_parents[parent] = new_field
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base_parents)
# Inherit private fields (like GenericForeignKey) from the parent
# class
for field in base._meta.private_fields:
if field.name in field_names:
if not base._meta.abstract:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
field = copy.deepcopy(field)
if not base._meta.abstract:
field.mti_inherited = True
new_class.add_to_class(field.name, field)
# Copy indexes so that index names are unique when models extend an
# abstract model.
new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes]
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def add_to_class(cls, name, value):
if _has_contribute_to_class(value):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""Create some methods once self._meta has been populated."""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
if get_absolute_url_override:
setattr(cls, 'get_absolute_url', get_absolute_url_override)
if not opts.managers:
if any(f.name == 'objects' for f in opts.fields):
raise ValueError(
"Model %s must specify a custom Manager, because it has a "
"field named 'objects'." % cls.__name__
)
manager = Manager()
manager.auto_created = True
cls.add_to_class('objects', manager)
# Set the name of _meta.indexes. This can't be done in
# Options.contribute_to_class() because fields haven't been added to
# the model at that point.
for index in cls._meta.indexes:
if not index.name:
index.set_name_with_model(cls)
class_prepared.send(sender=cls)
@property
def _base_manager(cls):
return cls._meta.base_manager
@property
def _default_manager(cls):
return cls._meta.default_manager
class ModelStateFieldsCacheDescriptor:
def __get__(self, instance, cls=None):
if instance is None:
return self
res = instance.fields_cache = {}
return res
class ModelState:
"""Store model instance state."""
db = None
# If true, uniqueness validation checks will consider this a new, unsaved
# object. Necessary for correct validation of new instances of objects with
# explicit (non-auto) PKs. This impacts validation only; it has no effect
# on the actual save.
adding = True
fields_cache = ModelStateFieldsCacheDescriptor()
class Model(metaclass=ModelBase):
def __init__(self, *args, **kwargs):
# Alias some things as locals to avoid repeat global lookups
cls = self.__class__
opts = self._meta
_setattr = setattr
_DEFERRED = DEFERRED
pre_init.send(sender=cls, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work.
        # The reason for the kwargs check is that the standard iterator passes
        # values in via args, and instantiation for iteration is 33% faster.
if len(args) > len(opts.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(opts.concrete_fields)
            # The ordering of the zip calls matters - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(opts.fields)
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# Virtual field
if field.attname not in kwargs and field.column is None:
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
if rel_obj is not _DEFERRED:
_setattr(self, field.name, rel_obj)
else:
if val is not _DEFERRED:
_setattr(self, field.attname, val)
if kwargs:
property_names = opts._property_names
for prop in tuple(kwargs):
try:
# Any remaining kwargs must correspond to properties or
# virtual fields.
if prop in property_names or opts.get_field(prop):
if kwargs[prop] is not _DEFERRED:
_setattr(self, prop, kwargs[prop])
del kwargs[prop]
except (AttributeError, FieldDoesNotExist):
pass
for kwarg in kwargs:
raise TypeError("%s() got an unexpected keyword argument '%s'" % (cls.__name__, kwarg))
super().__init__()
post_init.send(sender=cls, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if len(values) != len(cls._meta.concrete_fields):
values_iter = iter(values)
values = [
next(values_iter) if f.attname in field_names else DEFERRED
for f in cls._meta.concrete_fields
]
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
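    # Illustrative sketch, not part of Django: from_db() is the supported hook
    # for customizing model loading. The ``Book`` model below is hypothetical;
    # it stashes the values loaded from the database so a later save() can
    # compare against them.
    #
    #     from django.db import models
    #
    #     class Book(models.Model):
    #         title = models.CharField(max_length=100)
    #
    #         @classmethod
    #         def from_db(cls, db, field_names, values):
    #             instance = super().from_db(db, field_names, values)
    #             instance._loaded_values = dict(zip(field_names, values))
    #             return instance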
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
def __str__(self):
return '%s object (%s)' % (self.__class__.__name__, self.pk)
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self.pk
if my_pk is None:
return self is other
return my_pk == other.pk
def __hash__(self):
if self.pk is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self.pk)
def __reduce__(self):
data = self.__getstate__()
data[DJANGO_VERSION_PICKLE_KEY] = get_version()
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id,), data
def __getstate__(self):
"""Hook to allow choosing the attributes to pickle."""
return self.__dict__
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled model instance's Django version %s does not match "
"the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
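    # Illustrative usage, not part of Django (``book`` is a hypothetical saved
    # instance): pickling round-trips through __reduce__()/model_unpickle(),
    # and __setstate__() warns if the pickling Django version differs.
    #
    #     import pickle
    #     restored = pickle.loads(pickle.dumps(book))   # fresh, equal instance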
def _get_pk_val(self, meta=None):
meta = meta or self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Return a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if f.attname not in self.__dict__
}
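    # Illustrative usage, not part of Django (assumes a hypothetical ``Book``
    # model): fields excluded via defer()/only() stay deferred until accessed.
    #
    #     book = Book.objects.defer('summary').get(pk=1)
    #     book.get_deferred_fields()   # {'summary'}
    #     book.summary                 # triggers refresh_from_db(fields=['summary'])
    #     book.get_deferred_fields()   # set()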
def refresh_from_db(self, using=None, fields=None):
"""
Reload field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is None:
self._prefetched_objects_cache = {}
else:
prefetched_objects_cache = getattr(self, '_prefetched_objects_cache', ())
for field in fields:
if field in prefetched_objects_cache:
del prefetched_objects_cache[field]
fields.remove(field)
if not fields:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
hints = {'instance': self}
db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Clear cached foreign keys.
if field.is_relation and field.is_cached(self):
field.delete_cached_value(self)
# Clear cached relations.
for field in self._meta.related_objects:
if field.is_cached(self):
field.delete_cached_value(self)
self._state.db = db_instance._state.db
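    # Illustrative usage, not part of Django (``obj`` is any saved instance;
    # the 'replica' database alias is hypothetical):
    #
    #     obj.refresh_from_db()                   # reload all loaded fields
    #     obj.refresh_from_db(fields=['title'])   # reload a single field
    #     obj.refresh_from_db(using='replica')    # read from another database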
def serializable_value(self, field_name):
"""
Return the value of the field name for this instance. If the field is
a foreign key, return the id value instead of the object. If there's
no Field object with this name on the model, return the model
attribute's value.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Save the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save() would result in silent data loss.
for field in self._meta.concrete_fields:
# If the related field isn't cached, then an instance hasn't
# been assigned and there's no need to worry about this check.
if field.is_relation and field.is_cached(self):
obj = getattr(self, field.name, None)
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj and obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
field.remote_field.delete_cached_value(obj)
raise ValueError(
"save() prohibited to prevent data loss due to "
"unsaved related object '%s'." % field.name
)
# If the relationship's pk/to_field was changed, clear the
# cached relationship.
if obj and getattr(obj, field.target_field.attname) != getattr(self, field.attname):
field.delete_cached_value(self)
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
deferred_fields = self.get_deferred_fields()
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if not update_fields:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and deferred_fields and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
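    # Illustrative usage, not part of Django (``book`` is a hypothetical saved
    # instance): update_fields restricts the UPDATE to the named fields, and an
    # empty iterable skips the save (and its signals) entirely.
    #
    #     book.title = 'New title'
    #     book.save(update_fields=['title'])   # UPDATE ... SET title = ...
    #     book.save(update_fields=[])          # no query, no signals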
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handle the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
        The 'raw' argument tells save_base() not to save any parent models
        and not to change any of the values before saving. This is used by
        fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or update_fields
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
pre_save.send(
sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields,
)
# A transaction isn't needed if one query is issued.
if meta.parents:
context_manager = transaction.atomic(using=using, savepoint=False)
else:
context_manager = transaction.mark_for_rollback_on_error(using=using)
with context_manager:
parent_inserted = False
if not raw:
parent_inserted = self._save_parents(cls, using, update_fields)
updated = self._save_table(
raw, cls, force_insert or parent_inserted,
force_update, using, update_fields,
)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
post_save.send(
sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using,
)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""Save all the parents of cls using values from self."""
meta = cls._meta
inserted = False
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None and
getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
parent_inserted = self._save_parents(cls=parent, using=using, update_fields=update_fields)
updated = self._save_table(
cls=parent, using=using, update_fields=update_fields,
force_insert=parent_inserted,
)
if not updated:
inserted = True
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy, set the
                # attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
if field.is_cached(self):
field.delete_cached_value(self)
return inserted
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Do the heavy-lifting involved in saving. Update or insert the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
order_value = cls._base_manager.using(using).filter(**filter_args).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if f is not meta.auto_field]
update_pk = meta.auto_field and not pk_set
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
Try to update the model. Return True if the model was updated (if an
update query was done and a matching row was found in the DB).
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
return (
filtered.exists() and
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
(filtered._update(values) > 0 or filtered.exists())
)
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None, keep_parents=False):
using = using or router.db_for_write(self.__class__, instance=self)
assert self.pk is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return dict(field.flatchoices).get(value, value)
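    # Illustrative sketch, not part of Django: this backs the auto-generated
    # get_<field>_display() method for fields with choices. The hypothetical
    # ``Book`` model below has a ``genre`` CharField.
    #
    #     genre = models.CharField(max_length=1,
    #                              choices=[('f', 'Fiction'), ('n', 'Nonfiction')])
    #
    #     Book(genre='f').get_genre_display()   # 'Fiction'
    #     Book(genre='x').get_genre_display()   # 'x' (falls back to the raw value)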
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = getattr(self, field.attname)
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
'%s%s' % (order, field.name), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
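    # Illustrative usage, not part of Django: DateField/DateTimeField (without
    # null=True) contribute get_next_by_<field>() and get_previous_by_<field>()
    # built on this method. ``entry`` and its ``pub_date`` field are hypothetical.
    #
    #     entry.get_next_by_pub_date()                   # next by pub_date, then pk
    #     entry.get_previous_by_pub_date(status='live')  # extra filters via kwargs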
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = self.__class__._default_manager.filter(**filter_args).filter(**{
'_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Check unique constraints on the model and raise ValidationError if any
failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Return a list of checks to perform. Since validate_unique() could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check. Fields that did not validate should also be excluded,
but they need to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
constraints = [(self.__class__, self._meta.constraints)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
if parent_class._meta.constraints:
constraints.append((parent_class, parent_class._meta.constraints))
for model_class, unique_together in unique_togethers:
for check in unique_together:
if not any(name in exclude for name in check):
# Add the check if the field isn't excluded.
unique_checks.append((model_class, tuple(check)))
for model_class, model_constraints in constraints:
for constraint in model_constraints:
if (isinstance(constraint, UniqueConstraint) and
# Partial unique constraints can't be validated.
constraint.condition is None and
not any(name in exclude for name in constraint.fields)):
unique_checks.append((model_class, constraint.fields))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
# TODO: Handle multiple backends with different feature flags.
if (lookup_value is None or
(lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)):
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
            # allows a single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# there's a ticket to add a date lookup, we can remove this special
            # case if that makes its way in.
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': capfirst(opts.verbose_name),
'lookup_type': lookup_type,
'field': field_name,
'field_label': capfirst(field.verbose_name),
'date_field': unique_for,
'date_field_label': capfirst(opts.get_field(unique_for).verbose_name),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': capfirst(opts.verbose_name),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = capfirst(field.verbose_name)
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = get_text_list(field_labels, _('and'))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Call clean_fields(), clean(), and validate_unique() on the model.
Raise a ValidationError for any errors that occur.
"""
errors = {}
if exclude is None:
exclude = []
else:
exclude = list(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors:
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
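    # Illustrative usage, not part of Django (``book`` is hypothetical):
    # ModelForm validation calls this automatically; manual saves must opt in.
    #
    #     try:
    #         book.full_clean(exclude=['slug'])
    #     except ValidationError as e:
    #         errors = e.message_dict   # {field_name: [messages, ...], ...}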
def clean_fields(self, exclude=None):
"""
Clean all fields and raise a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = [*cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs)]
if not cls._meta.swapped:
errors += [
*cls._check_fields(**kwargs),
*cls._check_m2m_through_same_relationship(),
*cls._check_long_column_names(),
]
clash_errors = (
*cls._check_id_field(),
*cls._check_field_name_clashes(),
*cls._check_model_name_db_lookup_clashes(),
*cls._check_property_name_related_field_accessor_clashes(),
*cls._check_single_primary_key(),
)
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors += [
*cls._check_index_together(),
*cls._check_unique_together(),
*cls._check_indexes(),
*cls._check_ordering(),
*cls._check_constraints(),
]
return errors
@classmethod
def _check_swappable(cls):
"""Check if the swapped model exists."""
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract." % (
cls._meta.swappable, app_label, model_name
),
id='models.E002',
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
"""Perform all manager checks."""
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
"""Perform all field checks."""
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two identical many-to-many relations "
"through the intermediate model '%s'." %
f.remote_field.through._meta.label,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
"""Check if `id` field is a primary key."""
fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk]
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
obj=cls,
id='models.E004',
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
"""Forbid field shadowing in multi-table inheritance."""
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'." % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents, including auto-generated fields like multi-table inheritance
# child accessors.
for parent in cls._meta.get_parent_list():
for f in parent._meta.get_fields():
if f not in used_fields:
used_fields[f.name] = f
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
            # Note that we may detect a clash between user-defined non-unique
# field "id" and automatically added unique field "id", both
# defined at the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (
f.name, clash.name, clash.model._meta
),
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_model_name_db_lookup_clashes(cls):
errors = []
model_name = cls.__name__
if model_name.startswith('_') or model_name.endswith('_'):
errors.append(
checks.Error(
"The model name '%s' cannot start or end with an underscore "
"as it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E023'
)
)
elif LOOKUP_SEP in model_name:
errors.append(
checks.Error(
"The model name '%s' cannot contain double underscores as "
"it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E024'
)
)
return errors
@classmethod
def _check_property_name_related_field_accessor_clashes(cls):
errors = []
property_names = cls._meta._property_names
related_field_accessors = (
f.get_attname() for f in cls._meta._get_fields(reverse=False)
if f.is_relation and f.related_model is not None
)
for accessor in related_field_accessors:
if accessor in property_names:
errors.append(
checks.Error(
"The property '%s' clashes with a related field "
"accessor." % accessor,
obj=cls,
id='models.E025',
)
)
return errors
@classmethod
def _check_single_primary_key(cls):
errors = []
if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1:
errors.append(
checks.Error(
"The model cannot have more than one field with "
"'primary_key=True'.",
obj=cls,
id='models.E026',
)
)
return errors
@classmethod
def _check_index_together(cls):
"""Check the value of "index_together" option."""
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
"""Check the value of "unique_together" option."""
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_indexes(cls):
"""Check the fields of indexes."""
fields = [field for index in cls._meta.indexes for field, _ in index.fields_orders]
return cls._check_local_fields(fields, 'indexes')
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
# In order to avoid hitting the relation tree prematurely, we use our
# own fields_map instead of using get_field()
forward_fields_map = {
field.name: field for field in cls._meta._get_fields(reverse=False)
}
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the nonexistent field '%s'." % (
option, field_name,
),
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'." % (
option, field_name, option,
),
obj=cls,
id='models.E013',
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
"'%s' refers to field '%s' which is not local to model '%s'."
% (option, field_name, cls._meta.object_name),
hint="This issue may be caused by multi-table inheritance.",
obj=cls,
id='models.E016',
)
)
return errors
@classmethod
def _check_ordering(cls):
"""
Check "ordering" option -- is it a list of strings and do all fields
exist?
"""
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=cls,
id='models.E021',
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
"'ordering' must be a tuple or list (even if you want to order by only one field).",
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip expressions and '?' fields.
fields = (f for f in fields if isinstance(f, str) and f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if LOOKUP_SEP not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != 'pk'}
# Check for invalid or nonexistent fields in ordering.
invalid_fields = []
# Any field name that is not present in field_names does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(chain.from_iterable(
(f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
))
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the nonexistent field '%s'." % invalid_field,
obj=cls,
id='models.E015',
)
)
return errors
@classmethod
def _check_long_column_names(cls):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
for db in settings.DATABASES:
# skip databases where the model won't be created
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is None or connection.features.truncates_names:
continue
else:
if allowed_len is None:
allowed_len = max_name_length
db_alias = db
elif max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if f.db_column is None and column_name is not None and len(column_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id='models.E018',
)
)
for f in cls._meta.local_many_to_many:
# Skip nonexistent models.
if isinstance(f.remote_field.through, str):
continue
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for M2M field '
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=(
"Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."
),
obj=cls,
id='models.E019',
)
)
return errors
@classmethod
def _check_constraints(cls):
errors = []
for db in settings.DATABASES:
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
if connection.features.supports_table_check_constraints:
continue
if any(isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints):
errors.append(
checks.Warning(
'%s does not support check constraints.' % connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id='models.W027',
)
)
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(self, ordered_obj, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
    # FIXME: It would be nice if there were an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(pk=j, **filter_args).update(_order=i)
def method_get_order(self, ordered_obj):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
pk_name = ordered_obj._meta.pk.name
return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True)
def make_foreign_order_accessors(model, related_model):
setattr(
related_model,
'get_%s_order' % model.__name__.lower(),
partialmethod(method_get_order, model)
)
setattr(
related_model,
'set_%s_order' % model.__name__.lower(),
partialmethod(method_set_order, model)
)
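# Illustrative usage, not part of Django: given a hypothetical ``Answer`` model
# declaring Meta.order_with_respect_to = 'question', the accessors created
# above are attached to the related model as get_answer_order() and
# set_answer_order().
#
#     question.get_answer_order()           # answer pks, ordered by _order
#     question.set_answer_order([3, 1, 2])  # rewrite _order in one transaction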
########
# MISC #
########
def model_unpickle(model_id):
"""Used to unpickle Model subclasses with deferred fields."""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
return model.__new__(model)
model_unpickle.__safe_for_unpickle__ = True
|
cfaf46ae42683033a779827deaaf109e226c7de4693e2e8e796e497cff291400 | """
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
import copy
import functools
import inspect
from collections import namedtuple
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2m direct filtered_relation')
class InvalidQuery(Exception):
"""The query passed to raw() isn't a safe query to use with raw()."""
pass
def subclasses(cls):
yield cls
for subclass in cls.__subclasses__():
yield from subclasses(subclass)
class QueryWrapper:
"""
    A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
contains_aggregate = False
def __init__(self, sql, params):
self.data = sql, list(params)
def as_sql(self, compiler=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulate filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
conditional = True
def __init__(self, *args, _connector=None, _negated=False, **kwargs):
super().__init__(children=[*args, *sorted(kwargs.items())], connector=_connector, negated=_negated)
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
# If the other Q() is empty, ignore it and just use `self`.
if not other:
return copy.deepcopy(self)
# Or if this Q is empty, ignore it and just use `other`.
elif not self:
return copy.deepcopy(other)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(self, reuse, allow_joins=allow_joins, split_subq=False)
query.promote_joins(joins)
return clause
def deconstruct(self):
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
if path.startswith('django.db.models.query_utils'):
path = path.replace('django.db.models.query_utils', 'django.db.models')
args, kwargs = (), {}
if len(self.children) == 1 and not isinstance(self.children[0], Q):
child = self.children[0]
kwargs = {child[0]: child[1]}
else:
args = tuple(self.children)
if self.connector != self.default:
kwargs = {'_connector': self.connector}
if self.negated:
kwargs['_negated'] = True
return path, args, kwargs
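# Illustrative usage, not part of Django (``Book`` is hypothetical): Q objects
# combine with & and |, invert with ~, and _combine() drops empty operands.
#
#     Book.objects.filter(Q(title__startswith='D') | ~Q(year=2020))
#     Q() & Q(year=2020)          # behaves like Q(year=2020)
#     Q(year=2020).deconstruct()  # ('django.db.models.Q', (), {'year': 2020})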
class DeferredAttribute:
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name):
self.field_name = field_name
def __get__(self, instance, cls=None):
"""
        Retrieve and cache the value from the datastore on the first lookup.
Return the cached value.
"""
if instance is None:
return self
data = instance.__dict__
if data.get(self.field_name, self) is self:
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance, self.field_name)
if val is None:
instance.refresh_from_db(fields=[self.field_name])
val = getattr(instance, self.field_name)
data[self.field_name] = val
return data[self.field_name]
def _check_parent_chain(self, instance, name):
"""
Check if the field value can be fetched from a parent field already
        loaded in the instance. This can be done if the to-be-fetched field is
        a primary key field.
"""
opts = instance._meta
f = opts.get_field(name)
link_field = opts.get_ancestor_link(f.model)
if f.primary_key and f != link_field:
return getattr(instance, link_field.attname)
return None
class RegisterLookupMixin:
@classmethod
def _get_lookup(cls, lookup_name):
return cls.get_lookups().get(lookup_name, None)
@classmethod
@functools.lru_cache(maxsize=None)
def get_lookups(cls):
class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in inspect.getmro(cls)]
return cls.merge_dicts(class_lookups)
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@staticmethod
def merge_dicts(dicts):
"""
        Merge dicts in reverse to give precedence to the order of the original
        list, e.g. merge_dicts([a, b]) will prefer the keys in 'a' over those
        in 'b'.
"""
merged = {}
for d in reversed(dicts):
merged.update(d)
return merged
@classmethod
def _clear_cached_lookups(cls):
for subclass in subclasses(cls):
subclass.get_lookups.cache_clear()
@classmethod
def register_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if 'class_lookups' not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
cls._clear_cached_lookups()
return lookup
@classmethod
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
        Remove the given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
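# Illustrative sketch: fields and expressions both use this mixin, so a
# custom lookup can be attached to a field class and used in filters
# (``NotEqual`` is hypothetical):
#
#     from django.db.models import CharField, Lookup
#
#     class NotEqual(Lookup):
#         lookup_name = 'ne'
#
#         def as_sql(self, compiler, connection):
#             lhs, lhs_params = self.process_lhs(compiler, connection)
#             rhs, rhs_params = self.process_rhs(compiler, connection)
#             return '%s <> %s' % (lhs, rhs), lhs_params + rhs_params
#
#     CharField.register_lookup(NotEqual)  # enables name__ne='...'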
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Return True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
    * restricted - a boolean indicating whether the field list has been
      manually restricted using a requested clause
    * requested - the select_related() dictionary
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.attname not in load_fields:
if restricted and field.name in requested:
raise InvalidQuery("Field %s.%s cannot be both deferred"
" and traversed using select_related"
" at the same time." %
(field.model._meta.object_name, field.name))
return True
def refs_expression(lookup_parts, annotations):
"""
Check if the lookup_parts contains references to the given annotations set.
Because the LOOKUP_SEP is contained in the default annotation names, check
each prefix of the lookup_parts for a match.
"""
for n in range(1, len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
def check_rel_lookup_compatibility(model, target_opts, field):
"""
Check that self.model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model or
opts.concrete_model in model._meta.get_parent_list() or
model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
    #     place = OneToOneField(Place, primary_key=True)
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return (
check(target_opts) or
(getattr(field, 'primary_key', False) and check(field.model._meta))
)
class FilteredRelation:
"""Specify custom filtering in the ON clause of SQL joins."""
def __init__(self, relation_name, *, condition=Q()):
if not relation_name:
raise ValueError('relation_name cannot be empty.')
self.relation_name = relation_name
self.alias = None
if not isinstance(condition, Q):
raise ValueError('condition argument must be a Q() instance.')
self.condition = condition
self.path = []
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.relation_name == other.relation_name and
self.alias == other.alias and
self.condition == other.condition
)
def clone(self):
clone = FilteredRelation(self.relation_name, condition=self.condition)
clone.alias = self.alias
clone.path = self.path[:]
return clone
def resolve_expression(self, *args, **kwargs):
"""
QuerySet.annotate() only accepts expression-like arguments
(with a resolve_expression() method).
"""
raise NotImplementedError('FilteredRelation.resolve_expression() is unused.')
def as_sql(self, compiler, connection):
# Resolve the condition in Join.filtered_relation.
query = compiler.query
where = query.build_filtered_relation_q(self.condition, reuse=set(self.path))
return compiler.compile(where)
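# Illustrative sketch (hypothetical ``Restaurant`` model with a ``pizzas``
# relation): FilteredRelation pushes its condition into the JOIN's ON
# clause rather than the WHERE clause, which matters for LEFT OUTER joins:
#
#     Restaurant.objects.annotate(
#         vegan_pizzas=FilteredRelation(
#             'pizzas', condition=Q(pizzas__vegan=True)),
#     ).filter(vegan_pizzas__name__icontains='margherita')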
|
5ef594ffdc2bd0bcdcc5776eadc2f62baa2ad7130a7c960333fcceae17f57d8a | import copy
import datetime
import inspect
from decimal import Decimal
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import connection
from django.db.models import fields
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == 'DecimalField':
sql = 'CAST(%s AS NUMERIC)' % sql
except FieldError:
pass
return sql, params
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
BITLEFTSHIFT = '<<'
BITRIGHTSHIFT = '>>'
def _combine(self, other, connector, reversed):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
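# Illustrative sketch: these operators are what make F() arithmetic work.
# Each operation builds a CombinedExpression node instead of evaluating
# anything (``Order`` and ``Flag`` are hypothetical models):
#
#     Order.objects.annotate(total=F('price') * F('quantity'))
#     Order.objects.update(stock=F('stock') - 1)
#     # Bitwise ops need the explicit methods; & and | are reserved for
#     # boolean combination:
#     Flag.objects.update(mask=F('mask').bitor(0x04))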
@deconstructible
class BaseExpression:
"""Base class for all query expressions."""
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
    # Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop('convert_value', None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop else
[self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else (
F(arg) if isinstance(arg, str) else Value(arg)
) for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
        Where `sql` is a string containing ordered SQL placeholders to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(expr and expr.contains_aggregate for expr in self.get_source_expressions())
@cached_property
def contains_over_clause(self):
return any(expr and expr.contains_over_clause for expr in self.get_source_expressions())
@cached_property
def contains_column_references(self):
return any(expr and expr.contains_column_references for expr in self.get_source_expressions())
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr else None
for expr in c.get_source_expressions()
])
return c
def _prepare(self, field):
"""Hook used by Lookup.get_prep_lookup() to do custom preparation."""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError('Cannot resolve expression type, unknown output_field')
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression. If the output
        fields of all source fields match, then simply infer the same type
here. This isn't always correct, but it makes sense most of the time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
sources_iter = (source for source in self.get_source_fields() if source is not None)
for output_field in sources_iter:
if any(not isinstance(output_field, source.__class__) for source in sources_iter):
raise FieldError('Expression contains mixed types. You must set output_field.')
return output_field
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == 'FloatField':
return lambda value, expression, connection: None if value is None else float(value)
elif internal_type.endswith('IntegerField'):
return lambda value, expression, connection: None if value is None else int(value)
elif internal_type == 'DecimalField':
return lambda value, expression, connection: None if value is None else Decimal(value)
return self._convert_value_noop
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions([
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
])
return clone
def copy(self):
return copy.copy(self)
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
yield from expr.flatten()
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
return isinstance(other, BaseExpression) and other.identity == self.identity
def __hash__(self):
return hash(self.identity)
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
pass
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField') or
(rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
if (lhs_output and rhs_output and self.connector == self.SUB and
lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
lhs_output.get_internal_type() == rhs_output.get_internal_type()):
return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
if not isinstance(side, DurationValue):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == 'DurationField':
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs, connection)
rhs = compiler.compile(self.rhs, connection)
return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)
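# Illustrative sketch (``Race`` is a hypothetical model): subtracting two
# temporal columns of the same type is routed through TemporalSubtraction
# and yields a DurationField:
#
#     Race.objects.annotate(
#         elapsed=ExpressionWrapper(
#             F('finished_at') - F('started_at'),
#             output_field=fields.DurationField(),
#         ),
#     )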
@deconstructible
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
# Can the expression be used in a WHERE clause?
filterable = True
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(self, query=None, allow_joins=True, reuse=None,
summarize=False, for_save=False, simple_col=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize, simple_col)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
def as_sql(self, *args, **kwargs):
raise ValueError(
'This queryset contains a reference to an outer query and may '
'only be used in a subquery.'
)
def _prepare(self, output_field=None):
return self
def relabeled_clone(self, relabels):
return self
class OuterRef(F):
def resolve_expression(self, query=None, allow_joins=True, reuse=None,
summarize=False, for_save=False, simple_col=False):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def _prepare(self, output_field=None):
return self
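# Illustrative sketch (hypothetical ``Post``/``Comment`` models): OuterRef
# is a lazy F() whose resolution is deferred until its queryset is embedded
# as a Subquery in an outer query:
#
#     newest = Comment.objects.filter(post=OuterRef('pk')).order_by('-created')
#     Post.objects.annotate(newest_email=Subquery(newest.values('email')[:1]))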
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)" % (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items()))
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data['function'] = function
else:
data.setdefault('function', self.function)
template = template or data.get('template', self.template)
arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
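# Illustrative sketch: database functions are thin Func subclasses whose
# class attributes feed the '%(function)s(%(expressions)s)' template
# (``Author`` is a hypothetical model):
#
#     class Lower(Func):
#         function = 'LOWER'
#         arity = 1
#
#     Author.objects.annotate(name_lower=Lower('name'))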
class Value(Expression):
"""Represent a wrapped value as a node within an expression."""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.value)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, 'get_placeholder'):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return 'NULL', []
return '%s', [val]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
class DurationValue(Value):
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value), []
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return '(%s)' % self.sql, self.params
def get_group_by_cols(self):
return [self]
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return '*', []
class Random(Expression):
output_field = fields.FloatField()
def __repr__(self):
return "Random()"
def as_sql(self, compiler, connection):
return connection.ops.random_function_sql(), []
class Col(Expression):
contains_column_references = True
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection))
class SimpleCol(Expression):
"""
Represents the SQL of a column name without the table name.
This variant of Col doesn't include the table name (or an alias) to
avoid a syntax error in check constraints.
"""
contains_column_references = True
def __init__(self, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.target = target
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return qn(self.target.column), []
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (
self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection)
)
class Ref(Expression):
"""
    Reference to a column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like an
ordering clause.
"""
template = '%(expressions)s'
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError('%s requires at least one expression.' % self.__class__.__name__)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
class ExpressionWrapper(Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection):
return self.expression.as_sql(compiler, connection)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
template = 'WHEN %(condition)s THEN %(result)s'
def __init__(self, condition=None, then=None, **lookups):
if lookups and condition is None:
condition, lookups = Q(**lookups), None
if condition is None or not getattr(condition, 'conditional', False) or lookups:
raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, 'resolve_expression'):
c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params['condition'] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params['result'] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
class Case(Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params['cases'] = case_joiner.join(case_parts)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or template_params.get('template', self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
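# Illustrative sketch (``Client`` is a hypothetical model): each When()
# renders a 'WHEN condition THEN result' arm and Case joins the arms into
# a searched CASE expression:
#
#     Client.objects.annotate(
#         discount=Case(
#             When(account_type='gold', then=Value(10)),
#             When(account_type='platinum', then=Value(20)),
#             default=Value(0),
#             output_field=fields.IntegerField(),
#         ),
#     )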
class Subquery(Expression):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = '(%(subquery)s)'
contains_aggregate = False
def __init__(self, queryset, output_field=None, **extra):
self.queryset = queryset
self.extra = extra
super().__init__(output_field)
def _resolve_output_field(self):
if len(self.queryset.query.select) == 1:
return self.queryset.query.select[0].field
return super()._resolve_output_field()
def copy(self):
clone = super().copy()
clone.queryset = clone.queryset.all()
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
clone = self.copy()
clone.is_summary = summarize
clone.queryset.query.bump_prefix(query)
# Need to recursively resolve these.
def resolve_all(child):
if hasattr(child, 'children'):
[resolve_all(_child) for _child in child.children]
if hasattr(child, 'rhs'):
child.rhs = resolve(child.rhs)
def resolve(child):
if hasattr(child, 'resolve_expression'):
resolved = child.resolve_expression(
query=query, allow_joins=allow_joins, reuse=reuse,
summarize=summarize, for_save=for_save,
)
# Add table alias to the parent query's aliases to prevent
# quoting.
if hasattr(resolved, 'alias') and resolved.alias != resolved.target.model._meta.db_table:
clone.queryset.query.external_aliases.add(resolved.alias)
return resolved
return child
resolve_all(clone.queryset.query.where)
for key, value in clone.queryset.query.annotations.items():
if isinstance(value, Subquery):
clone.queryset.query.annotations[key] = resolve(value)
return clone
def get_source_expressions(self):
return [
x for x in [
getattr(expr, 'lhs', None)
for expr in self.queryset.query.where.children
] if x
]
def relabeled_clone(self, change_map):
clone = self.copy()
clone.queryset.query = clone.queryset.query.relabeled_clone(change_map)
clone.queryset.query.external_aliases.update(
alias for alias in change_map.values()
if alias not in clone.queryset.query.alias_map
)
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
template_params['subquery'], sql_params = self.queryset.query.get_compiler(connection=connection).as_sql()
template = template or template_params.get('template', self.template)
sql = template % template_params
return sql, sql_params
def _prepare(self, output_field):
# This method will only be called if this instance is the "rhs" in an
# expression: the wrapping () must be removed (as the expression that
# contains this will provide them). SQLite evaluates ((subquery))
# differently than the other databases.
if self.template == '(%(subquery)s)':
clone = self.copy()
clone.template = '%(subquery)s'
return clone
return self
class Exists(Subquery):
template = 'EXISTS(%(subquery)s)'
output_field = fields.BooleanField()
def __init__(self, *args, negated=False, **kwargs):
self.negated = negated
super().__init__(*args, **kwargs)
def __invert__(self):
return type(self)(self.queryset, negated=(not self.negated), **self.extra)
def resolve_expression(self, query=None, *args, **kwargs):
# As a performance optimization, remove ordering since EXISTS doesn't
# care about it, just whether or not a row matches.
self.queryset = self.queryset.order_by()
return super().resolve_expression(query, *args, **kwargs)
def as_sql(self, compiler, connection, template=None, **extra_context):
sql, params = super().as_sql(compiler, connection, template, **extra_context)
if self.negated:
sql = 'NOT {}'.format(sql)
return sql, params
def as_oracle(self, compiler, connection, template=None, **extra_context):
# Oracle doesn't allow EXISTS() in the SELECT list, so wrap it with a
# CASE WHEN expression. Change the template since the When expression
# requires a left hand side (column) to compare against.
sql, params = self.as_sql(compiler, connection, template, **extra_context)
sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql)
return sql, params
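# Illustrative sketch (models and ``cutoff`` are hypothetical): Exists
# wraps a subquery in EXISTS(...) and, as above, drops the subquery's
# ordering since only row existence matters:
#
#     recent = Comment.objects.filter(post=OuterRef('pk'), created__gte=cutoff)
#     Post.objects.annotate(has_recent=Exists(recent)).filter(has_recent=True)
#     Post.objects.annotate(no_recent=~Exists(recent))  # negated form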
class OrderBy(BaseExpression):
template = '%(expression)s %(ordering)s'
def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False):
if nulls_first and nulls_last:
raise ValueError('nulls_first and nulls_last are mutually exclusive')
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, 'resolve_expression'):
raise ValueError('expression must be an expression type')
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
if not template:
if self.nulls_last:
template = '%s NULLS LAST' % self.template
elif self.nulls_first:
template = '%s NULLS FIRST' % self.template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
'expression': expression_sql,
'ordering': 'DESC' if self.descending else 'ASC',
**extra_context,
}
template = template or self.template
params *= template.count('%(expression)s')
return (template % placeholders).rstrip(), params
def as_sqlite(self, compiler, connection):
template = None
if self.nulls_last:
template = '%(expression)s IS NULL, %(expression)s %(ordering)s'
elif self.nulls_first:
template = '%(expression)s IS NOT NULL, %(expression)s %(ordering)s'
return self.as_sql(compiler, connection, template=template)
def as_mysql(self, compiler, connection):
template = None
if self.nulls_last:
template = 'IF(ISNULL(%(expression)s),1,0), %(expression)s %(ordering)s '
elif self.nulls_first:
template = 'IF(ISNULL(%(expression)s),0,1), %(expression)s %(ordering)s '
return self.as_sql(compiler, connection, template=template)
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first or self.nulls_last:
self.nulls_first = not self.nulls_first
self.nulls_last = not self.nulls_last
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
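# Illustrative sketch (``Article`` is a hypothetical model): order_by()
# wraps expressions in OrderBy, and the nulls handling above is what the
# asc()/desc() keyword arguments feed into:
#
#     Article.objects.order_by(F('published').desc(nulls_last=True))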
class Window(Expression):
template = '%(expression)s OVER (%(window)s)'
# Although the main expression may either be an aggregate or an
# expression with an aggregate function, the GROUP BY that will
# be introduced in the query as a result is not desired.
contains_aggregate = False
contains_over_clause = True
filterable = False
def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, 'window_compatible', False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses." %
expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = ExpressionList(*self.order_by)
elif not isinstance(self.order_by, BaseExpression):
raise ValueError(
'order_by must be either an Expression or a sequence of '
'expressions.'
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], []
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler, connection=connection,
template='PARTITION BY %(expressions)s',
)
window_sql.extend(sql_expr)
window_params.extend(sql_params)
if self.order_by is not None:
window_sql.append(' ORDER BY ')
order_sql, order_params = compiler.compile(self.order_by)
window_sql.extend(''.join(order_sql))
window_params.extend(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.extend(' ' + frame_sql)
window_params.extend(frame_params)
params.extend(window_params)
template = template or self.template
return template % {
'expression': expr_sql,
'window': ''.join(window_sql).strip()
}, params
def __str__(self):
return '{} OVER ({}{}{})'.format(
str(self.source_expression),
'PARTITION BY ' + str(self.partition_by) if self.partition_by else '',
'ORDER BY ' + str(self.order_by) if self.order_by else '',
str(self.frame or ''),
)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
def get_group_by_cols(self):
return []
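# Illustrative sketch (``Employee`` is a hypothetical model): Window pairs
# a window-compatible source expression with optional PARTITION BY and
# ORDER BY parts:
#
#     from django.db.models import Avg
#
#     Employee.objects.annotate(
#         dept_avg=Window(
#             expression=Avg('salary'),
#             partition_by=[F('department')],
#             order_by=F('hire_date').asc(),
#         ),
#     )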
class WindowFrame(Expression):
"""
    Model the frame clause in window expressions. There are two types of frame
    clauses, implemented as subclasses; however, all processing and validation
    (by no means intended to be complete) is done here. Thus, providing an end
    for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the
    last row in the frame).
"""
template = '%(frame_type)s BETWEEN %(start)s AND %(end)s'
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(connection, self.start.value, self.end.value)
return self.template % {
'frame_type': self.frame_type,
'start': start,
'end': end,
}, []
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
def get_group_by_cols(self):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = '%d %s' % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = '%d %s' % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
'frame_type': self.frame_type,
'start': start,
'end': end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError('Subclasses must implement window_frame_start_end().')
class RowRange(WindowFrame):
frame_type = 'ROWS'
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = 'RANGE'
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
|
88a016db6e03fcd64b4dd6cb087c2e78c3f0f4dc17969680f7caf3f31fac350c | from collections import Counter, OrderedDict
from operator import attrgetter
from django.db import IntegrityError, connections, transaction
from django.db.models import signals, sql
class ProtectedError(IntegrityError):
def __init__(self, msg, protected_objects):
self.protected_objects = protected_objects
super().__init__(msg, protected_objects)
def CASCADE(collector, field, sub_objs, using):
collector.collect(sub_objs, source=field.remote_field.model,
source_attr=field.name, nullable=field.null)
if field.null and not connections[using].features.can_defer_constraint_checks:
collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
raise ProtectedError(
"Cannot delete some instances of model '%s' because they are "
"referenced through a protected foreign key: '%s.%s'" % (
field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name
),
sub_objs
)
def SET(value):
if callable(value):
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value(), sub_objs)
else:
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value, sub_objs)
set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {})
return set_on_delete
def SET_NULL(collector, field, sub_objs, using):
collector.add_field_update(field, None, sub_objs)
def SET_DEFAULT(collector, field, sub_objs, using):
collector.add_field_update(field, field.get_default(), sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
pass
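# Illustrative sketch: these handlers are wired up through a ForeignKey's
# on_delete argument and invoked by the Collector during deletion
# (models and ``default_flair`` are hypothetical):
#
#     class Comment(models.Model):
#         post = models.ForeignKey(Post, on_delete=models.CASCADE)
#         moderator = models.ForeignKey(
#             User, null=True, on_delete=models.SET_NULL)
#         # SET() with a callable re-evaluates it at deletion time:
#         flair = models.ForeignKey(
#             Flair, on_delete=models.SET(default_flair))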
def get_candidate_relations_to_delete(opts):
# The candidate relations are the ones that come from N-1 and 1-1 relations.
# N-N (i.e., many-to-many) relations aren't candidates for deletion.
return (
f for f in opts.get_fields(include_hidden=True)
if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many)
)
class Collector:
def __init__(self, using):
self.using = using
# Initially, {model: {instances}}, later values become lists.
self.data = OrderedDict()
self.field_updates = {} # {model: {(field, value): {instances}}}
# fast_deletes is a list of queryset-likes that can be deleted without
# fetching the objects into memory.
self.fast_deletes = []
# Tracks deletion-order dependency for databases without transactions
# or ability to defer constraint checks. Only concrete model classes
# should be included, as the dependencies exist only between actual
# database tables; proxy models are represented here by their concrete
# parent.
self.dependencies = {} # {model: {models}}
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
"""
Add 'objs' to the collection of objects to be deleted. If the call is
the result of a cascade, 'source' should be the model that caused it,
and 'nullable' should be set to True if the relation can be null.
Return a list of all objects that were not already collected.
"""
if not objs:
return []
new_objs = []
model = objs[0].__class__
instances = self.data.setdefault(model, set())
for obj in objs:
if obj not in instances:
new_objs.append(obj)
instances.update(new_objs)
# Nullable relationships can be ignored -- they are nulled out before
# deleting, and therefore do not affect the order in which objects have
# to be deleted.
if source is not None and not nullable:
if reverse_dependency:
source, model = model, source
self.dependencies.setdefault(
source._meta.concrete_model, set()).add(model._meta.concrete_model)
return new_objs
def add_field_update(self, field, value, objs):
"""
Schedule a field update. 'objs' must be a homogeneous iterable
collection of model instances (e.g. a QuerySet).
"""
if not objs:
return
model = objs[0].__class__
self.field_updates.setdefault(
model, {}).setdefault(
(field, value), set()).update(objs)
def can_fast_delete(self, objs, from_field=None):
"""
Determine if the objects in the given queryset-like or single object
can be fast-deleted. This can be done if there are no cascades, no
parents and no signal listeners for the object class.
        The 'from_field' tells where we are coming from - we need this to
        determine if the objects are in fact to be deleted. Also allow
        skipping a parent -> child -> parent chain that would otherwise
        prevent fast-deleting the child.
"""
if from_field and from_field.remote_field.on_delete is not CASCADE:
return False
if hasattr(objs, '_meta'):
model = type(objs)
elif hasattr(objs, 'model') and hasattr(objs, '_raw_delete'):
model = objs.model
else:
return False
if (signals.pre_delete.has_listeners(model) or
signals.post_delete.has_listeners(model) or
signals.m2m_changed.has_listeners(model)):
return False
# The use of from_field comes from the need to avoid cascade back to
# parent when parent delete is cascading to child.
opts = model._meta
return (
all(link == from_field for link in opts.concrete_model._meta.parents.values()) and
# Foreign keys pointing to this model.
all(
related.field.remote_field.on_delete is DO_NOTHING
for related in get_candidate_relations_to_delete(opts)
) and (
# Something like generic foreign key.
not any(hasattr(field, 'bulk_related_objects') for field in opts.private_fields)
)
)
def get_del_batches(self, objs, field):
"""
Return the objs in suitably sized batches for the used connection.
"""
conn_batch_size = max(
connections[self.using].ops.bulk_batch_size([field.name], objs), 1)
if len(objs) > conn_batch_size:
return [objs[i:i + conn_batch_size]
for i in range(0, len(objs), conn_batch_size)]
else:
return [objs]
def collect(self, objs, source=None, nullable=False, collect_related=True,
source_attr=None, reverse_dependency=False, keep_parents=False):
"""
Add 'objs' to the collection of objects to be deleted as well as all
parent instances. 'objs' must be a homogeneous iterable collection of
model instances (e.g. a QuerySet). If 'collect_related' is True,
related objects will be handled by their respective on_delete handler.
If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True if the relation
        can be null.
If 'reverse_dependency' is True, 'source' will be deleted before the
current model, rather than after. (Needed for cascading to parent
models, the one case in which the cascade follows the forwards
direction of an FK rather than the reverse direction.)
        If 'keep_parents' is True, the data of parent models won't be deleted.
"""
if self.can_fast_delete(objs):
self.fast_deletes.append(objs)
return
new_objs = self.add(objs, source, nullable,
reverse_dependency=reverse_dependency)
if not new_objs:
return
model = new_objs[0].__class__
if not keep_parents:
# Recursively collect concrete model's parent models, but not their
# related objects. These will be found by meta.get_fields()
concrete_model = model._meta.concrete_model
for ptr in concrete_model._meta.parents.values():
if ptr:
parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
self.collect(parent_objs, source=model,
source_attr=ptr.remote_field.related_name,
collect_related=False,
reverse_dependency=True)
if collect_related:
parents = model._meta.parents
for related in get_candidate_relations_to_delete(model._meta):
# Preserve parent reverse relationships if keep_parents=True.
if keep_parents and related.model in parents:
continue
field = related.field
if field.remote_field.on_delete == DO_NOTHING:
continue
batches = self.get_del_batches(new_objs, field)
for batch in batches:
sub_objs = self.related_objects(related, batch)
if self.can_fast_delete(sub_objs, from_field=field):
self.fast_deletes.append(sub_objs)
elif sub_objs:
field.remote_field.on_delete(self, field, sub_objs, self.using)
for field in model._meta.private_fields:
if hasattr(field, 'bulk_related_objects'):
# It's something like generic foreign key.
sub_objs = field.bulk_related_objects(new_objs, self.using)
self.collect(sub_objs, source=model, nullable=True)
def related_objects(self, related, objs):
"""
Get a QuerySet of objects related to `objs` via the relation `related`.
"""
return related.related_model._base_manager.using(self.using).filter(
**{"%s__in" % related.field.name: objs}
)
def instances_with_model(self):
for model, instances in self.data.items():
for obj in instances:
yield model, obj
def sort(self):
sorted_models = []
concrete_models = set()
models = list(self.data)
while len(sorted_models) < len(models):
found = False
for model in models:
if model in sorted_models:
continue
dependencies = self.dependencies.get(model._meta.concrete_model)
if not (dependencies and dependencies.difference(concrete_models)):
sorted_models.append(model)
concrete_models.add(model._meta.concrete_model)
found = True
if not found:
return
self.data = OrderedDict((model, self.data[model])
for model in sorted_models)
def delete(self):
# sort instance collections
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
# if possible, bring the models in an order suitable for databases that
# don't support transactions or cannot defer constraint checks until the
# end of a transaction.
self.sort()
# number of objects deleted for each model label
deleted_counter = Counter()
# Optimize for the case with a single obj and no dependencies
if len(self.data) == 1 and len(instances) == 1:
instance = list(instances)[0]
if self.can_fast_delete(instance):
with transaction.mark_for_rollback_on_error():
count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)
return count, {model._meta.label: count}
with transaction.atomic(using=self.using, savepoint=False):
# send pre_delete signals
for model, obj in self.instances_with_model():
if not model._meta.auto_created:
signals.pre_delete.send(
sender=model, instance=obj, using=self.using
)
# fast deletes
for qs in self.fast_deletes:
count = qs._raw_delete(using=self.using)
deleted_counter[qs.model._meta.label] += count
# update fields
for model, instances_for_fieldvalues in self.field_updates.items():
for (field, value), instances in instances_for_fieldvalues.items():
query = sql.UpdateQuery(model)
query.update_batch([obj.pk for obj in instances],
{field.name: value}, self.using)
# reverse instance collections
for instances in self.data.values():
instances.reverse()
# delete instances
for model, instances in self.data.items():
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
count = query.delete_batch(pk_list, self.using)
deleted_counter[model._meta.label] += count
if not model._meta.auto_created:
for obj in instances:
signals.post_delete.send(
sender=model, instance=obj, using=self.using
)
# update collected instances
for instances_for_fieldvalues in self.field_updates.values():
for (field, value), instances in instances_for_fieldvalues.items():
for obj in instances:
setattr(obj, field.attname, value)
for model, instances in self.data.items():
for instance in instances:
setattr(instance, model._meta.pk.attname, None)
return sum(deleted_counter.values()), dict(deleted_counter)
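# Illustrative sketch of the overall flow (internal API): Model.delete()
# builds a Collector, collect() walks relations applying the on_delete
# handlers above, and delete() runs everything in one atomic block:
#
#     collector = Collector(using='default')
#     collector.collect([some_instance])
#     total_deleted, per_model_counts = collector.delete()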
|
9d3697c2a26d49f4a335e64b6280e8184d0a6914013074596080d95db6df5c58 | import datetime
import decimal
import functools
import hashlib
import logging
from time import time
from django.conf import settings
from django.db.utils import NotSupportedError
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper:
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
yield from self.cursor
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Close instead of passing through to avoid backend-specific behavior
# (#17671). Catch errors liberally because errors in cleanup code
# aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None, kparams=None):
# Keyword parameters for callproc aren't supported in PEP 249, but the
# database driver may support them (e.g. cx_Oracle).
if kparams is not None and not self.db.features.supports_callproc_kwargs:
raise NotSupportedError(
'Keyword parameters for callproc are not supported on this '
'database backend.'
)
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None and kparams is None:
return self.cursor.callproc(procname)
elif kparams is None:
return self.cursor.callproc(procname, params)
else:
params = params or ()
return self.cursor.callproc(procname, params, kparams)
def execute(self, sql, params=None):
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
def executemany(self, sql, param_list):
return self._execute_with_wrappers(sql, param_list, many=True, executor=self._executemany)
def _execute_with_wrappers(self, sql, params, many, executor):
context = {'connection': self.db, 'cursor': self}
for wrapper in reversed(self.db.execute_wrappers):
executor = functools.partial(wrapper, executor)
return executor(sql, params, many, context)
def _execute(self, sql, params, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def _executemany(self, sql, param_list, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
start = time()
try:
return super().execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries_log.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, params,
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
start = time()
try:
return super().executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries_log.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, param_list,
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
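# --- Illustrative usage (hedged; not part of Django itself) ---------------
# How the execute_wrappers consumed by _execute_with_wrappers() above are
# typically installed from application code. `log_sql` is a hypothetical
# wrapper name; connection.execute_wrapper() is the public entry point.
#
#     from django.db import connection
#
#     def log_sql(execute, sql, params, many, context):
#         # `execute` is the next executor in the chain built by
#         # _execute_with_wrappers(); calling it actually runs the query.
#         print('about to run: %s' % sql)
#         return execute(sql, params, many, context)
#
#     with connection.execute_wrapper(log_sql):
#         ...  # every query in this block passes through log_sql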
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # return None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int((microseconds + '000000')[:6]))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Remove timezone information.
if '-' in t:
t, _ = t.split('-', 1)
elif '+' in t:
t, _ = t.split('+', 1)
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(
int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo
)
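# Hedged sanity check of the converters above. The results assume
# settings.USE_TZ is False, so typecast_timestamp() returns a naive
# datetime (with USE_TZ=True it would carry tzinfo=utc):
#
#     >>> typecast_date('2005-07-29')
#     datetime.date(2005, 7, 29)
#     >>> typecast_time('09:56:00.5')
#     datetime.time(9, 56, 0, 500000)
#     >>> typecast_timestamp('2005-07-29 15:48:00.590358-05')
#     datetime.datetime(2005, 7, 29, 15, 48, 0, 590358)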
###############################################
# Converters from Python to database (string) #
###############################################
def split_identifier(identifier):
"""
Split a SQL identifier into a two element tuple of (namespace, name).
The identifier could be a table, column, or sequence name, and might be
prefixed by a namespace.
"""
try:
namespace, name = identifier.split('"."')
except ValueError:
namespace, name = '', identifier
return namespace.strip('"'), name.strip('"')
def truncate_name(identifier, length=None, hash_len=4):
"""
Shorten a SQL identifier to a repeatable mangled version with the given
length.
If a quote-stripped name contains a namespace, e.g. USERNAME"."TABLE,
truncate the table portion only.
"""
namespace, name = split_identifier(identifier)
if length is None or len(name) <= length:
return identifier
digest = names_digest(name, length=hash_len)
return '%s%s%s' % ('%s"."' % namespace if namespace else '', name[:length - hash_len], digest)
def names_digest(*args, length):
"""
Generate a short MD5-based hex digest of a set of arguments that can be
used to shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(arg.encode())
return h.hexdigest()[:length]
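# Hedged sketch of how the two helpers above compose. For a 22-character
# name shortened to 10 characters, the first 6 characters are kept and a
# 4-character digest of the full name is appended:
#
#     >>> truncate_name('a_very_long_table_name', length=10, hash_len=4)
#     # -> 'a_very' + the 4-char digest, i.e. always exactly 10 characters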
def format_number(value, max_digits, decimal_places):
"""
Format a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(decimal.Decimal(1).scaleb(-decimal_places), context=context)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
def strip_quotes(table_name):
"""
Strip quotes off of quoted table names to make them safe for use in index
names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
scheme) becomes 'USER"."TABLE'.
"""
has_quotes = table_name.startswith('"') and table_name.endswith('"')
return table_name[1:-1] if has_quotes else table_name
|
13449dc23401251971fc8c9d038ec8868eebdf57ab2eeb08d67d75107c568e7c | from collections import namedtuple
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
def is_referenced_by_foreign_key(state, model_name_lower, field, field_name):
for state_app_label, state_model in state.models:
for _, f in state.models[state_app_label, state_model].fields:
if (f.related_model and
'%s.%s' % (state_app_label, model_name_lower) == f.related_model.lower() and
hasattr(f, 'to_fields')):
if (f.to_fields[0] is None and field.primary_key) or field_name in f.to_fields:
return True
return False
class ModelTuple(namedtuple('ModelTupleBase', ('app_label', 'model_name'))):
@classmethod
def from_model(cls, model, app_label=None, model_name=None):
"""
Take a model class or a 'app_label.ModelName' string and return a
ModelTuple('app_label', 'modelname'). The optional app_label and
model_name arguments are the defaults if "self" or "ModelName" are
passed.
"""
if isinstance(model, str):
if model == RECURSIVE_RELATIONSHIP_CONSTANT:
return cls(app_label, model_name)
if '.' in model:
return cls(*model.lower().split('.', 1))
return cls(app_label, model.lower())
return cls(model._meta.app_label, model._meta.model_name)
def __eq__(self, other):
if isinstance(other, ModelTuple):
# Consider ModelTuple equal if their model_name is equal and either
# one of them is missing an app_label.
return self.model_name == other.model_name and (
self.app_label is None or other.app_label is None or self.app_label == other.app_label
)
return super().__eq__(other)
def field_references_model(field, model_tuple):
"""Return whether or not field references model_tuple."""
remote_field = field.remote_field
if remote_field:
if ModelTuple.from_model(remote_field.model) == model_tuple:
return True
through = getattr(remote_field, 'through', None)
if through and ModelTuple.from_model(through) == model_tuple:
return True
return False
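# Hedged sketch of the helpers above:
#
#     >>> ModelTuple.from_model('app.Author')
#     ModelTuple(app_label='app', model_name='author')
#     >>> ModelTuple('app', 'author') == ModelTuple(None, 'author')
#     True
#
# The second example shows the deliberate asymmetry in __eq__: a tuple
# with no app_label matches any app_label, which is what allows unscoped
# "ModelName" references in migrations to be resolved.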
|
2fe13148e47b852a41f3f92e65affb0db17eed7a00ffc7d50a84d35e151d8532 | import collections.abc
import copy
import datetime
import decimal
import operator
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import partialmethod, total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
from django.db import connection, connections, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.duration import duration_microseconds, duration_string
from django.utils.functional import Promise, cached_property
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
__all__ = [
'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField',
'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField',
'DateField', 'DateTimeField', 'DecimalField', 'DurationField',
'EmailField', 'Empty', 'Field', 'FieldDoesNotExist', 'FilePathField',
'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField',
'NOT_PROVIDED', 'NullBooleanField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'SlugField', 'SmallIntegerField', 'TextField',
'TimeField', 'URLField', 'UUIDField',
]
class Empty:
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be prepended to most
# "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
def return_None():
return None
@total_ordering
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates; creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=(),
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.abc.Iterator):
choices = list(choices)
self.choices = choices or []
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self._db_tablespace = db_tablespace
self.auto_created = auto_created
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = list(validators) # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
"""
Return "app_label.model_label.field_name" for fields attached to
models.
"""
if not hasattr(self, 'model'):
return super().__str__()
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""Display the module, class, and name of the field."""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_choices(),
*self._check_db_index(),
*self._check_null_allowed_for_primary_keys(),
*self._check_backend_specific_checks(**kwargs),
*self._check_validators(),
*self._check_deprecation_details(),
]
def _check_field_name(self):
"""
Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk".
"""
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
obj=self,
id='fields.E001',
)
]
elif LOOKUP_SEP in self.name:
return [
checks.Error(
'Field names must not contain "%s".' % (LOOKUP_SEP,),
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=self,
id='fields.E003',
)
]
else:
return []
def _check_choices(self):
if not self.choices:
return []
def is_value(value, accept_promise=True):
return isinstance(value, (str, Promise) if accept_promise else str) or not is_iterable(value)
if is_value(self.choices, accept_promise=False):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
obj=self,
id='fields.E004',
)
]
# Expect [group_name, [value, display]]
for choices_group in self.choices:
try:
group_name, group_choices = choices_group
except (TypeError, ValueError):
# Containing non-pairs
break
try:
if not all(
is_value(value) and is_value(human_name)
for value, human_name in group_choices
):
break
except (TypeError, ValueError):
# No groups, choices in the form [value, display]
value, human_name = group_name, group_choices
if not is_value(value) or not is_value(human_name):
break
# Special case: choices=['ab']
if isinstance(choices_group, str):
break
else:
return []
return [
checks.Error(
"'choices' must be an iterable containing "
"(actual value, human readable name) tuples.",
obj=self,
id='fields.E005',
)
]
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
app_label = self.model._meta.app_label
for db in connections:
if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name):
return connections[db].validation.check_field(self, **kwargs)
return []
def _check_validators(self):
errors = []
for i, validator in enumerate(self.validators):
if not callable(validator):
errors.append(
checks.Error(
"All 'validators' must be callable.",
hint=(
"validators[{i}] ({repr}) isn't a function or "
"instance of a validator class.".format(
i=i, repr=repr(validator),
)
),
obj=self,
id='fields.E008',
)
)
return errors
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be
used by Django.
"""
return sql, params
def deconstruct(self):
"""
Return enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class() has
been run.
* The import path of the field, including the class, e.g.
django.db.models.IntegerField. This should be the most portable
version, so less specific may be better.
* A list of positional arguments.
* A dict of keyword arguments.
Note that the positional or keyword arguments must contain values of
the following types (including inner values of collection types):
* None, bool, str, int, float, complex, set, frozenset, list, tuple,
dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their
full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this
time, just ensure that the resulting field is the same - prefer keyword
arguments over positional ones, and omit parameters with their default
values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": None,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
"db_tablespace": "_db_tablespace",
}
equals_comparison = {"choices", "validators"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.abc.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (self.name, path, [], keywords)
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
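# Hedged sketch of deconstruct()/clone() on an unattached field. CharField
# is defined later in this module; the exact output assumes the defaults
# listed in `possibles` above:
#
#     >>> f = CharField(max_length=100, null=True)
#     >>> f.deconstruct()
#     (None, 'django.db.models.CharField', [], {'max_length': 100, 'null': True})
#     >>> f.clone().max_length
#     100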
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.remote_field:
obj.remote_field = copy.copy(self.remote_field)
if hasattr(self.remote_field, 'field') and self.remote_field.field is self:
obj.remote_field.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
state = self.__dict__.copy()
# The _get_default cached_property can't be pickled due to lambda
# usage.
state.pop('_get_default', None)
return _empty, (self.__class__,), state
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
something other than None, then the returned value is used when saving
the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Convert the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Return the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
"""
Some validators can't be created at field initialization time.
This method provides a way to delay their creation until required.
"""
return [*self.default_validators, *self._validators]
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validate value and raise ValidationError if necessary. Subclasses
should override this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self.choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python() and validate() are propagated. Return the correct
value if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type_parameters(self, connection):
return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_')
def db_check(self, connection):
"""
Return the database column check constraint for this field, for the
provided connection. Works the same way as db_type() for the case that
get_internal_type() does not map to a preexisting model field.
"""
data = self.db_type_parameters(connection)
try:
return connection.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
return None
def db_type(self, connection):
"""
Return the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = self.db_type_parameters(connection)
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. For example, this method is called by ForeignKey and OneToOneField
to determine its data type.
"""
return self.db_type(connection)
def cast_db_type(self, connection):
"""Return the data type to use in the Cast() function."""
db_type = connection.ops.cast_data_types.get(self.get_internal_type())
if db_type:
return db_type % self.db_type_parameters(connection)
return self.db_type(connection)
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return values
(type, checks). This will look at db_type(), allowing custom model
fields to override it.
"""
type_string = self.db_type(connection)
check_string = self.db_check(connection)
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
@property
def db_tablespace(self):
return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
def set_attributes_from_name(self, name):
self.name = self.name or name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, private_only=False):
"""
Register the field with the model class it belongs to.
If private_only is True, create a separate instance of this field
for every subclass of cls, even if cls is not an abstract model.
"""
self.set_attributes_from_name(name)
self.model = cls
if private_only:
cls._meta.add_field(self, private=True)
else:
cls._meta.add_field(self)
if self.column:
# Don't override classmethods with the descriptor. This means that
# if you have a classmethod and a field with the same name, then
# such fields can't be deferred (we don't have a check for this).
if not getattr(cls, self.attname, None):
setattr(cls, self.attname, DeferredAttribute(self.attname))
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
partialmethod(cls._get_FIELD_display, field=self))
def get_filter_kwargs_for_object(self, obj):
"""
Return a dict that, when passed as kwargs to
self.model._default_manager.filter(), would yield all instances having
the same value for this field as obj has.
"""
return {self.name: getattr(obj, self.attname)}
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""Return field's value just before saving."""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""Perform preliminary non-db specific value checks and conversions."""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""
Return field's value prepared for interacting with the database backend.
Used by the default implementations of get_db_prep_save().
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""Return field's value prepared for saving into a database."""
return self.get_db_prep_value(value, connection=connection, prepared=False)
def has_default(self):
"""Return a boolean of whether this field has a default value."""
return self.default is not NOT_PROVIDED
def get_default(self):
"""Return the default value for this field."""
return self._get_default()
@cached_property
def _get_default(self):
if self.has_default():
if callable(self.default):
return self.default
return lambda: self.default
if not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls):
return return_None
return str # return empty string
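# Hedged sketch of how _get_default() resolves, assuming a backend where
# interprets_empty_strings_as_nulls is False:
#
#     IntegerField(default=0).get_default()  # -> 0 (wrapped in a lambda)
#     IntegerField().get_default()           # -> None (empty strings not allowed)
#     CharField().get_default()              # -> '' (str is used as the factory)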
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()):
"""
Return choices with a default blank choice included, for use
as <select> choices for this field.
"""
if self.choices:
choices = list(self.choices)
if include_blank:
blank_defined = any(choice in ('', None) for choice, _ in self.flatchoices)
if not blank_defined:
choices = blank_choice + choices
return choices
rel_model = self.remote_field.model
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
choice_func = operator.attrgetter(
self.remote_field.get_related_field().attname
if hasattr(self.remote_field, 'get_related_field')
else 'pk'
)
return (blank_choice if include_blank else []) + [
(choice_func(x), str(x))
for x in rel_model._default_manager.complex_filter(limit_choices_to).order_by(*ordering)
]
def value_to_string(self, obj):
"""
Return a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return str(self.value_from_object(obj))
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""Return a django.forms.Field instance for this field."""
defaults = {
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text,
}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial', 'disabled'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""Return the value of this field in the given model instance."""
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_primary_key(),
]
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def rel_db_type(self, connection):
return IntegerField().db_type(connection=connection)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
from django.db.models.expressions import OuterRef
value = super().get_prep_value(value)
if value is None or isinstance(value, OuterRef):
return value
return int(value)
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.auto_field, "Model %s can't have more than one AutoField." % cls._meta.label
super().contribute_to_class(cls, name, **kwargs)
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BigAutoField(AutoField):
description = _("Big (8 byte) integer")
def get_internal_type(self):
return "BigAutoField"
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
'invalid_nullable': _("'%(value)s' value must be either True, False, or None."),
}
description = _("Boolean (Either True or False)")
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if self.null and value in self.empty_values:
return None
if value in (True, False):
# 1/0 are equal to True/False. bool() converts former to latter.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid_nullable' if self.null else 'invalid'],
code='invalid',
params={'value': value},
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
if self.choices:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
form_class = forms.NullBooleanField if self.null else forms.BooleanField
# In HTML checkboxes, 'required' means "must be checked" which is
# different from the choices case ("must select some value").
# required=False allows unchecked checkboxes.
defaults = {'form_class': form_class, 'required': False}
return super().formfield(**{**defaults, **kwargs})
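# Hedged examples of BooleanField.to_python() above:
#
#     BooleanField().to_python('1')          # -> True
#     BooleanField().to_python('f')          # -> False
#     BooleanField(null=True).to_python('')  # -> None ('' is an empty value)
#     BooleanField().to_python('maybe')      # raises ValidationError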
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_max_length_attribute(**kwargs),
]
def _check_max_length_attribute(self, **kwargs):
if self.max_length is None:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
obj=self,
id='fields.E120',
)
]
elif (not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or
self.max_length <= 0):
return [
checks.Error(
"'max_length' must be a positive integer.",
obj=self,
id='fields.E121',
)
]
else:
return []
def cast_db_type(self, connection):
if self.max_length is None:
return connection.ops.cast_char_field_without_max_length
return super().cast_db_type(connection)
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
# TODO: Handle multiple backends with different feature flags.
if self.null and not connection.features.interprets_empty_strings_as_nulls:
defaults['empty_value'] = None
defaults.update(kwargs)
return super().formfield(**defaults)
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
system_check_removed_details = {
'msg': (
'CommaSeparatedIntegerField is removed except for support in '
'historical migrations.'
),
'hint': (
'Use CharField(validators=[validate_comma_separated_integer_list]) '
'instead.'
),
'id': 'fields.E901',
}
class DateTimeCheckMixin:
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_mutually_exclusive_options(),
*self._check_fix_default_value(),
]
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()]
enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(
cls, 'get_next_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
)
setattr(
cls, 'get_previous_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DateField,
**kwargs,
})
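# Hedged examples of DateField.to_python() above (USE_TZ=False assumed, so
# no aware-datetime conversion is involved):
#
#     DateField().to_python('2019-05-01')  # -> datetime.date(2019, 5, 1)
#     DateField().to_python('2019-02-30')  # raises ValidationError (invalid_date)
#     DateField().to_python('not a date')  # raises ValidationError (invalid)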
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
def get_prep_value(self, value):
value = super().get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datetimefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DateTimeField,
**kwargs,
})
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
digits_errors = [
*self._check_decimal_places(),
*self._check_max_digits(),
]
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=self,
id='fields.E134',
)
]
return []
@cached_property
def validators(self):
return super().validators + [
validators.DecimalValidator(self.max_digits, self.decimal_places)
]
@cached_property
def context(self):
return decimal.Context(prec=self.max_digits)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, float):
return self.context.create_decimal_from_float(value)
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_save(self, value, connection):
return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
return super().formfield(**{
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
**kwargs,
})
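# Hedged examples of DecimalField.to_python() above:
#
#     f = DecimalField(max_digits=5, decimal_places=2)
#     f.to_python('3.14')  # -> Decimal('3.14')
#     f.to_python(1.25)    # -> Decimal('1.25'), via the prec=max_digits context
#     f.to_python('abc')   # raises ValidationError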
class DurationField(Field):
"""
Store timedelta objects.
Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
of microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"[DD] [HH:[MM:]]ss[.uuuuuu] format.")
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
return duration_microseconds(value)
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super().get_db_converters(connection)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else duration_string(val)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DurationField,
**kwargs,
})
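# Hedged sketch of DurationField storage: on a backend without a native
# duration type, the value is stored as a bigint of microseconds,
#
#     DurationField().get_db_prep_value(datetime.timedelta(minutes=1), connection)
#     # -> 60000000
#
# while serialization stays human-readable via duration_string(), e.g.
# '00:01:00' for the same value.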
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs.setdefault('max_length', 254)
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# We do not exclude max_length if it matches the default, as we want to
# change the default in the future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
return super().formfield(**{
'form_class': forms.EmailField,
**kwargs,
})
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs.setdefault('max_length', 100)
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_allowing_files_or_folders(**kwargs),
]
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def formfield(self, **kwargs):
return super().formfield(**{
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
**kwargs,
})
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.FloatField,
**kwargs,
})
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_max_length_warning(),
]
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with %s." % self.__class__.__name__,
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
validators_ = super().validators
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if (min_value is not None and not
any(isinstance(validator, validators.MinValueValidator) and
validator.limit_value >= min_value for validator in validators_)):
validators_.append(validators.MinValueValidator(min_value))
if (max_value is not None and not
any(isinstance(validator, validators.MaxValueValidator) and
validator.limit_value <= max_value for validator in validators_)):
validators_.append(validators.MaxValueValidator(max_value))
return validators_
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return int(value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.IntegerField,
**kwargs,
})
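# Illustrative sketch (not part of Django): the lazy `validators` property
# above appends backend-derived bounds. Assuming a backend whose IntegerField
# range is (-2147483648, 2147483647):
#
#     f = models.IntegerField()
#     f.clean(2 ** 31, None)  # ValidationError from the added MaxValueValidator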
class BigIntegerField(IntegerField):
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT,
**kwargs,
})
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_removed_details = {
'msg': (
'IPAddressField has been removed except for support in '
'historical migrations.'
),
'hint': 'Use GenericIPAddressField instead.',
'id': 'fields.E900',
}
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def get_internal_type(self):
return "IPAddressField"
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super().__init__(verbose_name, name, *args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_blank_and_null_values(**kwargs),
]
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
'GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.',
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length") == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value is None:
return None
if not isinstance(value, str):
value = str(value)
value = value.strip()
if ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_ipaddressfield_value(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return str(value)
def formfield(self, **kwargs):
return super().formfield(**{
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
**kwargs,
})
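# Illustrative sketch (not part of Django): with unpack_ipv4=True, an
# IPv4-mapped IPv6 address is normalized by clean_ipv6_address() during
# to_python()/get_prep_value().
#
#     f = models.GenericIPAddressField(protocol='both', unpack_ipv4=True)
#     f.to_python('::ffff:192.0.2.1')   # -> '192.0.2.1'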
class NullBooleanField(BooleanField):
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
'invalid_nullable': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
class PositiveIntegerRelDbTypeMixin:
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. In most cases, a foreign key pointing to a positive integer
primary key will have an integer column data type but some databases
(e.g. MySQL) have an unsigned integer type. In that case
(related_fields_match_type=True), the primary key should return its
db_type.
"""
if connection.features.related_fields_match_type:
return self.db_type(connection)
else:
return IntegerField().db_type(connection=connection)
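# Illustrative sketch (not part of Django): on a backend with
# related_fields_match_type=True (e.g. MySQL), a ForeignKey pointing at a
# PositiveIntegerField primary key reuses its unsigned db_type(); elsewhere it
# falls back to the plain IntegerField column type:
#
#     column_type = target_field.rel_db_type(connection)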
class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs):
self.allow_unicode = allow_unicode
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
if self.allow_unicode is not False:
kwargs['allow_unicode'] = self.allow_unicode
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.SlugField,
'allow_unicode': self.allow_unicode,
**kwargs,
})
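# Illustrative sketch (not part of Django; model name is hypothetical):
#
#     class Article(models.Model):
#         slug = models.SlugField()                        # max_length=50, db_index=True
#         title_sl = models.SlugField(allow_unicode=True)  # validate_unicode_slug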
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into the widget, for example).
return super().formfield(**{
'max_length': self.max_length,
**({} if self.choices else {'widget': forms.Textarea}),
**kwargs,
})
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_timefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.TimeField,
**kwargs,
})
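# Illustrative sketch (not part of Django; model name is hypothetical):
# auto_now/auto_now_add force editable=False and are applied by pre_save().
#
#     class Visit(models.Model):
#         checked_in = models.TimeField(auto_now_add=True)  # set once, on insert
#         updated = models.TimeField(auto_now=True)         # refreshed on every save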
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs.setdefault('max_length', 200)
super().__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
return super().formfield(**{
'form_class': forms.URLField,
**kwargs,
})
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs.setdefault('editable', False)
super().__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.editable:
kwargs['editable'] = True
else:
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_placeholder(self, value, compiler, connection):
return connection.ops.binary_placeholder_sql(value)
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super().get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super().get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(self.value_from_object(obj)).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, str):
return memoryview(b64decode(value.encode('ascii')))
return value
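# Illustrative sketch (not part of Django): round-tripping BinaryField's
# serialization hooks above.
#
#     f = models.BinaryField()
#     text = b64encode(b'\x00\x01').decode('ascii')  # what value_to_string() emits
#     f.to_python(text)                              # -> memoryview(b'\x00\x01')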
class UUIDField(Field):
default_error_messages = {
'invalid': _("'%(value)s' is not a valid UUID."),
}
description = _('Universally unique identifier')
empty_strings_allowed = False
def __init__(self, verbose_name=None, **kwargs):
kwargs['max_length'] = 32
super().__init__(verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
if not isinstance(value, uuid.UUID):
value = self.to_python(value)
if connection.features.has_native_uuid_field:
return value
return value.hex
def to_python(self, value):
if value is not None and not isinstance(value, uuid.UUID):
input_form = 'int' if isinstance(value, int) else 'hex'
try:
return uuid.UUID(**{input_form: value})
except (AttributeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.UUIDField,
**kwargs,
})
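# Illustrative sketch (not part of Django): to_python() accepts hex strings
# and ints; on backends without a native uuid type the 32-char hex is stored.
#
#     f = models.UUIDField()
#     f.to_python('12345678123456781234567812345678')
#     # -> UUID('12345678-1234-5678-1234-567812345678')
#     f.to_python(0x12345678123456781234567812345678)  # same value, 'int' form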
b673e9f51e92a741cd736f8e890af511607bec30e2ca130f520f9abd2c4cea0e
import functools
import inspect
from functools import partial
from django import forms
from django.apps import apps
from django.conf import SettingsReference
from django.core import checks, exceptions
from django.db import connection, router
from django.db.backends import utils
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.query_utils import PathInfo
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from . import Field
from .mixins import FieldCacheMixin
from .related_descriptors import (
ForwardManyToOneDescriptor, ForwardOneToOneDescriptor,
ManyToManyDescriptor, ReverseManyToOneDescriptor,
ReverseOneToOneDescriptor,
)
from .related_lookups import (
RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn,
RelatedIsNull, RelatedLessThan, RelatedLessThanOrEqual,
)
from .reverse_related import (
ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel,
)
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def resolve_relation(scope_model, relation):
"""
Transform relation into a model or fully-qualified model string of the form
"app_label.ModelName", relative to scope_model.
The relation argument can be:
* RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case
the model argument will be returned.
* A bare model name without an app_label, in which case scope_model's
app_label will be prepended.
* An "app_label.ModelName" string.
* A model class, which will be returned unchanged.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
relation = scope_model
# Look for an "app.Model" relation
if isinstance(relation, str):
if "." not in relation:
relation = "%s.%s" % (scope_model._meta.app_label, relation)
return relation
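# Illustrative sketch (not part of Django; Book/Author are hypothetical models
# in an app labeled 'library'):
#
#     resolve_relation(Book, 'self')            # -> Book
#     resolve_relation(Book, 'Author')          # -> 'library.Author'
#     resolve_relation(Book, 'shop.Publisher')  # -> 'shop.Publisher' (unchanged)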
def lazy_related_operation(function, model, *related_models, **kwargs):
"""
Schedule `function` to be called once `model` and all `related_models`
have been imported and registered with the app registry. `function` will
be called with the newly-loaded model classes as its positional arguments,
plus any optional keyword arguments.
The `model` argument must be a model class. Each subsequent positional
argument is another model, or a reference to another model - see
`resolve_relation()` for the various forms these may take. Any relative
references will be resolved relative to `model`.
This is a convenience wrapper for `Apps.lazy_model_operation` - the app
registry model used is the one found in `model._meta.apps`.
"""
models = [model] + [resolve_relation(model, rel) for rel in related_models]
model_keys = (make_model_tuple(m) for m in models)
apps = model._meta.apps
return apps.lazy_model_operation(partial(function, **kwargs), *model_keys)
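# Illustrative sketch (not part of Django; Book and the callback are
# hypothetical): defer work until both models are loaded; string references
# are resolved relative to Book.
#
#     def connect(book_model, author_model, note):
#         ...
#     lazy_related_operation(connect, Book, 'Author', note='ready')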
class RelatedField(FieldCacheMixin, Field):
"""Base class that all relational fields inherit from."""
# Field flags
one_to_many = False
one_to_one = False
many_to_many = False
many_to_one = False
@cached_property
def related_model(self):
# Can't cache this property until all the models are loaded.
apps.check_models_ready()
return self.remote_field.model
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_related_name_is_valid(),
*self._check_related_query_name_is_valid(),
*self._check_relation_model_exists(),
*self._check_referencing_to_swapped_model(),
*self._check_clashes(),
]
def _check_related_name_is_valid(self):
import keyword
related_name = self.remote_field.related_name
if related_name is None:
return []
is_valid_id = not keyword.iskeyword(related_name) and related_name.isidentifier()
if not (is_valid_id or related_name.endswith('+')):
return [
checks.Error(
"The name '%s' is invalid related_name for field %s.%s" %
(self.remote_field.related_name, self.model._meta.object_name,
self.name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=self,
id='fields.E306',
)
]
return []
def _check_related_query_name_is_valid(self):
if self.remote_field.is_hidden():
return []
rel_query_name = self.related_query_name()
errors = []
if rel_query_name.endswith('_'):
errors.append(
checks.Error(
"Reverse query name '%s' must not end with an underscore."
% (rel_query_name,),
hint=("Add or change a related_name or related_query_name "
"argument for this field."),
obj=self,
id='fields.E308',
)
)
if LOOKUP_SEP in rel_query_name:
errors.append(
checks.Error(
"Reverse query name '%s' must not contain '%s'."
% (rel_query_name, LOOKUP_SEP),
hint=("Add or change a related_name or related_query_name "
"argument for this field."),
obj=self,
id='fields.E309',
)
)
return errors
def _check_relation_model_exists(self):
rel_is_missing = self.remote_field.model not in self.opts.apps.get_models()
rel_is_string = isinstance(self.remote_field.model, str)
model_name = self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name
if rel_is_missing and (rel_is_string or not self.remote_field.model._meta.swapped):
return [
checks.Error(
"Field defines a relation with model '%s', which is either "
"not installed, or is abstract." % model_name,
obj=self,
id='fields.E300',
)
]
return []
def _check_referencing_to_swapped_model(self):
if (self.remote_field.model not in self.opts.apps.get_models() and
not isinstance(self.remote_field.model, str) and
self.remote_field.model._meta.swapped):
model = "%s.%s" % (
self.remote_field.model._meta.app_label,
self.remote_field.model._meta.object_name
)
return [
checks.Error(
"Field defines a relation with the model '%s', which has "
"been swapped out." % model,
hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable,
obj=self,
id='fields.E301',
)
]
return []
def _check_clashes(self):
"""Check accessor and reverse query name clashes."""
from django.db.models.base import ModelBase
errors = []
opts = self.model._meta
# `f.remote_field.model` may be a string instead of a model. Skip if model name is
# not resolved.
if not isinstance(self.remote_field.model, ModelBase):
return []
# Consider that we are checking field `Model.foreign` and the models
# are:
#
# class Target(models.Model):
# model = models.IntegerField()
# model_set = models.IntegerField()
#
# class Model(models.Model):
# foreign = models.ForeignKey(Target)
# m2m = models.ManyToManyField(Target)
# rel_opts.object_name == "Target"
rel_opts = self.remote_field.model._meta
# If the field doesn't install a backward relation on the target model
# (so `is_hidden` returns True), then there are no clashes to check
# and we can skip these fields.
rel_is_hidden = self.remote_field.is_hidden()
        rel_name = self.remote_field.get_accessor_name()  # i.e. "model_set"
        rel_query_name = self.related_query_name()  # i.e. "model"
        field_name = "%s.%s" % (opts.object_name, self.name)  # i.e. "Model.field"
# Check clashes between accessor or reverse query name of `field`
# and any other field name -- i.e. accessor for Model.foreign is
# model_set and it clashes with Target.model_set.
potential_clashes = rel_opts.fields + rel_opts.many_to_many
for clash_field in potential_clashes:
clash_name = "%s.%s" % (rel_opts.object_name, clash_field.name) # i.e. "Target.model_set"
if not rel_is_hidden and clash_field.name == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E302',
)
)
if clash_field.name == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E303',
)
)
# Check clashes between accessors/reverse query names of `field` and
        # any other field accessor -- i.e. Model.foreign accessor clashes with
# Model.m2m accessor.
potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
for clash_field in potential_clashes:
            clash_name = "%s.%s" % (  # i.e. "Model.m2m"
clash_field.related_model._meta.object_name,
clash_field.field.name)
if not rel_is_hidden and clash_field.get_accessor_name() == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E304',
)
)
if clash_field.get_accessor_name() == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with reverse query name for '%s'."
% (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E305',
)
)
return errors
def db_type(self, connection):
        # By default a related field will not have a column, as it relates to
        # columns in another table.
return None
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
super().contribute_to_class(cls, name, private_only=private_only, **kwargs)
self.opts = cls._meta
if not cls._meta.abstract:
if self.remote_field.related_name:
related_name = self.remote_field.related_name
else:
related_name = self.opts.default_related_name
if related_name:
related_name = related_name % {
'class': cls.__name__.lower(),
'model_name': cls._meta.model_name.lower(),
'app_label': cls._meta.app_label.lower()
}
self.remote_field.related_name = related_name
if self.remote_field.related_query_name:
related_query_name = self.remote_field.related_query_name % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower(),
}
self.remote_field.related_query_name = related_query_name
def resolve_related_class(model, related, field):
field.remote_field.model = related
field.do_related_class(related, model)
lazy_related_operation(resolve_related_class, cls, self.remote_field.model, field=self)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.remote_field.limit_choices_to:
kwargs['limit_choices_to'] = self.remote_field.limit_choices_to
if self.remote_field.related_name is not None:
kwargs['related_name'] = self.remote_field.related_name
if self.remote_field.related_query_name is not None:
kwargs['related_query_name'] = self.remote_field.related_query_name
return name, path, args, kwargs
def get_forward_related_filter(self, obj):
"""
        Return the keyword arguments that, when supplied to
        self.model.objects.filter(), would select all instances related through
this field to the remote obj. This is used to build the querysets
returned by related descriptors. obj is an instance of
self.related_field.model.
"""
return {
'%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname)
for _, rh_field in self.related_fields
}
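    # Illustrative sketch (not part of Django): for a plain single-column
    # ForeignKey named 'author', this returns {'author__id': obj.id} -- the
    # filter the related descriptors use to build their querysets.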
def get_reverse_related_filter(self, obj):
"""
Complement to get_forward_related_filter(). Return the keyword
        arguments that, when passed to self.related_field.model.objects.filter(),
        select all instances of self.related_field.model related through
this field to obj. obj is an instance of self.model.
"""
base_filter = {
rh_field.attname: getattr(obj, lh_field.attname)
for lh_field, rh_field in self.related_fields
}
descriptor_filter = self.get_extra_descriptor_filter(obj)
base_q = Q(**base_filter)
if isinstance(descriptor_filter, dict):
return base_q & Q(**descriptor_filter)
elif descriptor_filter:
return base_q & descriptor_filter
return base_q
@property
def swappable_setting(self):
"""
Get the setting that this is powered from for swapping, or None
if it's not swapped in / marked with swappable=False.
"""
if self.swappable:
# Work out string form of "to"
if isinstance(self.remote_field.model, str):
to_string = self.remote_field.model
else:
to_string = self.remote_field.model._meta.label
return apps.get_swappable_settings_name(to_string)
return None
def set_attributes_from_rel(self):
self.name = (
self.name or
(self.remote_field.model._meta.model_name + '_' + self.remote_field.model._meta.pk.name)
)
if self.verbose_name is None:
self.verbose_name = self.remote_field.model._meta.verbose_name
self.remote_field.set_field_name()
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.contribute_to_related_class(other, self.remote_field)
def get_limit_choices_to(self):
"""
Return ``limit_choices_to`` for this model field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.remote_field.limit_choices_to):
return self.remote_field.limit_choices_to()
return self.remote_field.limit_choices_to
def formfield(self, **kwargs):
"""
Pass ``limit_choices_to`` to the field being constructed.
        Only pass it if the relation type supports related fields.
        This is the same strategy used to pass the ``queryset`` to the field
being constructed.
"""
defaults = {}
if hasattr(self.remote_field, 'get_related_field'):
# If this is a callable, do not invoke it here. Just pass
# it in the defaults for when the form class will later be
# instantiated.
limit_choices_to = self.remote_field.limit_choices_to
defaults.update({
'limit_choices_to': limit_choices_to,
})
defaults.update(kwargs)
return super().formfield(**defaults)
def related_query_name(self):
"""
Define the name that can be used to identify this related object in a
table-spanning query.
"""
return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name
@property
def target_field(self):
"""
When filtering against this relation, return the field on the remote
model against which the filtering should happen.
"""
target_fields = self.get_path_info()[-1].target_fields
if len(target_fields) > 1:
raise exceptions.FieldError(
"The relation has multiple target fields, but only single target field was asked for")
return target_fields[0]
def get_cache_name(self):
return self.name
class ForeignObject(RelatedField):
"""
Abstraction of the ForeignKey relation to support multi-column relations.
"""
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
requires_unique_target = True
related_accessor_class = ReverseManyToOneDescriptor
forward_related_accessor_class = ForwardManyToOneDescriptor
rel_class = ForeignObjectRel
def __init__(self, to, on_delete, from_fields, to_fields, rel=None, related_name=None,
related_query_name=None, limit_choices_to=None, parent_link=False,
swappable=True, **kwargs):
if rel is None:
rel = self.rel_class(
self, to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
super().__init__(rel=rel, **kwargs)
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_to_fields_exist(),
*self._check_unique_target(),
]
def _check_to_fields_exist(self):
# Skip nonexistent models.
if isinstance(self.remote_field.model, str):
return []
errors = []
for to_field in self.to_fields:
if to_field:
try:
self.remote_field.model._meta.get_field(to_field)
except exceptions.FieldDoesNotExist:
errors.append(
checks.Error(
"The to_field '%s' doesn't exist on the related "
"model '%s'."
% (to_field, self.remote_field.model._meta.label),
obj=self,
id='fields.E312',
)
)
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.remote_field.model, str)
if rel_is_string or not self.requires_unique_target:
return []
try:
self.foreign_related_fields
except exceptions.FieldDoesNotExist:
return []
if not self.foreign_related_fields:
return []
unique_foreign_fields = {
frozenset([f.name])
for f in self.remote_field.model._meta.get_fields()
if getattr(f, 'unique', False)
}
unique_foreign_fields.update({
frozenset(ut)
for ut in self.remote_field.model._meta.unique_together
})
foreign_fields = {f.name for f in self.foreign_related_fields}
has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields)
if not has_unique_constraint and len(self.foreign_related_fields) > 1:
field_combination = ', '.join(
"'%s'" % rel_field.name for rel_field in self.foreign_related_fields
)
model_name = self.remote_field.model.__name__
return [
checks.Error(
"No subset of the fields %s on model '%s' is unique."
% (field_combination, model_name),
hint=(
"Add unique=True on any of those fields or add at "
"least a subset of them to a unique_together constraint."
),
obj=self,
id='fields.E310',
)
]
elif not has_unique_constraint:
field_name = self.foreign_related_fields[0].name
model_name = self.remote_field.model.__name__
return [
checks.Error(
"'%s.%s' must set unique=True because it is referenced by "
"a foreign key." % (model_name, field_name),
obj=self,
id='fields.E311',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs['on_delete'] = self.remote_field.on_delete
kwargs['from_fields'] = self.from_fields
kwargs['to_fields'] = self.to_fields
if self.remote_field.parent_link:
kwargs['parent_link'] = self.remote_field.parent_link
# Work out string form of "to"
if isinstance(self.remote_field.model, str):
kwargs['to'] = self.remote_field.model
else:
kwargs['to'] = "%s.%s" % (
self.remote_field.model._meta.app_label,
self.remote_field.model._meta.object_name,
)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ForeignKey pointing to a model "
"that is swapped in place of more than one model (%s and %s)"
% (kwargs['to'].setting_name, swappable_setting)
)
# Set it
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if not self.from_fields or len(self.from_fields) != len(self.to_fields):
            raise ValueError('Foreign Object from and to fields must be of the same non-zero length')
if isinstance(self.remote_field.model, str):
raise ValueError('Related model %r cannot be resolved' % self.remote_field.model)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field(from_field_name))
to_field = (self.remote_field.model._meta.pk if to_field_name is None
else self.remote_field.model._meta.get_field(to_field_name))
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
opts = instance._meta
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if (not possible_parent_link or
possible_parent_link.primary_key or
possible_parent_link.model._meta.abstract):
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super().get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
        Return an extra filter condition for related object fetching when the
        user does 'instance.fieldname'; that is, the extra filter is used in
the descriptor of the field.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
def get_extra_restriction(self, where_class, alias, related_alias):
"""
        Return an extra condition used for joining and subquery pushdown. The
        condition is something that responds to the
        as_sql(compiler, connection) method.
        Note that currently referring to both 'alias' and 'related_alias'
        will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self, filtered_relation=None):
"""Get path from this field to the related model."""
opts = self.remote_field.model._meta
from_opts = self.model._meta
return [PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=self.foreign_related_fields,
join_field=self,
m2m=False,
direct=True,
filtered_relation=filtered_relation,
)]
def get_reverse_path_info(self, filtered_relation=None):
"""Get path from the related model to this field's model."""
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self.remote_field,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)]
@classmethod
@functools.lru_cache(maxsize=None)
def get_lookups(cls):
bases = inspect.getmro(cls)
bases = bases[:bases.index(ForeignObject) + 1]
class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in bases]
return cls.merge_dicts(class_lookups)
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
super().contribute_to_class(cls, name, private_only=private_only, **kwargs)
setattr(cls, self.name, self.forward_related_accessor_class(self))
def contribute_to_related_class(self, cls, related):
        # Internal FKs - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
setattr(cls._meta.concrete_model, related.get_accessor_name(), self.related_accessor_class(related))
# While 'limit_choices_to' might be a callable, simply pass
# it along for later - this is too early because it's still
# model load time.
if self.remote_field.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.remote_field.limit_choices_to)
ForeignObject.register_lookup(RelatedIn)
ForeignObject.register_lookup(RelatedExact)
ForeignObject.register_lookup(RelatedLessThan)
ForeignObject.register_lookup(RelatedGreaterThan)
ForeignObject.register_lookup(RelatedGreaterThanOrEqual)
ForeignObject.register_lookup(RelatedLessThanOrEqual)
ForeignObject.register_lookup(RelatedIsNull)
class ForeignKey(ForeignObject):
"""
Provide a many-to-one relation by adding a column to the local model
to hold the remote value.
By default ForeignKey will target the pk of the remote model but this
behavior can be changed by using the ``to_field`` argument.
"""
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
rel_class = ManyToOneRel
empty_strings_allowed = False
default_error_messages = {
'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, on_delete, related_name=None, related_query_name=None,
limit_choices_to=None, parent_link=False, to_field=None,
db_constraint=True, **kwargs):
try:
to._meta.model_name
except AttributeError:
assert isinstance(to, str), (
"%s(%r) is invalid. First parameter to ForeignKey must be "
"either a model, a model name, or the string %r" % (
self.__class__.__name__, to,
RECURSIVE_RELATIONSHIP_CONSTANT,
)
)
else:
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
kwargs['rel'] = self.rel_class(
self, to, to_field,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
kwargs.setdefault('db_index', True)
super().__init__(to, on_delete, from_fields=['self'], to_fields=[to_field], **kwargs)
self.db_constraint = db_constraint
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_on_delete(),
*self._check_unique(),
]
def _check_on_delete(self):
on_delete = getattr(self.remote_field, 'on_delete', None)
if on_delete == SET_NULL and not self.null:
return [
checks.Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=self,
id='fields.E320',
)
]
elif on_delete == SET_DEFAULT and not self.has_default():
return [
checks.Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=self,
id='fields.E321',
)
]
else:
return []
def _check_unique(self, **kwargs):
return [
checks.Warning(
'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
obj=self,
id='fields.W342',
)
] if self.unique else []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['to_fields']
del kwargs['from_fields']
# Handle the simpler arguments
if self.db_index:
del kwargs['db_index']
else:
kwargs['db_index'] = False
if self.db_constraint is not True:
kwargs['db_constraint'] = self.db_constraint
# Rel needs more work.
to_meta = getattr(self.remote_field.model, "_meta", None)
if self.remote_field.field_name and (
not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)):
kwargs['to_field'] = self.remote_field.field_name
return name, path, args, kwargs
def to_python(self, value):
return self.target_field.to_python(value)
@property
def target_field(self):
return self.foreign_related_fields[0]
def get_reverse_path_info(self, filtered_relation=None):
"""Get path from the related model to this field's model."""
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self.remote_field,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)]
def validate(self, value, model_instance):
if self.remote_field.parent_link:
return
super().validate(value, model_instance)
if value is None:
return
using = router.db_for_read(self.remote_field.model, instance=model_instance)
qs = self.remote_field.model._default_manager.using(using).filter(
**{self.remote_field.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={
'model': self.remote_field.model._meta.verbose_name, 'pk': value,
'field': self.remote_field.field_name, 'value': value,
}, # 'pk' is included for backwards compatibility
)
def get_attname(self):
return '%s_id' % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
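    # Illustrative sketch (not part of Django; field/model names are
    # hypothetical): `author = models.ForeignKey(Author, models.CASCADE)`
    # exposes both:
    #
    #     book.author_id   # raw column value, no database query
    #     book.author      # descriptor access, may hit the database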
def get_default(self):
"""Return the to_field if the default value is an object."""
field_default = super().get_default()
if isinstance(field_default, self.remote_field.model):
return getattr(field_default, self.target_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
if value is None or (value == '' and
(not self.target_field.empty_strings_allowed or
connection.features.interprets_empty_strings_as_nulls)):
return None
else:
return self.target_field.get_db_prep_save(value, connection=connection)
def get_db_prep_value(self, value, connection, prepared=False):
return self.target_field.get_db_prep_value(value, connection, prepared)
def contribute_to_related_class(self, cls, related):
super().contribute_to_related_class(cls, related)
if self.remote_field.field_name is None:
self.remote_field.field_name = cls._meta.pk.name
def formfield(self, *, using=None, **kwargs):
if isinstance(self.remote_field.model, str):
raise ValueError("Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet" %
(self.name, self.remote_field.model))
return super().formfield(**{
'form_class': forms.ModelChoiceField,
'queryset': self.remote_field.model._default_manager.using(using),
'to_field_name': self.remote_field.field_name,
**kwargs,
})
def db_check(self, connection):
return []
def db_type(self, connection):
return self.target_field.rel_db_type(connection=connection)
def db_parameters(self, connection):
return {"type": self.db_type(connection), "check": self.db_check(connection)}
def convert_empty_strings(self, value, expression, connection):
if (not value) and isinstance(value, str):
return None
return value
def get_db_converters(self, connection):
converters = super().get_db_converters(connection)
if connection.features.interprets_empty_strings_as_nulls:
converters += [self.convert_empty_strings]
return converters
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self.target_field
while isinstance(output_field, ForeignKey):
output_field = output_field.target_field
if output_field is self:
raise ValueError('Cannot resolve output_field.')
return super().get_col(alias, output_field)
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
that it always carries a "unique" constraint with it and the reverse
relation always returns the object pointed to (since there will only ever
be one), rather than returning a list.
"""
# Field flags
many_to_many = False
many_to_one = False
one_to_many = False
one_to_one = True
related_accessor_class = ReverseOneToOneDescriptor
forward_related_accessor_class = ForwardOneToOneDescriptor
rel_class = OneToOneRel
description = _("One-to-one relationship")
def __init__(self, to, on_delete, to_field=None, **kwargs):
kwargs['unique'] = True
super().__init__(to, on_delete, to_field=to_field, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if "unique" in kwargs:
del kwargs['unique']
return name, path, args, kwargs
def formfield(self, **kwargs):
if self.remote_field.parent_link:
return None
return super().formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.remote_field.model):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
def _check_unique(self, **kwargs):
# Override ForeignKey since check isn't applicable here.
return []
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
def set_managed(model, related, through):
through._meta.managed = model._meta.managed or related._meta.managed
to_model = resolve_relation(klass, field.remote_field.model)
name = '%s_%s' % (klass._meta.object_name, field.name)
lazy_related_operation(set_managed, klass, to_model, name)
to = make_model_tuple(to_model)[1]
from_ = klass._meta.model_name
if to == from_:
to = 'to_%s' % to
from_ = 'from_%s' % from_
meta = type('Meta', (), {
'db_table': field._get_m2m_db_table(klass._meta),
'auto_created': klass,
'app_label': klass._meta.app_label,
'db_tablespace': klass._meta.db_tablespace,
'unique_together': (from_, to),
'verbose_name': _('%(from)s-%(to)s relationship') % {'from': from_, 'to': to},
'verbose_name_plural': _('%(from)s-%(to)s relationships') % {'from': from_, 'to': to},
'apps': field.model._meta.apps,
})
# Construct and return the new class.
return type(name, (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(
klass,
related_name='%s+' % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
),
to: models.ForeignKey(
to_model,
related_name='%s+' % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
)
})
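# Illustrative sketch (not part of Django; models are hypothetical): for
# `authors = models.ManyToManyField(Author)` on Book, the factory above builds
# a through model named 'Book_authors' with FKs 'book' and 'author'; a
# self-referential M2M gets 'from_<model>' / 'to_<model>' FK names instead.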
class ManyToManyField(RelatedField):
"""
Provide a many-to-many relation by using an intermediary model that
holds two ForeignKey fields pointed at the two sides of the relation.
Unless a ``through`` model was provided, ManyToManyField will use the
create_many_to_many_intermediary_model factory to automatically generate
the intermediary model.
"""
# Field flags
many_to_many = True
many_to_one = False
one_to_many = False
one_to_one = False
rel_class = ManyToManyRel
description = _("Many-to-many relationship")
def __init__(self, to, related_name=None, related_query_name=None,
limit_choices_to=None, symmetrical=None, through=None,
through_fields=None, db_constraint=True, db_table=None,
swappable=True, **kwargs):
try:
to._meta
except AttributeError:
assert isinstance(to, str), (
"%s(%r) is invalid. First parameter to ManyToManyField must be "
"either a model, a model name, or the string %r" %
(self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
)
if symmetrical is None:
symmetrical = (to == RECURSIVE_RELATIONSHIP_CONSTANT)
if through is not None:
assert db_table is None, (
"Cannot specify a db_table if an intermediary model is used."
)
kwargs['rel'] = self.rel_class(
self, to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
symmetrical=symmetrical,
through=through,
through_fields=through_fields,
db_constraint=db_constraint,
)
self.has_null_arg = 'null' in kwargs
super().__init__(**kwargs)
self.db_table = db_table
self.swappable = swappable
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_unique(**kwargs),
*self._check_relationship_model(**kwargs),
*self._check_ignored_options(**kwargs),
*self._check_table_uniqueness(**kwargs),
]
def _check_unique(self, **kwargs):
if self.unique:
return [
checks.Error(
'ManyToManyFields cannot be unique.',
obj=self,
id='fields.E330',
)
]
return []
def _check_ignored_options(self, **kwargs):
warnings = []
if self.has_null_arg:
warnings.append(
checks.Warning(
'null has no effect on ManyToManyField.',
obj=self,
id='fields.W340',
)
)
if self._validators:
warnings.append(
checks.Warning(
'ManyToManyField does not support validators.',
obj=self,
id='fields.W341',
)
)
if (self.remote_field.limit_choices_to and self.remote_field.through and
not self.remote_field.through._meta.auto_created):
warnings.append(
checks.Warning(
'limit_choices_to has no effect on ManyToManyField '
'with a through model.',
obj=self,
id='fields.W343',
)
)
return warnings
def _check_relationship_model(self, from_model=None, **kwargs):
if hasattr(self.remote_field.through, '_meta'):
qualified_model_name = "%s.%s" % (
self.remote_field.through._meta.app_label, self.remote_field.through.__name__)
else:
qualified_model_name = self.remote_field.through
errors = []
if self.remote_field.through not in self.opts.apps.get_models(include_auto_created=True):
# The relationship model is not installed.
errors.append(
checks.Error(
"Field specifies a many-to-many relation through model "
"'%s', which has not been installed." % qualified_model_name,
obj=self,
id='fields.E331',
)
)
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
# Set some useful local variables
to_model = resolve_relation(from_model, self.remote_field.model)
from_model_name = from_model._meta.object_name
if isinstance(to_model, str):
to_model_name = to_model
else:
to_model_name = to_model._meta.object_name
relationship_model_name = self.remote_field.through._meta.object_name
self_referential = from_model == to_model
# Check symmetrical attribute.
if (self_referential and self.remote_field.symmetrical and
not self.remote_field.through._meta.auto_created):
errors.append(
checks.Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
obj=self,
id='fields.E332',
)
)
# Count foreign keys in intermediate model
if self_referential:
seen_self = sum(
from_model == getattr(field.remote_field, 'model', None)
for field in self.remote_field.through._meta.fields
)
if seen_self > 2 and not self.remote_field.through_fields:
errors.append(
checks.Error(
"The model is used as an intermediate model by "
"'%s', but it has more than two foreign keys "
"to '%s', which is ambiguous. You must specify "
"which two foreign keys Django should use via the "
"through_fields keyword argument." % (self, from_model_name),
hint="Use through_fields to specify which two foreign keys Django should use.",
obj=self.remote_field.through,
id='fields.E333',
)
)
else:
# Count foreign keys in relationship model
seen_from = sum(
from_model == getattr(field.remote_field, 'model', None)
for field in self.remote_field.through._meta.fields
)
seen_to = sum(
to_model == getattr(field.remote_field, 'model', None)
for field in self.remote_field.through._meta.fields
)
if seen_from > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"from '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=(
'If you want to create a recursive relationship, '
'use ForeignKey("self", symmetrical=False, through="%s").'
) % relationship_model_name,
obj=self,
id='fields.E334',
)
)
if seen_to > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
"The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"to '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument." % (self, to_model_name),
hint=(
'If you want to create a recursive relationship, '
'use ForeignKey("self", symmetrical=False, through="%s").'
) % relationship_model_name,
obj=self,
id='fields.E335',
)
)
if seen_from == 0 or seen_to == 0:
errors.append(
checks.Error(
"The model is used as an intermediate model by "
"'%s', but it does not have a foreign key to '%s' or '%s'." % (
self, from_model_name, to_model_name
),
obj=self.remote_field.through,
id='fields.E336',
)
)
# Validate `through_fields`.
if self.remote_field.through_fields is not None:
# Validate that we're given an iterable of at least two items
# and that none of them is "falsy".
if not (len(self.remote_field.through_fields) >= 2 and
self.remote_field.through_fields[0] and self.remote_field.through_fields[1]):
errors.append(
checks.Error(
"Field specifies 'through_fields' but does not provide "
"the names of the two link fields that should be used "
"for the relation through model '%s'." % qualified_model_name,
hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')",
obj=self,
id='fields.E337',
)
)
# Validate the given through fields -- they should be actual
# fields on the through model, and also be foreign keys to the
# expected models.
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
source, through, target = from_model, self.remote_field.through, self.remote_field.model
source_field_name, target_field_name = self.remote_field.through_fields[:2]
for field_name, related_model in ((source_field_name, source),
(target_field_name, target)):
possible_field_names = []
for f in through._meta.fields:
if hasattr(f, 'remote_field') and getattr(f.remote_field, 'model', None) == related_model:
possible_field_names.append(f.name)
if possible_field_names:
hint = "Did you mean one of the following foreign keys to '%s': %s?" % (
related_model._meta.object_name,
', '.join(possible_field_names),
)
else:
hint = None
try:
field = through._meta.get_field(field_name)
except exceptions.FieldDoesNotExist:
errors.append(
checks.Error(
"The intermediary model '%s' has no field '%s'."
% (qualified_model_name, field_name),
hint=hint,
obj=self,
id='fields.E338',
)
)
else:
if not (hasattr(field, 'remote_field') and
getattr(field.remote_field, 'model', None) == related_model):
errors.append(
checks.Error(
"'%s.%s' is not a foreign key to '%s'." % (
through._meta.object_name, field_name,
related_model._meta.object_name,
),
hint=hint,
obj=self,
id='fields.E339',
)
)
return errors
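    # Illustrative sketch (not part of Django; models are hypothetical):
    # disambiguating a through model with two foreign keys to the same side.
    #
    #     class Membership(models.Model):
    #         group = models.ForeignKey('Group', models.CASCADE)
    #         person = models.ForeignKey('Person', models.CASCADE)
    #         inviter = models.ForeignKey('Person', models.CASCADE, related_name='+')
    #
    #     class Group(models.Model):
    #         members = models.ManyToManyField(
    #             'Person', through='Membership',
    #             through_fields=('group', 'person'),  # (source, target)
    #         )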
def _check_table_uniqueness(self, **kwargs):
if isinstance(self.remote_field.through, str) or not self.remote_field.through._meta.managed:
return []
registered_tables = {
model._meta.db_table: model
for model in self.opts.apps.get_models(include_auto_created=True)
if model != self.remote_field.through and model._meta.managed
}
m2m_db_table = self.m2m_db_table()
model = registered_tables.get(m2m_db_table)
# The second condition allows multiple m2m relations on a model if
# some point to a through model that proxies another through model.
if model and model._meta.concrete_model != self.remote_field.through._meta.concrete_model:
if model._meta.auto_created:
def _get_field_name(model):
for field in model._meta.auto_created._meta.many_to_many:
if field.remote_field.through is model:
return field.name
opts = model._meta.auto_created._meta
clashing_obj = '%s.%s' % (opts.label, _get_field_name(model))
else:
clashing_obj = model._meta.label
return [
checks.Error(
"The field's intermediary table '%s' clashes with the "
"table name of '%s'." % (m2m_db_table, clashing_obj),
obj=self,
id='fields.E340',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# Handle the simpler arguments.
if self.db_table is not None:
kwargs['db_table'] = self.db_table
if self.remote_field.db_constraint is not True:
kwargs['db_constraint'] = self.remote_field.db_constraint
# Rel needs more work.
if isinstance(self.remote_field.model, str):
kwargs['to'] = self.remote_field.model
else:
kwargs['to'] = "%s.%s" % (
self.remote_field.model._meta.app_label,
self.remote_field.model._meta.object_name,
)
if getattr(self.remote_field, 'through', None) is not None:
if isinstance(self.remote_field.through, str):
kwargs['through'] = self.remote_field.through
elif not self.remote_field.through._meta.auto_created:
kwargs['through'] = "%s.%s" % (
self.remote_field.through._meta.app_label,
self.remote_field.through._meta.object_name,
)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error.
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ManyToManyField pointing to a "
"model that is swapped in place of more than one model "
"(%s and %s)" % (kwargs['to'].setting_name, swappable_setting)
)
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
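    # A hedged illustration of what deconstruct() returns for a plain
    # many-to-many field; the app, model, and field names are hypothetical:
    #
    #     field = Pizza._meta.get_field('toppings')
    #     name, path, args, kwargs = field.deconstruct()
    #     # name == 'toppings'
    #     # path == 'django.db.models.ManyToManyField'
    #     # kwargs == {'to': 'menu.Topping'}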
def _get_path_info(self, direct=False, filtered_relation=None):
"""Called by both direct and indirect m2m traversal."""
int_model = self.remote_field.through
linkfield1 = int_model._meta.get_field(self.m2m_field_name())
linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
if direct:
join1infos = linkfield1.get_reverse_path_info()
join2infos = linkfield2.get_path_info(filtered_relation)
else:
join1infos = linkfield2.get_reverse_path_info()
join2infos = linkfield1.get_path_info(filtered_relation)
# Get join infos between the last model of join 1 and the first model
# of join 2. Assume the only reason these may differ is due to model
# inheritance.
join1_final = join1infos[-1].to_opts
join2_initial = join2infos[0].from_opts
if join1_final is join2_initial:
intermediate_infos = []
elif issubclass(join1_final.model, join2_initial.model):
intermediate_infos = join1_final.get_path_to_parent(join2_initial.model)
else:
intermediate_infos = join2_initial.get_path_from_parent(join1_final.model)
return [*join1infos, *intermediate_infos, *join2infos]
def get_path_info(self, filtered_relation=None):
return self._get_path_info(direct=True, filtered_relation=filtered_relation)
def get_reverse_path_info(self, filtered_relation=None):
return self._get_path_info(direct=False, filtered_relation=filtered_relation)
def _get_m2m_db_table(self, opts):
"""
Function that can be curried to provide the m2m table name for this
relation.
"""
if self.remote_field.through is not None:
return self.remote_field.through._meta.db_table
elif self.db_table:
return self.db_table
else:
m2m_table_name = '%s_%s' % (utils.strip_quotes(opts.db_table), self.name)
return utils.truncate_name(m2m_table_name, connection.ops.max_name_length())
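    # For example (hypothetical names): a ManyToManyField named 'toppings' on
    # a model whose table is 'menu_pizza', with no explicit through model or
    # db_table, gets the intermediary table name 'menu_pizza_toppings',
    # truncated to the backend's max_name_length() if necessary.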
def _get_m2m_attr(self, related, attr):
"""
Function that can be curried to provide the source accessor or DB
column name for the m2m table.
"""
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[0]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if (f.is_relation and f.remote_field.model == related.related_model and
(link_field_name is None or link_field_name == f.name)):
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"""
Function that can be curried to provide the related accessor or DB
column name for the m2m table.
"""
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[1]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if f.is_relation and f.remote_field.model == related.model:
if link_field_name is None and related.related_model == related.model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
elif link_field_name is None or link_field_name == f.name:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def contribute_to_class(self, cls, name, **kwargs):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.remote_field.symmetrical and (
self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
self.remote_field.related_name = "%s_rel_+" % name
elif self.remote_field.is_hidden():
# If the backwards relation is disabled, replace the original
# related_name with one generated from the m2m field name. Django
# still uses backwards relations internally and we need to avoid
# clashes between multiple m2m fields with related_name == '+'.
self.remote_field.related_name = "_%s_%s_+" % (cls.__name__.lower(), name)
super().contribute_to_class(cls, name, **kwargs)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract, or
# 3) The class owning the m2m field has been swapped out.
if not cls._meta.abstract:
if self.remote_field.through:
def resolve_through_model(_, model, field):
field.remote_field.through = model
lazy_related_operation(resolve_through_model, cls, self.remote_field.through, field=self)
elif not cls._meta.swapped:
self.remote_field.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation.
setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False))
# Set up the accessor for the m2m table name for the relation.
self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta)
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# and swapped models don't get a related descriptor.
if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
setattr(cls, related.get_accessor_name(), ManyToManyDescriptor(self.remote_field, reverse=True))
# Set up the accessors for the column names on the m2m table.
self.m2m_column_name = partial(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = partial(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = partial(self._get_m2m_reverse_attr, related, 'name')
get_m2m_rel = partial(self._get_m2m_attr, related, 'remote_field')
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = partial(self._get_m2m_reverse_attr, related, 'remote_field')
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
return [] if obj.pk is None else list(getattr(obj, self.attname).all())
def save_form_data(self, instance, data):
getattr(instance, self.attname).set(data)
def formfield(self, *, using=None, **kwargs):
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.remote_field.model._default_manager.using(using),
**kwargs,
}
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i.pk for i in initial]
return super().formfield(**defaults)
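    # A hedged sketch of the resulting form field, using a hypothetical
    # ModelForm for a Pizza model with a 'toppings' ManyToManyField:
    #
    #     form = PizzaForm()
    #     form.fields['toppings']           # a ModelMultipleChoiceField
    #     form.fields['toppings'].queryset  # the related model's default
    #                                       # manager, roughly
    #                                       # Topping.objects.all()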
def db_check(self, connection):
return None
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}
|
c551eb12bfc32aa29ad00c0d47e93fd06c4133ba7d8d621ca9aa3ef87c2e9254 | """
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one relation:
``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
   the descriptor is concerned. The constraint is checked upstream (uniqueness
validation in forms) or downstream (unique indexes in the database).
2. Related instance on the forward side of a one-to-one
relation: ``ForwardOneToOneDescriptor``.
It avoids querying the database when accessing the parent link field in
a multi-table inheritance scenario.
3. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
4. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
5. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from django.core.exceptions import FieldError
from django.db import connections, router, transaction
from django.db.models import Q, signals
from django.db.models.query import QuerySet
from django.utils.functional import cached_property
class ForwardManyToOneDescriptor:
"""
Accessor to the related object on the forward side of a many-to-one or
one-to-one (via ForwardOneToOneDescriptor subclass) relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
"""
def __init__(self, field_with_rel):
self.field = field_with_rel
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `self.field.model` might
# still be a string model reference.
return type(
'RelatedObjectDoesNotExist',
(self.field.remote_field.model.DoesNotExist, AttributeError), {
'__module__': self.field.model.__module__,
'__qualname__': '%s.%s.RelatedObjectDoesNotExist' % (
self.field.model.__qualname__,
self.field.name,
),
}
)
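    # Because RelatedObjectDoesNotExist subclasses both the related model's
    # DoesNotExist and AttributeError, both idioms below work (a hedged
    # sketch with the hypothetical Child/Parent models):
    #
    #     try:
    #         child.parent
    #     except Child.parent.RelatedObjectDoesNotExist:
    #         ...
    #
    #     hasattr(child, 'parent')  # False instead of raising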
def is_cached(self, instance):
return self.field.is_cached(instance)
def get_queryset(self, **hints):
return self.field.remote_field.model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
remote_field = self.field.remote_field
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: {instance_attr(inst)[0] for inst in instances}}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not remote_field.multiple:
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
remote_field.set_cached_value(rel_obj, instance)
return queryset, rel_obj_attr, instance_attr, True, self.field.get_cache_name(), False
def get_object(self, instance):
qs = self.get_queryset(instance=instance)
# Assuming the database enforces foreign keys, this won't fail.
return qs.get(self.field.get_reverse_related_filter(instance))
def __get__(self, instance, cls=None):
"""
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the reverse accessor (ReverseOneToOneDescriptor).
try:
rel_obj = self.field.get_cached_value(instance)
except KeyError:
has_value = None not in self.field.get_local_related_value(instance)
ancestor_link = instance._meta.get_ancestor_link(self.field.model) if has_value else None
if ancestor_link and ancestor_link.is_cached(instance):
# An ancestor link will exist if this field is defined on a
# multi-table inheritance parent of the instance's class.
ancestor = ancestor_link.get_cached_value(instance)
# The value might be cached on an ancestor if the instance
# originated from walking down the inheritance chain.
rel_obj = self.field.get_cached_value(ancestor, default=None)
else:
rel_obj = None
if rel_obj is None and has_value:
rel_obj = self.get_object(instance)
remote_field = self.field.remote_field
# If this is a one-to-one relation, set the reverse accessor
# cache on the related object to the current instance to avoid
# an extra SQL query if it's accessed later on.
if not remote_field.multiple:
remote_field.set_cached_value(rel_obj, instance)
self.field.set_cached_value(instance, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
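    # A hedged sketch of the caching performed above: repeated access costs
    # at most one query.
    #
    #     child = Child.objects.get(pk=1)
    #     child.parent  # one SELECT; the result is cached on the instance
    #     child.parent  # served from the cache, no query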
def __set__(self, instance, value):
"""
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign
"""
# An object must be an instance of the related class.
if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
remote_field = self.field.remote_field
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = self.field.get_cached_value(instance, default=None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
remote_field.set_cached_value(related, None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.field.set_cached_value(instance, value)
# If this is a one-to-one relation, set the reverse accessor cache on
# the related object to the current instance to avoid an extra SQL
# query if it's accessed later on.
if value is not None and not remote_field.multiple:
remote_field.set_cached_value(value, instance)
def __reduce__(self):
"""
Pickling should return the instance attached by self.field on the
model, not a new copy of that descriptor. Use getattr() to retrieve
the instance directly from the model.
"""
return getattr, (self.field.model, self.field.name)
class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor):
"""
Accessor to the related object on the forward side of a one-to-one relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance.
"""
def get_object(self, instance):
if self.field.remote_field.parent_link:
deferred = instance.get_deferred_fields()
# Because it's a parent link, all the data is available in the
# instance, so populate the parent model with this data.
rel_model = self.field.remote_field.model
fields = [field.attname for field in rel_model._meta.concrete_fields]
# If any of the related model's fields are deferred, fallback to
# fetching all fields from the related model. This avoids a query
# on the related model for every deferred field.
if not any(field in fields for field in deferred):
kwargs = {field: getattr(instance, field) for field in fields}
obj = rel_model(**kwargs)
obj._state.adding = instance._state.adding
obj._state.db = instance._state.db
return obj
return super().get_object(instance)
def __set__(self, instance, value):
super().__set__(instance, value)
# If the primary key is a link to a parent model and a parent instance
# is being set, update the value of the inherited pk(s).
if self.field.primary_key and self.field.remote_field.parent_link:
opts = instance._meta
# Inherited primary key fields from this object's base classes.
inherited_pk_fields = [
field for field in opts.concrete_fields
if field.primary_key and field.remote_field
]
for field in inherited_pk_fields:
rel_model_pk_name = field.remote_field.model._meta.pk.attname
raw_value = getattr(value, rel_model_pk_name) if value is not None else None
setattr(instance, rel_model_pk_name, raw_value)
class ReverseOneToOneDescriptor:
"""
Accessor to the related object on the reverse side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``Place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
"""
def __init__(self, related):
# Following the example above, `related` is an instance of OneToOneRel
# which represents the reverse restaurant field (place.restaurant).
self.related = related
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ForwardManyToOneDescriptor`.
return type(
'RelatedObjectDoesNotExist',
(self.related.related_model.DoesNotExist, AttributeError), {
'__module__': self.related.model.__module__,
'__qualname__': '%s.%s.RelatedObjectDoesNotExist' % (
self.related.model.__qualname__,
self.related.name,
)
},
)
def is_cached(self, instance):
return self.related.is_cached(instance)
def get_queryset(self, **hints):
return self.related.related_model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.related.field.get_local_related_value
instance_attr = self.related.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
self.related.field.set_cached_value(rel_obj, instance)
return queryset, rel_obj_attr, instance_attr, True, self.related.get_cache_name(), False
def __get__(self, instance, cls=None):
"""
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached
# by the field on the model instance state. It can also be pre-cached
# by the forward accessor (ForwardManyToOneDescriptor).
try:
rel_obj = self.related.get_cached_value(instance)
except KeyError:
related_pk = instance.pk
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
# Set the forward accessor cache on the related object to
# the current instance to avoid an extra SQL query if it's
# accessed later on.
self.related.field.set_cached_value(rel_obj, instance)
self.related.set_cached_value(instance, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
# The similarity of the code below to the code in
# ForwardManyToOneDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
if value is None:
# Update the cached related instance (if any) & clear the cache.
# Following the example above, this would be the cached
# ``restaurant`` instance (if any).
rel_obj = self.related.get_cached_value(instance, default=None)
if rel_obj is not None:
# Remove the ``restaurant`` instance from the ``place``
# instance cache.
self.related.delete_cached_value(instance)
# Set the ``place`` field on the ``restaurant``
# instance to None.
setattr(rel_obj, self.related.field.name, None)
elif not isinstance(value, self.related.related_model):
# An object must be an instance of the related class.
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
else:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
if value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
self.related.set_cached_value(instance, value)
# Set the forward accessor cache on the related object to the current
# instance to avoid an extra SQL query if it's accessed later on.
self.related.field.set_cached_value(value, instance)
def __reduce__(self):
# Same purpose as ForwardManyToOneDescriptor.__reduce__().
return getattr, (self.related.model, self.related.name)
class ReverseManyToOneDescriptor:
"""
Accessor to the related objects manager on the reverse side of a
many-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``Parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_reverse_many_to_one_manager()`` defined below.
"""
def __init__(self, rel):
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model
return create_reverse_many_to_one_manager(
related_model._default_manager.__class__,
self.rel,
)
def __get__(self, instance, cls=None):
"""
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused)
"""
if instance is None:
return self
return self.related_manager_cls(instance)
def _get_set_deprecation_msg_params(self):
return (
'reverse side of a related set',
self.rel.get_accessor_name(),
)
def __set__(self, instance, value):
raise TypeError(
'Direct assignment to the %s is prohibited. Use %s.set() instead.'
% self._get_set_deprecation_msg_params(),
)
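# A hedged sketch of the prohibition above, with the hypothetical
# Parent/Child models:
#
#     parent.children = [child1, child2]      # raises TypeError
#     parent.children.set([child1, child2])   # the supported API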
def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super().__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return queryset.none()
if self.field.many_to_one:
# Guard against field-like objects such as GenericRelation
# that abuse create_reverse_many_to_one_manager() with reverse
# one-to-many relationships instead and break known related
# objects assignment.
try:
target_field = self.field.target_field
except FieldError:
# The relationship has multiple target fields. Use a tuple
# for related object id.
rel_obj_id = tuple([
getattr(self.instance, target_field.attname)
for target_field in self.field.get_path_info()[-1].target_fields
])
else:
rel_obj_id = getattr(self.instance, target_field.attname)
queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.field.remote_field.get_cache_name())
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.remote_field.get_cache_name()]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.remote_field.get_cache_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name, False
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj,
))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.field.name: self.instance,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel.field.null:
def remove(self, *objs, bulk=True):
if not objs:
return
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
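            # A hedged sketch, assuming a nullable FK such as
            # ``parent = ForeignKey(Parent, null=True,
            # on_delete=models.SET_NULL, related_name='children')``:
            #
            #     parent.children.remove(child)  # sets child.parent to None
            #     parent.children.clear()        # unlinks all children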
def set(self, objs, *, bulk=True, clear=False):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear(bulk=bulk)
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the forward and reverse sides of
a many-to-many relation.
In the example::
class Pizza(Model):
toppings = ManyToManyField(Topping, related_name='pizzas')
``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor``
instances.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel, reverse=False):
super().__init__(rel)
self.reverse = reverse
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.rel.through
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model if self.reverse else self.rel.model
return create_forward_many_to_many_manager(
related_model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
def _get_set_deprecation_msg_params(self):
return (
'%s side of a many-to-many set' % ('reverse' if self.reverse else 'forward'),
self.rel.get_accessor_name() if self.reverse else self.field.name,
)
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
    Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.pk_field_names[self.source_field_name]))
            # Even if this relation is not to the pk, we still require a pk
            # value. The expectation is that the instance has already been
            # saved to the DB, although having a pk value isn't a guarantee
            # of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
False,
)
@property
def constrained_target(self):
            # If the through relation's target field's referential integrity
            # is enforced, the query can be performed solely against the
            # through table, as the INNER JOIN against the target table is
            # unnecessary.
if not self.target_field.db_constraint:
return None
db = router.db_for_read(self.through, instance=self.instance)
if not connections[db].features.supports_foreign_keys:
return None
hints = {'instance': self.instance}
manager = self.through._base_manager.db_manager(db, hints=hints)
filters = {self.source_field_name: self.instance.pk}
            # Nullable target rows must be excluded as well, since they would
            # have been filtered out by an INNER JOIN.
if self.target_field.null:
filters['%s__isnull' % self.target_field_name] = False
return manager.filter(**filters)
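        # A hedged illustration: when constrained_target is usable,
        # ``pizza.toppings.count()`` can be answered from the through table
        # alone, roughly ``SELECT COUNT(*) FROM menu_pizza_toppings WHERE
        # pizza_id = %s``, skipping the join against the topping table.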
def exists(self):
constrained_target = self.constrained_target
return constrained_target.exists() if constrained_target else super().exists()
def count(self):
constrained_target = self.constrained_target
return constrained_target.count() if constrained_target else super().count()
def add(self, *objs, through_defaults=None):
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(
self.source_field_name, self.target_field_name, *objs,
through_defaults=through_defaults,
)
# If this is a symmetrical m2m relation to self, add the mirror
# entry in the m2m table. `through_defaults` aren't used here
# because of the system check error fields.E332: Many-to-many
# fields with intermediate tables must not be symmetrical.
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
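        # A hedged sketch of the symmetrical mirroring above, assuming
        # ``friends = ManyToManyField('self')`` (symmetrical by default):
        #
        #     alice.friends.add(bob)  # writes (alice, bob) and (bob, alice)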
def remove(self, *objs):
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(
sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super().get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
clear.alters_data = True
def set(self, objs, *, clear=False, through_defaults=None):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, through_defaults=through_defaults)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (
self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj
)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs, through_defaults=through_defaults)
set.alters_data = True
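        # A hedged sketch: set() diffs against the existing through rows and
        # also accepts raw primary keys:
        #
        #     pizza.toppings.set([t1, t2])   # removes stale rows, adds new
        #     pizza.toppings.set([1, 2, 3])  # primary keys work too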
def create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj, through_defaults=through_defaults)
return new_obj
create.alters_data = True
def get_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
through_defaults = through_defaults or {}
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids.difference_update(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(
sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db,
)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**through_defaults, **{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(
sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db,
)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(
sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
target_model_qs = super().get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.target_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
return ManyRelatedManager
|