Dataset columns: hash (string, length 64) and content (string, length 0–1.51M).
c0245a2f09a163337e01194e3600aba353f365647ee94c1509fa318fd0f79768
import os from io import StringIO from unittest import mock from admin_scripts.tests import AdminScriptTestCase from django.apps import apps from django.core import management from django.core.checks import Tags from django.core.management import BaseCommand, CommandError, find_commands from django.core.management.utils import ( find_command, get_random_secret_key, is_ignored_path, normalize_path_patterns, popen_wrapper, ) from django.db import connection from django.test import SimpleTestCase, override_settings from django.test.utils import captured_stderr, extend_sys_path, ignore_warnings from django.utils import translation from django.utils.deprecation import RemovedInDjango41Warning from django.utils.version import PY37 from .management.commands import dance # A minimal set of apps to avoid system checks running on all apps. @override_settings( INSTALLED_APPS=[ 'django.contrib.auth', 'django.contrib.contenttypes', 'user_commands', ], ) class CommandTests(SimpleTestCase): def test_command(self): out = StringIO() management.call_command('dance', stdout=out) self.assertIn("I don't feel like dancing Rock'n'Roll.\n", out.getvalue()) def test_command_style(self): out = StringIO() management.call_command('dance', style='Jive', stdout=out) self.assertIn("I don't feel like dancing Jive.\n", out.getvalue()) # Passing options as arguments also works (thanks argparse) management.call_command('dance', '--style', 'Jive', stdout=out) self.assertIn("I don't feel like dancing Jive.\n", out.getvalue()) def test_language_preserved(self): with translation.override('fr'): management.call_command('dance', verbosity=0) self.assertEqual(translation.get_language(), 'fr') def test_explode(self): """ An unknown command raises CommandError """ with self.assertRaisesMessage(CommandError, "Unknown command: 'explode'"): management.call_command(('explode',)) def test_system_exit(self): """ Exception raised in a command should raise CommandError with call_command, but SystemExit when run from command line """ with self.assertRaises(CommandError) as cm: management.call_command('dance', example="raise") self.assertEqual(cm.exception.returncode, 3) dance.Command.requires_system_checks = [] try: with captured_stderr() as stderr, self.assertRaises(SystemExit) as cm: management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute() self.assertEqual(cm.exception.code, 3) finally: dance.Command.requires_system_checks = '__all__' self.assertIn("CommandError", stderr.getvalue()) def test_no_translations_deactivate_translations(self): """ When the Command handle method is decorated with @no_translations, translations are deactivated inside the command. """ current_locale = translation.get_language() with translation.override('pl'): result = management.call_command('no_translations') self.assertIsNone(result) self.assertEqual(translation.get_language(), current_locale) def test_find_command_without_PATH(self): """ find_command should still work when the PATH environment variable doesn't exist (#22256). """ current_path = os.environ.pop('PATH', None) try: self.assertIsNone(find_command('_missing_')) finally: if current_path is not None: os.environ['PATH'] = current_path def test_discover_commands_in_eggs(self): """ Management commands can also be loaded from Python eggs. 
""" egg_dir = '%s/eggs' % os.path.dirname(__file__) egg_name = '%s/basic.egg' % egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=['commandegg']): cmds = find_commands(os.path.join(apps.get_app_config('commandegg').path, 'management')) self.assertEqual(cmds, ['eggcommand']) def test_call_command_option_parsing(self): """ When passing the long option name to call_command, the available option key is the option dest name (#22985). """ out = StringIO() management.call_command('dance', stdout=out, opt_3=True) self.assertIn("option3", out.getvalue()) self.assertNotIn("opt_3", out.getvalue()) self.assertNotIn("opt-3", out.getvalue()) def test_call_command_option_parsing_non_string_arg(self): """ It should be possible to pass non-string arguments to call_command. """ out = StringIO() management.call_command('dance', 1, verbosity=0, stdout=out) self.assertIn("You passed 1 as a positional argument.", out.getvalue()) def test_calling_a_command_with_only_empty_parameter_should_ends_gracefully(self): out = StringIO() management.call_command('hal', "--empty", stdout=out) self.assertEqual(out.getvalue(), "\nDave, I can't do that.\n") def test_calling_command_with_app_labels_and_parameters_should_be_ok(self): out = StringIO() management.call_command('hal', 'myapp', "--verbosity", "3", stdout=out) self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue()) def test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok(self): out = StringIO() management.call_command('hal', "--verbosity", "3", "myapp", stdout=out) self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue()) def test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error(self): with self.assertRaises(CommandError): management.call_command('hal') def test_output_transaction(self): output = management.call_command('transaction', stdout=StringIO(), no_color=True) self.assertTrue(output.strip().startswith(connection.ops.start_transaction_sql())) self.assertTrue(output.strip().endswith(connection.ops.end_transaction_sql())) def test_call_command_no_checks(self): """ By default, call_command should not trigger the check framework, unless specifically asked. """ self.counter = 0 def patched_check(self_, **kwargs): self.counter += 1 self.kwargs = kwargs saved_check = BaseCommand.check BaseCommand.check = patched_check try: management.call_command("dance", verbosity=0) self.assertEqual(self.counter, 0) management.call_command("dance", verbosity=0, skip_checks=False) self.assertEqual(self.counter, 1) self.assertEqual(self.kwargs, {}) finally: BaseCommand.check = saved_check def test_requires_system_checks_empty(self): with mock.patch('django.core.management.base.BaseCommand.check') as mocked_check: management.call_command('no_system_checks') self.assertIs(mocked_check.called, False) def test_requires_system_checks_specific(self): with mock.patch('django.core.management.base.BaseCommand.check') as mocked_check: management.call_command('specific_system_checks') mocked_check.called_once_with(tags=[Tags.staticfiles, Tags.models]) def test_requires_system_checks_invalid(self): class Command(BaseCommand): requires_system_checks = 'x' msg = 'requires_system_checks must be a list or tuple.' 
with self.assertRaisesMessage(TypeError, msg): Command() def test_check_migrations(self): requires_migrations_checks = dance.Command.requires_migrations_checks self.assertIs(requires_migrations_checks, False) try: with mock.patch.object(BaseCommand, 'check_migrations') as check_migrations: management.call_command('dance', verbosity=0) self.assertFalse(check_migrations.called) dance.Command.requires_migrations_checks = True management.call_command('dance', verbosity=0) self.assertTrue(check_migrations.called) finally: dance.Command.requires_migrations_checks = requires_migrations_checks def test_call_command_unrecognized_option(self): msg = ( 'Unknown option(s) for dance command: unrecognized. Valid options ' 'are: example, force_color, help, integer, no_color, opt_3, ' 'option3, pythonpath, settings, skip_checks, stderr, stdout, ' 'style, traceback, verbosity, version.' ) with self.assertRaisesMessage(TypeError, msg): management.call_command('dance', unrecognized=1) msg = ( 'Unknown option(s) for dance command: unrecognized, unrecognized2. ' 'Valid options are: example, force_color, help, integer, no_color, ' 'opt_3, option3, pythonpath, settings, skip_checks, stderr, ' 'stdout, style, traceback, verbosity, version.' ) with self.assertRaisesMessage(TypeError, msg): management.call_command('dance', unrecognized=1, unrecognized2=1) def test_call_command_with_required_parameters_in_options(self): out = StringIO() management.call_command('required_option', need_me='foo', needme2='bar', stdout=out) self.assertIn('need_me', out.getvalue()) self.assertIn('needme2', out.getvalue()) def test_call_command_with_required_parameters_in_mixed_options(self): out = StringIO() management.call_command('required_option', '--need-me=foo', needme2='bar', stdout=out) self.assertIn('need_me', out.getvalue()) self.assertIn('needme2', out.getvalue()) def test_command_add_arguments_after_common_arguments(self): out = StringIO() management.call_command('common_args', stdout=out) self.assertIn('Detected that --version already exists', out.getvalue()) def test_mutually_exclusive_group_required_options(self): out = StringIO() management.call_command('mutually_exclusive_required', foo_id=1, stdout=out) self.assertIn('foo_id', out.getvalue()) management.call_command('mutually_exclusive_required', foo_name='foo', stdout=out) self.assertIn('foo_name', out.getvalue()) msg = ( 'Error: one of the arguments --foo-id --foo-name --append_const ' '--const --count --flag_false --flag_true is required' ) with self.assertRaisesMessage(CommandError, msg): management.call_command('mutually_exclusive_required', stdout=out) def test_mutually_exclusive_group_required_const_options(self): tests = [ ('append_const', [42]), ('const', 31), ('count', 1), ('flag_false', False), ('flag_true', True), ] for arg, value in tests: out = StringIO() expected_output = '%s=%s' % (arg, value) with self.subTest(arg=arg): management.call_command( 'mutually_exclusive_required', '--%s' % arg, stdout=out, ) self.assertIn(expected_output, out.getvalue()) out.truncate(0) management.call_command( 'mutually_exclusive_required', **{arg: value, 'stdout': out}, ) self.assertIn(expected_output, out.getvalue()) def test_required_const_options(self): args = { 'append_const': [42], 'const': 31, 'count': 1, 'flag_false': False, 'flag_true': True, } expected_output = '\n'.join( '%s=%s' % (arg, value) for arg, value in args.items() ) out = StringIO() management.call_command( 'required_constant_option', '--append_const', '--const', '--count', '--flag_false', '--flag_true', 
stdout=out, ) self.assertIn(expected_output, out.getvalue()) out.truncate(0) management.call_command('required_constant_option', **{**args, 'stdout': out}) self.assertIn(expected_output, out.getvalue()) def test_subparser(self): out = StringIO() management.call_command('subparser', 'foo', 12, stdout=out) self.assertIn('bar', out.getvalue()) def test_subparser_dest_args(self): out = StringIO() management.call_command('subparser_dest', 'foo', bar=12, stdout=out) self.assertIn('bar', out.getvalue()) def test_subparser_dest_required_args(self): out = StringIO() management.call_command('subparser_required', 'foo_1', 'foo_2', bar=12, stdout=out) self.assertIn('bar', out.getvalue()) def test_subparser_invalid_option(self): msg = "Error: invalid choice: 'test' (choose from 'foo')" with self.assertRaisesMessage(CommandError, msg): management.call_command('subparser', 'test', 12) if PY37: # "required" option requires Python 3.7 and later. msg = 'Error: the following arguments are required: subcommand' with self.assertRaisesMessage(CommandError, msg): management.call_command('subparser_dest', subcommand='foo', bar=12) else: msg = ( 'Unknown option(s) for subparser_dest command: subcommand. ' 'Valid options are: bar, force_color, help, no_color, ' 'pythonpath, settings, skip_checks, stderr, stdout, ' 'traceback, verbosity, version.' ) with self.assertRaisesMessage(TypeError, msg): management.call_command('subparser_dest', subcommand='foo', bar=12) def test_create_parser_kwargs(self): """BaseCommand.create_parser() passes kwargs to CommandParser.""" epilog = 'some epilog text' parser = BaseCommand().create_parser('prog_name', 'subcommand', epilog=epilog) self.assertEqual(parser.epilog, epilog) def test_outputwrapper_flush(self): out = StringIO() with mock.patch.object(out, 'flush') as mocked_flush: management.call_command('outputwrapper', stdout=out) self.assertIn('Working...', out.getvalue()) self.assertIs(mocked_flush.called, True) class CommandRunTests(AdminScriptTestCase): """ Tests that need to run by simulating the command line, not by call_command. """ def test_script_prefix_set_in_commands(self): self.write_settings('settings.py', apps=['user_commands'], sdict={ 'ROOT_URLCONF': '"user_commands.urls"', 'FORCE_SCRIPT_NAME': '"/PREFIX/"', }) out, err = self.run_manage(['reverse_url']) self.assertNoOutput(err) self.assertEqual(out.strip(), '/PREFIX/some/url/') def test_disallowed_abbreviated_options(self): """ To avoid conflicts with custom options, commands don't allow abbreviated forms of the --setting and --pythonpath options. """ self.write_settings('settings.py', apps=['user_commands']) out, err = self.run_manage(['set_option', '--set', 'foo']) self.assertNoOutput(err) self.assertEqual(out.strip(), 'Set foo') def test_skip_checks(self): self.write_settings('settings.py', apps=['django.contrib.staticfiles', 'user_commands'], sdict={ # (staticfiles.E001) The STATICFILES_DIRS setting is not a tuple or # list. 
'STATICFILES_DIRS': '"foo"', }) out, err = self.run_manage(['set_option', '--skip-checks', '--set', 'foo']) self.assertNoOutput(err) self.assertEqual(out.strip(), 'Set foo') class UtilsTests(SimpleTestCase): def test_no_existent_external_program(self): msg = 'Error executing a_42_command_that_doesnt_exist_42' with self.assertRaisesMessage(CommandError, msg): popen_wrapper(['a_42_command_that_doesnt_exist_42']) def test_get_random_secret_key(self): key = get_random_secret_key() self.assertEqual(len(key), 50) for char in key: self.assertIn(char, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') def test_is_ignored_path_true(self): patterns = ( ['foo/bar/baz'], ['baz'], ['foo/bar/baz'], ['*/baz'], ['*'], ['b?z'], ['[abc]az'], ['*/ba[!z]/baz'], ) for ignore_patterns in patterns: with self.subTest(ignore_patterns=ignore_patterns): self.assertIs(is_ignored_path('foo/bar/baz', ignore_patterns=ignore_patterns), True) def test_is_ignored_path_false(self): self.assertIs(is_ignored_path('foo/bar/baz', ignore_patterns=['foo/bar/bat', 'bar', 'flub/blub']), False) def test_normalize_path_patterns_truncates_wildcard_base(self): expected = [os.path.normcase(p) for p in ['foo/bar', 'bar/*/']] self.assertEqual(normalize_path_patterns(['foo/bar/*', 'bar/*/']), expected) class DeprecationTests(SimpleTestCase): def test_requires_system_checks_warning(self): class Command(BaseCommand): pass msg = ( "Using a boolean value for requires_system_checks is deprecated. " "Use '__all__' instead of True, and [] (an empty list) instead of " "False." ) for value in [False, True]: Command.requires_system_checks = value with self.assertRaisesMessage(RemovedInDjango41Warning, msg): Command() @ignore_warnings(category=RemovedInDjango41Warning) def test_requires_system_checks_true(self): class Command(BaseCommand): requires_system_checks = True def handle(self, *args, **options): pass command = Command() with mock.patch('django.core.management.base.BaseCommand.check') as mocked_check: management.call_command(command, skip_checks=False) mocked_check.assert_called_once_with() @ignore_warnings(category=RemovedInDjango41Warning) def test_requires_system_checks_false(self): class Command(BaseCommand): requires_system_checks = False def handle(self, *args, **options): pass command = Command() with mock.patch('django.core.management.base.BaseCommand.check') as mocked_check: management.call_command(command) self.assertIs(mocked_check.called, False)
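The commands exercised above (dance, hal, required_option, and friends) live in the test app's management/commands package and are not included in this dump. Below is a minimal, hedged sketch of a dance-like command, assuming option names that mirror what the tests pass; the handle() body is illustrative rather than Django's actual test command.

# A hedged sketch (not the actual test command) of a management command shaped
# like the `dance` command the tests above call.
from django.core.management.base import BaseCommand, CommandError


class Command(BaseCommand):
    help = "Dance around like a madman."
    requires_system_checks = '__all__'

    def add_arguments(self, parser):
        parser.add_argument('integer', nargs='?', type=int, default=0)
        parser.add_argument('-s', '--style', default="Rock'n'Roll")
        parser.add_argument('-x', '--example')
        # call_command('dance', opt_3=True) addresses this option by its dest name.
        parser.add_argument('--opt-3', action='store_true', dest='opt_3')

    def handle(self, *args, **options):
        if options['example'] == 'raise':
            # Surfaces as CommandError from call_command(), and as exit code 3
            # when the command is run from the command line.
            raise CommandError(returncode=3)
        if options['opt_3']:
            self.stdout.write('option3')
        self.stdout.write("I don't feel like dancing %s." % options['style'])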
77119ea8912468cc906af0e0a910d4b64fe0e5851149013100fdb59b63c158c9
from django.contrib.auth.models import User
from django.test import override_settings

from .tests import AdminDocsTestCase, TestDataMixin


class XViewMiddlewareTest(TestDataMixin, AdminDocsTestCase):
    def test_xview_func(self):
        user = User.objects.get(username='super')
        response = self.client.head('/xview/func/')
        self.assertNotIn('X-View', response)
        self.client.force_login(self.superuser)
        response = self.client.head('/xview/func/')
        self.assertIn('X-View', response)
        self.assertEqual(response.headers['X-View'], 'admin_docs.views.xview')
        user.is_staff = False
        user.save()
        response = self.client.head('/xview/func/')
        self.assertNotIn('X-View', response)
        user.is_staff = True
        user.is_active = False
        user.save()
        response = self.client.head('/xview/func/')
        self.assertNotIn('X-View', response)

    def test_xview_class(self):
        user = User.objects.get(username='super')
        response = self.client.head('/xview/class/')
        self.assertNotIn('X-View', response)
        self.client.force_login(self.superuser)
        response = self.client.head('/xview/class/')
        self.assertIn('X-View', response)
        self.assertEqual(response.headers['X-View'], 'admin_docs.views.XViewClass')
        user.is_staff = False
        user.save()
        response = self.client.head('/xview/class/')
        self.assertNotIn('X-View', response)
        user.is_staff = True
        user.is_active = False
        user.save()
        response = self.client.head('/xview/class/')
        self.assertNotIn('X-View', response)

    def test_callable_object_view(self):
        self.client.force_login(self.superuser)
        response = self.client.head('/xview/callable_object/')
        self.assertEqual(response.headers['X-View'], 'admin_docs.views.XViewCallableObject')

    @override_settings(MIDDLEWARE=[])
    def test_no_auth_middleware(self):
        msg = (
            "The XView middleware requires authentication middleware to be "
            "installed. Edit your MIDDLEWARE setting to insert "
            "'django.contrib.auth.middleware.AuthenticationMiddleware'."
        )
        with self.assertRaisesMessage(AssertionError, msg):
            self.client.head('/xview/func/')
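These X-View assertions rely on django.contrib.admindocs.middleware.XViewMiddleware, which adds the header to HEAD responses for logged-in, active staff users and requires the auth middleware, as the last test's error message spells out. A hedged sketch of the relevant settings (assumed, not taken from the test project):

# Illustrative MIDDLEWARE excerpt: admindocs' XViewMiddleware must come after
# AuthenticationMiddleware, otherwise the AssertionError tested above is raised.
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.admindocs.middleware.XViewMiddleware',
]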
878c89f0ddbcd15232ef9494afc9c93a183fb528c8edcf4dbcaab005c68d030b
import errno import gzip import os import struct import tempfile import unittest from io import BytesIO, StringIO, TextIOWrapper from pathlib import Path from unittest import mock from django.core.files import File, locks from django.core.files.base import ContentFile from django.core.files.move import file_move_safe from django.core.files.temp import NamedTemporaryFile from django.core.files.uploadedfile import ( InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile, UploadedFile, ) from django.test import override_settings try: from PIL import Image, features HAS_WEBP = features.check('webp') except ImportError: Image = None HAS_WEBP = False else: from django.core.files import images class FileTests(unittest.TestCase): def test_unicode_uploadedfile_name(self): uf = UploadedFile(name='¿Cómo?', content_type='text') self.assertIs(type(repr(uf)), str) def test_unicode_file_name(self): f = File(None, 'djángö') self.assertIs(type(repr(f)), str) def test_context_manager(self): orig_file = tempfile.TemporaryFile() base_file = File(orig_file) with base_file as f: self.assertIs(base_file, f) self.assertFalse(f.closed) self.assertTrue(f.closed) self.assertTrue(orig_file.closed) def test_open_resets_opened_file_to_start_and_returns_context_manager(self): file = File(BytesIO(b'content')) file.read() with file.open() as f: self.assertEqual(f.read(), b'content') def test_open_reopens_closed_file_and_returns_context_manager(self): temporary_file = tempfile.NamedTemporaryFile(delete=False) file = File(temporary_file) try: file.close() with file.open() as f: self.assertFalse(f.closed) finally: # remove temporary file os.unlink(file.name) def test_namedtemporaryfile_closes(self): """ The symbol django.core.files.NamedTemporaryFile is assigned as a different class on different operating systems. In any case, the result should minimally mock some of the API of tempfile.NamedTemporaryFile from the Python standard library. """ tempfile = NamedTemporaryFile() self.assertTrue(hasattr(tempfile, "closed")) self.assertFalse(tempfile.closed) tempfile.close() self.assertTrue(tempfile.closed) def test_file_mode(self): # Should not set mode to None if it is not present. # See #14681, stdlib gzip module crashes if mode is set to None file = SimpleUploadedFile("mode_test.txt", b"content") self.assertFalse(hasattr(file, 'mode')) gzip.GzipFile(fileobj=file) def test_file_iteration(self): """ File objects should yield lines when iterated over. Refs #22107. """ file = File(BytesIO(b'one\ntwo\nthree')) self.assertEqual(list(file), [b'one\n', b'two\n', b'three']) def test_file_iteration_windows_newlines(self): """ #8149 - File objects with \r\n line endings should yield lines when iterated over. """ f = File(BytesIO(b'one\r\ntwo\r\nthree')) self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three']) def test_file_iteration_mac_newlines(self): """ #8149 - File objects with \r line endings should yield lines when iterated over. """ f = File(BytesIO(b'one\rtwo\rthree')) self.assertEqual(list(f), [b'one\r', b'two\r', b'three']) def test_file_iteration_mixed_newlines(self): f = File(BytesIO(b'one\rtwo\nthree\r\nfour')) self.assertEqual(list(f), [b'one\r', b'two\n', b'three\r\n', b'four']) def test_file_iteration_with_unix_newline_at_chunk_boundary(self): f = File(BytesIO(b'one\ntwo\nthree')) # Set chunk size to create a boundary after \n: # b'one\n... 
# ^ f.DEFAULT_CHUNK_SIZE = 4 self.assertEqual(list(f), [b'one\n', b'two\n', b'three']) def test_file_iteration_with_windows_newline_at_chunk_boundary(self): f = File(BytesIO(b'one\r\ntwo\r\nthree')) # Set chunk size to create a boundary between \r and \n: # b'one\r\n... # ^ f.DEFAULT_CHUNK_SIZE = 4 self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three']) def test_file_iteration_with_mac_newline_at_chunk_boundary(self): f = File(BytesIO(b'one\rtwo\rthree')) # Set chunk size to create a boundary after \r: # b'one\r... # ^ f.DEFAULT_CHUNK_SIZE = 4 self.assertEqual(list(f), [b'one\r', b'two\r', b'three']) def test_file_iteration_with_text(self): f = File(StringIO('one\ntwo\nthree')) self.assertEqual(list(f), ['one\n', 'two\n', 'three']) def test_readable(self): with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file: self.assertTrue(test_file.readable()) self.assertFalse(test_file.readable()) def test_writable(self): with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file: self.assertTrue(test_file.writable()) self.assertFalse(test_file.writable()) with tempfile.TemporaryFile('rb') as temp, File(temp, name='something.txt') as test_file: self.assertFalse(test_file.writable()) def test_seekable(self): with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file: self.assertTrue(test_file.seekable()) self.assertFalse(test_file.seekable()) def test_io_wrapper(self): content = "vive l'été\n" with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file: test_file.write(content.encode()) test_file.seek(0) wrapper = TextIOWrapper(test_file, 'utf-8', newline='\n') self.assertEqual(wrapper.read(), content) wrapper.write(content) wrapper.seek(0) self.assertEqual(wrapper.read(), content * 2) test_file = wrapper.detach() test_file.seek(0) self.assertEqual(test_file.read(), (content * 2).encode()) def test_exclusive_lock(self): file_path = Path(__file__).parent / 'test.png' with open(file_path) as f1, open(file_path) as f2: self.assertIs(locks.lock(f1, locks.LOCK_EX), True) self.assertIs(locks.lock(f2, locks.LOCK_EX | locks.LOCK_NB), False) self.assertIs(locks.lock(f2, locks.LOCK_SH | locks.LOCK_NB), False) self.assertIs(locks.unlock(f1), True) def test_shared_lock(self): file_path = Path(__file__).parent / 'test.png' with open(file_path) as f1, open(file_path) as f2: self.assertIs(locks.lock(f1, locks.LOCK_SH), True) self.assertIs(locks.lock(f2, locks.LOCK_SH | locks.LOCK_NB), True) self.assertIs(locks.unlock(f1), True) self.assertIs(locks.unlock(f2), True) class NoNameFileTestCase(unittest.TestCase): """ Other examples of unnamed files may be tempfile.SpooledTemporaryFile or urllib.urlopen() """ def test_noname_file_default_name(self): self.assertIsNone(File(BytesIO(b'A file with no name')).name) def test_noname_file_get_size(self): self.assertEqual(File(BytesIO(b'A file with no name')).size, 19) class ContentFileTestCase(unittest.TestCase): def test_content_file_default_name(self): self.assertIsNone(ContentFile(b"content").name) def test_content_file_custom_name(self): """ The constructor of ContentFile accepts 'name' (#16590). """ name = "I can have a name too!" self.assertEqual(ContentFile(b"content", name=name).name, name) def test_content_file_input_type(self): """ ContentFile can accept both bytes and strings and the retrieved content is of the same type. 
""" self.assertIsInstance(ContentFile(b"content").read(), bytes) self.assertIsInstance(ContentFile("español").read(), str) def test_open_resets_file_to_start_and_returns_context_manager(self): file = ContentFile(b'content') with file.open() as f: self.assertEqual(f.read(), b'content') with file.open() as f: self.assertEqual(f.read(), b'content') def test_size_changing_after_writing(self): """ContentFile.size changes after a write().""" f = ContentFile('') self.assertEqual(f.size, 0) f.write('Test ') f.write('string') self.assertEqual(f.size, 11) with f.open() as fh: self.assertEqual(fh.read(), 'Test string') class InMemoryUploadedFileTests(unittest.TestCase): def test_open_resets_file_to_start_and_returns_context_manager(self): uf = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8') uf.read() with uf.open() as f: self.assertEqual(f.read(), '1') class TemporaryUploadedFileTests(unittest.TestCase): def test_extension_kept(self): """The temporary file name has the same suffix as the original file.""" with TemporaryUploadedFile('test.txt', 'text/plain', 1, 'utf8') as temp_file: self.assertTrue(temp_file.file.name.endswith('.upload.txt')) def test_file_upload_temp_dir_pathlib(self): with tempfile.TemporaryDirectory() as tmp_dir: with override_settings(FILE_UPLOAD_TEMP_DIR=Path(tmp_dir)): with TemporaryUploadedFile('test.txt', 'text/plain', 1, 'utf-8') as temp_file: self.assertTrue(os.path.exists(temp_file.file.name)) class DimensionClosingBug(unittest.TestCase): """ get_image_dimensions() properly closes files (#8817) """ @unittest.skipUnless(Image, "Pillow not installed") def test_not_closing_of_files(self): """ Open files passed into get_image_dimensions() should stay opened. """ empty_io = BytesIO() try: images.get_image_dimensions(empty_io) finally: self.assertTrue(not empty_io.closed) @unittest.skipUnless(Image, "Pillow not installed") def test_closing_of_filenames(self): """ get_image_dimensions() called with a filename should closed the file. """ # We need to inject a modified open() builtin into the images module # that checks if the file was closed properly if the function is # called with a filename instead of a file object. # get_image_dimensions will call our catching_open instead of the # regular builtin one. class FileWrapper: _closed = [] def __init__(self, f): self.f = f def __getattr__(self, name): return getattr(self.f, name) def close(self): self._closed.append(True) self.f.close() def catching_open(*args): return FileWrapper(open(*args)) images.open = catching_open try: images.get_image_dimensions(os.path.join(os.path.dirname(__file__), "test1.png")) finally: del images.open self.assertTrue(FileWrapper._closed) class InconsistentGetImageDimensionsBug(unittest.TestCase): """ get_image_dimensions() works properly after various calls using a file handler (#11158) """ @unittest.skipUnless(Image, "Pillow not installed") def test_multiple_calls(self): """ Multiple calls of get_image_dimensions() should return the same size. 
""" img_path = os.path.join(os.path.dirname(__file__), "test.png") with open(img_path, 'rb') as fh: image = images.ImageFile(fh) image_pil = Image.open(fh) size_1 = images.get_image_dimensions(image) size_2 = images.get_image_dimensions(image) self.assertEqual(image_pil.size, size_1) self.assertEqual(size_1, size_2) @unittest.skipUnless(Image, "Pillow not installed") def test_bug_19457(self): """ Regression test for #19457 get_image_dimensions fails on some pngs, while Image.size is working good on them """ img_path = os.path.join(os.path.dirname(__file__), "magic.png") size = images.get_image_dimensions(img_path) with open(img_path, 'rb') as fh: self.assertEqual(size, Image.open(fh).size) @unittest.skipUnless(Image, "Pillow not installed") class GetImageDimensionsTests(unittest.TestCase): def test_invalid_image(self): """ get_image_dimensions() should return (None, None) for the dimensions of invalid images (#24441). brokenimg.png is not a valid image and it has been generated by: $ echo "123" > brokenimg.png """ img_path = os.path.join(os.path.dirname(__file__), "brokenimg.png") with open(img_path, 'rb') as fh: size = images.get_image_dimensions(fh) self.assertEqual(size, (None, None)) def test_valid_image(self): """ get_image_dimensions() should catch struct.error while feeding the PIL Image parser (#24544). Emulates the Parser feed error. Since the error is raised on every feed attempt, the resulting image size should be invalid: (None, None). """ img_path = os.path.join(os.path.dirname(__file__), "test.png") with mock.patch('PIL.ImageFile.Parser.feed', side_effect=struct.error): with open(img_path, 'rb') as fh: size = images.get_image_dimensions(fh) self.assertEqual(size, (None, None)) @unittest.skipUnless(HAS_WEBP, 'WEBP not installed') def test_webp(self): img_path = os.path.join(os.path.dirname(__file__), 'test.webp') with open(img_path, 'rb') as fh: size = images.get_image_dimensions(fh) self.assertEqual(size, (540, 405)) class FileMoveSafeTests(unittest.TestCase): def test_file_move_overwrite(self): handle_a, self.file_a = tempfile.mkstemp() handle_b, self.file_b = tempfile.mkstemp() # file_move_safe() raises OSError if the destination file exists and # allow_overwrite is False. with self.assertRaises(FileExistsError): file_move_safe(self.file_a, self.file_b, allow_overwrite=False) # should allow it and continue on if allow_overwrite is True self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True)) os.close(handle_a) os.close(handle_b) def test_file_move_copystat_cifs(self): """ file_move_safe() ignores a copystat() EPERM PermissionError. This happens when the destination filesystem is CIFS, for example. """ copystat_EACCES_error = PermissionError(errno.EACCES, 'msg') copystat_EPERM_error = PermissionError(errno.EPERM, 'msg') handle_a, self.file_a = tempfile.mkstemp() handle_b, self.file_b = tempfile.mkstemp() try: # This exception is required to reach the copystat() call in # file_safe_move(). with mock.patch('django.core.files.move.os.rename', side_effect=OSError()): # An error besides EPERM isn't ignored. with mock.patch('django.core.files.move.copystat', side_effect=copystat_EACCES_error): with self.assertRaises(PermissionError): file_move_safe(self.file_a, self.file_b, allow_overwrite=True) # EPERM is ignored. 
with mock.patch('django.core.files.move.copystat', side_effect=copystat_EPERM_error): self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True)) finally: os.close(handle_a) os.close(handle_b) class SpooledTempTests(unittest.TestCase): def test_in_memory_spooled_temp(self): with tempfile.SpooledTemporaryFile() as temp: temp.write(b"foo bar baz quux\n") django_file = File(temp, name="something.txt") self.assertEqual(django_file.size, 17) def test_written_spooled_temp(self): with tempfile.SpooledTemporaryFile(max_size=4) as temp: temp.write(b"foo bar baz quux\n") django_file = File(temp, name="something.txt") self.assertEqual(django_file.size, 17)
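A short usage sketch of the behaviour the iteration tests above pin down: File yields whole lines even when a newline sequence straddles a chunk boundary (DEFAULT_CHUNK_SIZE is shrunk here only to force such a boundary, exactly as the tests do).

from io import BytesIO

from django.core.files import File

f = File(BytesIO(b'one\r\ntwo\nthree'))
f.DEFAULT_CHUNK_SIZE = 4  # force a chunk boundary between the \r and the \n
print(list(f))  # expected: [b'one\r\n', b'two\n', b'three']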
28ffcd570fbbfe3106bde76cea6ca91e7fc8d5bdffa08adb4813dbc855a2bb5c
import json from django.db.models import CharField, F, OuterRef, Q, Subquery, Value from django.db.models.fields.json import KeyTextTransform, KeyTransform from django.db.models.functions import Cast, Concat, Substr from django.test.utils import Approximate from . import PostgreSQLTestCase from .models import AggregateTestModel, StatTestModel try: from django.contrib.postgres.aggregates import ( ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, JSONBAgg, RegrAvgX, RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope, RegrSXX, RegrSXY, RegrSYY, StatAggregate, StringAgg, ) except ImportError: pass # psycopg2 is not installed class TestGeneralAggregate(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.aggs = AggregateTestModel.objects.bulk_create([ AggregateTestModel(boolean_field=True, char_field='Foo1', integer_field=0), AggregateTestModel( boolean_field=False, char_field='Foo2', integer_field=1, json_field={'lang': 'pl'}, ), AggregateTestModel( boolean_field=False, char_field='Foo4', integer_field=2, json_field={'lang': 'en'}, ), AggregateTestModel( boolean_field=True, char_field='Foo3', integer_field=0, json_field={'breed': 'collie'}, ), ]) def test_array_agg_charfield(self): values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field')) self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo4', 'Foo3']}) def test_array_agg_charfield_ordering(self): ordering_test_cases = ( (F('char_field').desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']), (F('char_field').asc(), ['Foo1', 'Foo2', 'Foo3', 'Foo4']), (F('char_field'), ['Foo1', 'Foo2', 'Foo3', 'Foo4']), ([F('boolean_field'), F('char_field').desc()], ['Foo4', 'Foo2', 'Foo3', 'Foo1']), ((F('boolean_field'), F('char_field').desc()), ['Foo4', 'Foo2', 'Foo3', 'Foo1']), ('char_field', ['Foo1', 'Foo2', 'Foo3', 'Foo4']), ('-char_field', ['Foo4', 'Foo3', 'Foo2', 'Foo1']), (Concat('char_field', Value('@')), ['Foo1', 'Foo2', 'Foo3', 'Foo4']), (Concat('char_field', Value('@')).desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']), ( (Substr('char_field', 1, 1), F('integer_field'), Substr('char_field', 4, 1).desc()), ['Foo3', 'Foo1', 'Foo2', 'Foo4'], ), ) for ordering, expected_output in ordering_test_cases: with self.subTest(ordering=ordering, expected_output=expected_output): values = AggregateTestModel.objects.aggregate( arrayagg=ArrayAgg('char_field', ordering=ordering) ) self.assertEqual(values, {'arrayagg': expected_output}) def test_array_agg_integerfield(self): values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field')) self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]}) def test_array_agg_integerfield_ordering(self): values = AggregateTestModel.objects.aggregate( arrayagg=ArrayAgg('integer_field', ordering=F('integer_field').desc()) ) self.assertEqual(values, {'arrayagg': [2, 1, 0, 0]}) def test_array_agg_booleanfield(self): values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field')) self.assertEqual(values, {'arrayagg': [True, False, False, True]}) def test_array_agg_booleanfield_ordering(self): ordering_test_cases = ( (F('boolean_field').asc(), [False, False, True, True]), (F('boolean_field').desc(), [True, True, False, False]), (F('boolean_field'), [False, False, True, True]), ) for ordering, expected_output in ordering_test_cases: with self.subTest(ordering=ordering, expected_output=expected_output): values = AggregateTestModel.objects.aggregate( arrayagg=ArrayAgg('boolean_field', ordering=ordering) ) self.assertEqual(values, {'arrayagg': expected_output}) def 
test_array_agg_jsonfield(self): values = AggregateTestModel.objects.aggregate( arrayagg=ArrayAgg( KeyTransform('lang', 'json_field'), filter=Q(json_field__lang__isnull=False), ), ) self.assertEqual(values, {'arrayagg': ['pl', 'en']}) def test_array_agg_jsonfield_ordering(self): values = AggregateTestModel.objects.aggregate( arrayagg=ArrayAgg( KeyTransform('lang', 'json_field'), filter=Q(json_field__lang__isnull=False), ordering=KeyTransform('lang', 'json_field'), ), ) self.assertEqual(values, {'arrayagg': ['en', 'pl']}) def test_array_agg_filter(self): values = AggregateTestModel.objects.aggregate( arrayagg=ArrayAgg('integer_field', filter=Q(integer_field__gt=0)), ) self.assertEqual(values, {'arrayagg': [1, 2]}) def test_array_agg_empty_result(self): AggregateTestModel.objects.all().delete() values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field')) self.assertEqual(values, {'arrayagg': []}) values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field')) self.assertEqual(values, {'arrayagg': []}) values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field')) self.assertEqual(values, {'arrayagg': []}) def test_array_agg_lookups(self): aggr1 = AggregateTestModel.objects.create() aggr2 = AggregateTestModel.objects.create() StatTestModel.objects.create(related_field=aggr1, int1=1, int2=0) StatTestModel.objects.create(related_field=aggr1, int1=2, int2=0) StatTestModel.objects.create(related_field=aggr2, int1=3, int2=0) StatTestModel.objects.create(related_field=aggr2, int1=4, int2=0) qs = StatTestModel.objects.values('related_field').annotate( array=ArrayAgg('int1') ).filter(array__overlap=[2]).values_list('array', flat=True) self.assertCountEqual(qs.get(), [1, 2]) def test_bit_and_general(self): values = AggregateTestModel.objects.filter( integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field')) self.assertEqual(values, {'bitand': 0}) def test_bit_and_on_only_true_values(self): values = AggregateTestModel.objects.filter( integer_field=1).aggregate(bitand=BitAnd('integer_field')) self.assertEqual(values, {'bitand': 1}) def test_bit_and_on_only_false_values(self): values = AggregateTestModel.objects.filter( integer_field=0).aggregate(bitand=BitAnd('integer_field')) self.assertEqual(values, {'bitand': 0}) def test_bit_and_empty_result(self): AggregateTestModel.objects.all().delete() values = AggregateTestModel.objects.aggregate(bitand=BitAnd('integer_field')) self.assertEqual(values, {'bitand': None}) def test_bit_or_general(self): values = AggregateTestModel.objects.filter( integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field')) self.assertEqual(values, {'bitor': 1}) def test_bit_or_on_only_true_values(self): values = AggregateTestModel.objects.filter( integer_field=1).aggregate(bitor=BitOr('integer_field')) self.assertEqual(values, {'bitor': 1}) def test_bit_or_on_only_false_values(self): values = AggregateTestModel.objects.filter( integer_field=0).aggregate(bitor=BitOr('integer_field')) self.assertEqual(values, {'bitor': 0}) def test_bit_or_empty_result(self): AggregateTestModel.objects.all().delete() values = AggregateTestModel.objects.aggregate(bitor=BitOr('integer_field')) self.assertEqual(values, {'bitor': None}) def test_bool_and_general(self): values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field')) self.assertEqual(values, {'booland': False}) def test_bool_and_empty_result(self): AggregateTestModel.objects.all().delete() values = 
AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field')) self.assertEqual(values, {'booland': None}) def test_bool_and_q_object(self): values = AggregateTestModel.objects.aggregate( booland=BoolAnd(Q(integer_field__gt=2)), ) self.assertEqual(values, {'booland': False}) def test_bool_or_general(self): values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field')) self.assertEqual(values, {'boolor': True}) def test_bool_or_empty_result(self): AggregateTestModel.objects.all().delete() values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field')) self.assertEqual(values, {'boolor': None}) def test_bool_or_q_object(self): values = AggregateTestModel.objects.aggregate( boolor=BoolOr(Q(integer_field__gt=2)), ) self.assertEqual(values, {'boolor': False}) def test_string_agg_requires_delimiter(self): with self.assertRaises(TypeError): AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field')) def test_string_agg_delimiter_escaping(self): values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter="'")) self.assertEqual(values, {'stringagg': "Foo1'Foo2'Foo4'Foo3"}) def test_string_agg_charfield(self): values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';')) self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo4;Foo3'}) def test_string_agg_charfield_ordering(self): ordering_test_cases = ( (F('char_field').desc(), 'Foo4;Foo3;Foo2;Foo1'), (F('char_field').asc(), 'Foo1;Foo2;Foo3;Foo4'), (F('char_field'), 'Foo1;Foo2;Foo3;Foo4'), ('char_field', 'Foo1;Foo2;Foo3;Foo4'), ('-char_field', 'Foo4;Foo3;Foo2;Foo1'), (Concat('char_field', Value('@')), 'Foo1;Foo2;Foo3;Foo4'), (Concat('char_field', Value('@')).desc(), 'Foo4;Foo3;Foo2;Foo1'), ) for ordering, expected_output in ordering_test_cases: with self.subTest(ordering=ordering, expected_output=expected_output): values = AggregateTestModel.objects.aggregate( stringagg=StringAgg('char_field', delimiter=';', ordering=ordering) ) self.assertEqual(values, {'stringagg': expected_output}) def test_string_agg_jsonfield_ordering(self): values = AggregateTestModel.objects.aggregate( stringagg=StringAgg( KeyTextTransform('lang', 'json_field'), delimiter=';', ordering=KeyTextTransform('lang', 'json_field'), output_field=CharField(), ), ) self.assertEqual(values, {'stringagg': 'en;pl'}) def test_string_agg_filter(self): values = AggregateTestModel.objects.aggregate( stringagg=StringAgg( 'char_field', delimiter=';', filter=Q(char_field__endswith='3') | Q(char_field__endswith='1'), ) ) self.assertEqual(values, {'stringagg': 'Foo1;Foo3'}) def test_string_agg_empty_result(self): AggregateTestModel.objects.all().delete() values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';')) self.assertEqual(values, {'stringagg': ''}) def test_orderable_agg_alternative_fields(self): values = AggregateTestModel.objects.aggregate( arrayagg=ArrayAgg('integer_field', ordering=F('char_field').asc()) ) self.assertEqual(values, {'arrayagg': [0, 1, 0, 2]}) def test_json_agg(self): values = AggregateTestModel.objects.aggregate(jsonagg=JSONBAgg('char_field')) self.assertEqual(values, {'jsonagg': ['Foo1', 'Foo2', 'Foo4', 'Foo3']}) def test_json_agg_empty(self): values = AggregateTestModel.objects.none().aggregate(jsonagg=JSONBAgg('integer_field')) self.assertEqual(values, json.loads('{"jsonagg": []}')) def test_json_agg_charfield_ordering(self): ordering_test_cases = ( (F('char_field').desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']), 
(F('char_field').asc(), ['Foo1', 'Foo2', 'Foo3', 'Foo4']), (F('char_field'), ['Foo1', 'Foo2', 'Foo3', 'Foo4']), ('char_field', ['Foo1', 'Foo2', 'Foo3', 'Foo4']), ('-char_field', ['Foo4', 'Foo3', 'Foo2', 'Foo1']), (Concat('char_field', Value('@')), ['Foo1', 'Foo2', 'Foo3', 'Foo4']), (Concat('char_field', Value('@')).desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']), ) for ordering, expected_output in ordering_test_cases: with self.subTest(ordering=ordering, expected_output=expected_output): values = AggregateTestModel.objects.aggregate( jsonagg=JSONBAgg('char_field', ordering=ordering), ) self.assertEqual(values, {'jsonagg': expected_output}) def test_json_agg_integerfield_ordering(self): values = AggregateTestModel.objects.aggregate( jsonagg=JSONBAgg('integer_field', ordering=F('integer_field').desc()), ) self.assertEqual(values, {'jsonagg': [2, 1, 0, 0]}) def test_json_agg_booleanfield_ordering(self): ordering_test_cases = ( (F('boolean_field').asc(), [False, False, True, True]), (F('boolean_field').desc(), [True, True, False, False]), (F('boolean_field'), [False, False, True, True]), ) for ordering, expected_output in ordering_test_cases: with self.subTest(ordering=ordering, expected_output=expected_output): values = AggregateTestModel.objects.aggregate( jsonagg=JSONBAgg('boolean_field', ordering=ordering), ) self.assertEqual(values, {'jsonagg': expected_output}) def test_json_agg_jsonfield_ordering(self): values = AggregateTestModel.objects.aggregate( jsonagg=JSONBAgg( KeyTransform('lang', 'json_field'), filter=Q(json_field__lang__isnull=False), ordering=KeyTransform('lang', 'json_field'), ), ) self.assertEqual(values, {'jsonagg': ['en', 'pl']}) def test_string_agg_array_agg_ordering_in_subquery(self): stats = [] for i, agg in enumerate(AggregateTestModel.objects.order_by('char_field')): stats.append(StatTestModel(related_field=agg, int1=i, int2=i + 1)) stats.append(StatTestModel(related_field=agg, int1=i + 1, int2=i)) StatTestModel.objects.bulk_create(stats) for aggregate, expected_result in ( ( ArrayAgg('stattestmodel__int1', ordering='-stattestmodel__int2'), [('Foo1', [0, 1]), ('Foo2', [1, 2]), ('Foo3', [2, 3]), ('Foo4', [3, 4])], ), ( StringAgg( Cast('stattestmodel__int1', CharField()), delimiter=';', ordering='-stattestmodel__int2', ), [('Foo1', '0;1'), ('Foo2', '1;2'), ('Foo3', '2;3'), ('Foo4', '3;4')], ), ): with self.subTest(aggregate=aggregate.__class__.__name__): subquery = AggregateTestModel.objects.filter( pk=OuterRef('pk'), ).annotate(agg=aggregate).values('agg') values = AggregateTestModel.objects.annotate( agg=Subquery(subquery), ).order_by('char_field').values_list('char_field', 'agg') self.assertEqual(list(values), expected_result) def test_string_agg_array_agg_filter_in_subquery(self): StatTestModel.objects.bulk_create([ StatTestModel(related_field=self.aggs[0], int1=0, int2=5), StatTestModel(related_field=self.aggs[0], int1=1, int2=4), StatTestModel(related_field=self.aggs[0], int1=2, int2=3), ]) for aggregate, expected_result in ( ( ArrayAgg('stattestmodel__int1', filter=Q(stattestmodel__int2__gt=3)), [('Foo1', [0, 1]), ('Foo2', None)], ), ( StringAgg( Cast('stattestmodel__int2', CharField()), delimiter=';', filter=Q(stattestmodel__int1__lt=2), ), [('Foo1', '5;4'), ('Foo2', None)], ), ): with self.subTest(aggregate=aggregate.__class__.__name__): subquery = AggregateTestModel.objects.filter( pk=OuterRef('pk'), ).annotate(agg=aggregate).values('agg') values = AggregateTestModel.objects.annotate( agg=Subquery(subquery), ).filter( char_field__in=['Foo1', 'Foo2'], 
).order_by('char_field').values_list('char_field', 'agg') self.assertEqual(list(values), expected_result) def test_string_agg_filter_in_subquery_with_exclude(self): subquery = AggregateTestModel.objects.annotate( stringagg=StringAgg( 'char_field', delimiter=';', filter=Q(char_field__endswith='1'), ) ).exclude(stringagg='').values('id') self.assertSequenceEqual( AggregateTestModel.objects.filter(id__in=Subquery(subquery)), [self.aggs[0]], ) class TestAggregateDistinct(PostgreSQLTestCase): @classmethod def setUpTestData(cls): AggregateTestModel.objects.create(char_field='Foo') AggregateTestModel.objects.create(char_field='Foo') AggregateTestModel.objects.create(char_field='Bar') def test_string_agg_distinct_false(self): values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=False)) self.assertEqual(values['stringagg'].count('Foo'), 2) self.assertEqual(values['stringagg'].count('Bar'), 1) def test_string_agg_distinct_true(self): values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=True)) self.assertEqual(values['stringagg'].count('Foo'), 1) self.assertEqual(values['stringagg'].count('Bar'), 1) def test_array_agg_distinct_false(self): values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field', distinct=False)) self.assertEqual(sorted(values['arrayagg']), ['Bar', 'Foo', 'Foo']) def test_array_agg_distinct_true(self): values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field', distinct=True)) self.assertEqual(sorted(values['arrayagg']), ['Bar', 'Foo']) class TestStatisticsAggregate(PostgreSQLTestCase): @classmethod def setUpTestData(cls): StatTestModel.objects.create( int1=1, int2=3, related_field=AggregateTestModel.objects.create(integer_field=0), ) StatTestModel.objects.create( int1=2, int2=2, related_field=AggregateTestModel.objects.create(integer_field=1), ) StatTestModel.objects.create( int1=3, int2=1, related_field=AggregateTestModel.objects.create(integer_field=2), ) # Tests for base class (StatAggregate) def test_missing_arguments_raises_exception(self): with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'): StatAggregate(x=None, y=None) def test_correct_source_expressions(self): func = StatAggregate(x='test', y=13) self.assertIsInstance(func.source_expressions[0], Value) self.assertIsInstance(func.source_expressions[1], F) def test_alias_is_required(self): class SomeFunc(StatAggregate): function = 'TEST' with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1')) # Test aggregates def test_corr_general(self): values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1')) self.assertEqual(values, {'corr': -1.0}) def test_corr_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1')) self.assertEqual(values, {'corr': None}) def test_covar_pop_general(self): values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1')) self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)}) def test_covar_pop_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1')) self.assertEqual(values, {'covarpop': None}) def test_covar_pop_sample(self): values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True)) self.assertEqual(values, {'covarpop': -1.0}) def 
test_covar_pop_sample_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True)) self.assertEqual(values, {'covarpop': None}) def test_regr_avgx_general(self): values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1')) self.assertEqual(values, {'regravgx': 2.0}) def test_regr_avgx_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1')) self.assertEqual(values, {'regravgx': None}) def test_regr_avgy_general(self): values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1')) self.assertEqual(values, {'regravgy': 2.0}) def test_regr_avgy_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1')) self.assertEqual(values, {'regravgy': None}) def test_regr_count_general(self): values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1')) self.assertEqual(values, {'regrcount': 3}) def test_regr_count_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1')) self.assertEqual(values, {'regrcount': 0}) def test_regr_intercept_general(self): values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1')) self.assertEqual(values, {'regrintercept': 4}) def test_regr_intercept_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1')) self.assertEqual(values, {'regrintercept': None}) def test_regr_r2_general(self): values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1')) self.assertEqual(values, {'regrr2': 1}) def test_regr_r2_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1')) self.assertEqual(values, {'regrr2': None}) def test_regr_slope_general(self): values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1')) self.assertEqual(values, {'regrslope': -1}) def test_regr_slope_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1')) self.assertEqual(values, {'regrslope': None}) def test_regr_sxx_general(self): values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1')) self.assertEqual(values, {'regrsxx': 2.0}) def test_regr_sxx_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1')) self.assertEqual(values, {'regrsxx': None}) def test_regr_sxy_general(self): values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1')) self.assertEqual(values, {'regrsxy': -2.0}) def test_regr_sxy_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1')) self.assertEqual(values, {'regrsxy': None}) def test_regr_syy_general(self): values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1')) self.assertEqual(values, {'regrsyy': 2.0}) def test_regr_syy_empty_result(self): StatTestModel.objects.all().delete() values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1')) self.assertEqual(values, {'regrsyy': None}) def test_regr_avgx_with_related_obj_and_number_as_argument(self): """ This is more complex test to 
check if JOIN on field and number as argument works as expected. """ values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field')) self.assertEqual(values, {'complex_regravgx': 1.0})
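The suite drives ArrayAgg and StringAgg through aggregate(); the same ordering and filter arguments work when grouping with annotate(). A minimal sketch against the AggregateTestModel defined in the models module below (illustrative, not part of the suite):

from django.contrib.postgres.aggregates import ArrayAgg, StringAgg
from django.db.models import Q

from .models import AggregateTestModel

# One result row per boolean_field value, with the matching char_field values
# collected as an array and, for rows with integer_field > 0, as a ';'-joined string.
per_flag = AggregateTestModel.objects.values('boolean_field').annotate(
    names=ArrayAgg('char_field', ordering='char_field'),
    positive=StringAgg('char_field', delimiter=';', filter=Q(integer_field__gt=0)),
)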
dfed9602fd143408733a613a490d3e83e5b5e40c818d72f9baaec39a00b5c365
from django.db import models from .fields import ( ArrayField, BigIntegerRangeField, CICharField, CIEmailField, CITextField, DateRangeField, DateTimeRangeField, DecimalRangeField, EnumField, HStoreField, IntegerRangeField, SearchVectorField, ) class Tag: def __init__(self, tag_id): self.tag_id = tag_id def __eq__(self, other): return isinstance(other, Tag) and self.tag_id == other.tag_id class TagField(models.SmallIntegerField): def from_db_value(self, value, expression, connection): if value is None: return value return Tag(int(value)) def to_python(self, value): if isinstance(value, Tag): return value if value is None: return value return Tag(int(value)) def get_prep_value(self, value): return value.tag_id class PostgreSQLModel(models.Model): class Meta: abstract = True required_db_vendor = 'postgresql' class IntegerArrayModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=list, blank=True) class NullableIntegerArrayModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), blank=True, null=True) field_nested = ArrayField(ArrayField(models.IntegerField(null=True)), null=True) class CharArrayModel(PostgreSQLModel): field = ArrayField(models.CharField(max_length=10)) class DateTimeArrayModel(PostgreSQLModel): datetimes = ArrayField(models.DateTimeField()) dates = ArrayField(models.DateField()) times = ArrayField(models.TimeField()) class NestedIntegerArrayModel(PostgreSQLModel): field = ArrayField(ArrayField(models.IntegerField())) class OtherTypesArrayModel(PostgreSQLModel): ips = ArrayField(models.GenericIPAddressField(), default=list) uuids = ArrayField(models.UUIDField(), default=list) decimals = ArrayField(models.DecimalField(max_digits=5, decimal_places=2), default=list) tags = ArrayField(TagField(), blank=True, null=True) json = ArrayField(models.JSONField(default=dict), default=list) int_ranges = ArrayField(IntegerRangeField(), blank=True, null=True) bigint_ranges = ArrayField(BigIntegerRangeField(), blank=True, null=True) class HStoreModel(PostgreSQLModel): field = HStoreField(blank=True, null=True) array_field = ArrayField(HStoreField(), null=True) class ArrayEnumModel(PostgreSQLModel): array_of_enums = ArrayField(EnumField(max_length=20)) class CharFieldModel(models.Model): field = models.CharField(max_length=16) class TextFieldModel(models.Model): field = models.TextField() class SmallAutoFieldModel(models.Model): id = models.SmallAutoField(primary_key=True) class BigAutoFieldModel(models.Model): id = models.BigAutoField(primary_key=True) # Scene/Character/Line models are used to test full text search. They're # populated with content from Monty Python and the Holy Grail. 
class Scene(models.Model): scene = models.CharField(max_length=255) setting = models.CharField(max_length=255) class Character(models.Model): name = models.CharField(max_length=255) class CITestModel(PostgreSQLModel): name = CICharField(primary_key=True, max_length=255) email = CIEmailField() description = CITextField() array_field = ArrayField(CITextField(), null=True) class Line(PostgreSQLModel): scene = models.ForeignKey('Scene', models.CASCADE) character = models.ForeignKey('Character', models.CASCADE) dialogue = models.TextField(blank=True, null=True) dialogue_search_vector = SearchVectorField(blank=True, null=True) dialogue_config = models.CharField(max_length=100, blank=True, null=True) class LineSavedSearch(PostgreSQLModel): line = models.ForeignKey('Line', models.CASCADE) query = models.CharField(max_length=100) class RangesModel(PostgreSQLModel): ints = IntegerRangeField(blank=True, null=True) bigints = BigIntegerRangeField(blank=True, null=True) decimals = DecimalRangeField(blank=True, null=True) timestamps = DateTimeRangeField(blank=True, null=True) timestamps_inner = DateTimeRangeField(blank=True, null=True) dates = DateRangeField(blank=True, null=True) dates_inner = DateRangeField(blank=True, null=True) class RangeLookupsModel(PostgreSQLModel): parent = models.ForeignKey(RangesModel, models.SET_NULL, blank=True, null=True) integer = models.IntegerField(blank=True, null=True) big_integer = models.BigIntegerField(blank=True, null=True) float = models.FloatField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True) date = models.DateField(blank=True, null=True) small_integer = models.SmallIntegerField(blank=True, null=True) decimal_field = models.DecimalField(max_digits=5, decimal_places=2, blank=True, null=True) class ArrayFieldSubclass(ArrayField): def __init__(self, *args, **kwargs): super().__init__(models.IntegerField()) class AggregateTestModel(PostgreSQLModel): """ To test postgres-specific general aggregation functions """ char_field = models.CharField(max_length=30, blank=True) integer_field = models.IntegerField(null=True) boolean_field = models.BooleanField(null=True) json_field = models.JSONField(null=True) class StatTestModel(PostgreSQLModel): """ To test postgres-specific aggregation functions for statistics """ int1 = models.IntegerField() int2 = models.IntegerField() related_field = models.ForeignKey(AggregateTestModel, models.SET_NULL, null=True) class NowTestModel(models.Model): when = models.DateTimeField(null=True, default=None) class UUIDTestModel(models.Model): uuid = models.UUIDField(default=None, null=True) class Room(models.Model): number = models.IntegerField(unique=True) class HotelReservation(PostgreSQLModel): room = models.ForeignKey('Room', on_delete=models.CASCADE) datespan = DateRangeField() start = models.DateTimeField() end = models.DateTimeField() cancelled = models.BooleanField(default=False) requirements = models.JSONField(blank=True, null=True)
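A quick illustration (not from the suite) of the TagField conversions defined above: strings coming from the database become Tag instances, and Tag instances are reduced to their integer id on the way back in.

from .models import Tag, TagField  # assumes the usual postgres_tests app layout

field = TagField()
assert field.to_python('3') == Tag(3)      # string from the DB -> Tag instance
assert field.to_python(Tag(3)) == Tag(3)   # already a Tag -> returned unchanged
assert field.get_prep_value(Tag(3)) == 3   # Tag instance -> integer stored in the DB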
import unittest from unittest import mock from migrations.test_base import OperationTestBase from django.db import NotSupportedError, connection from django.db.migrations.state import ProjectState from django.db.models import Index from django.db.utils import ProgrammingError from django.test import modify_settings, override_settings, skipUnlessDBFeature from django.test.utils import CaptureQueriesContext from . import PostgreSQLTestCase try: from django.contrib.postgres.indexes import BrinIndex, BTreeIndex from django.contrib.postgres.operations import ( AddIndexConcurrently, BloomExtension, CreateCollation, CreateExtension, RemoveCollation, RemoveIndexConcurrently, ) except ImportError: pass @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.') @modify_settings(INSTALLED_APPS={'append': 'migrations'}) class AddIndexConcurrentlyTests(OperationTestBase): app_label = 'test_add_concurrently' def test_requires_atomic_false(self): project_state = self.set_up_test_model(self.app_label) new_state = project_state.clone() operation = AddIndexConcurrently( 'Pony', Index(fields=['pink'], name='pony_pink_idx'), ) msg = ( 'The AddIndexConcurrently operation cannot be executed inside ' 'a transaction (set atomic = False on the migration).' ) with self.assertRaisesMessage(NotSupportedError, msg): with connection.schema_editor(atomic=True) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) def test_add(self): project_state = self.set_up_test_model(self.app_label, index=False) table_name = '%s_pony' % self.app_label index = Index(fields=['pink'], name='pony_pink_idx') new_state = project_state.clone() operation = AddIndexConcurrently('Pony', index) self.assertEqual( operation.describe(), 'Concurrently create index pony_pink_idx on field(s) pink of ' 'model Pony' ) operation.state_forwards(self.app_label, new_state) self.assertEqual(len(new_state.models[self.app_label, 'pony'].options['indexes']), 1) self.assertIndexNotExists(table_name, ['pink']) # Add index. with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertIndexExists(table_name, ['pink']) # Reversal. with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertIndexNotExists(table_name, ['pink']) # Deconstruction. name, args, kwargs = operation.deconstruct() self.assertEqual(name, 'AddIndexConcurrently') self.assertEqual(args, []) self.assertEqual(kwargs, {'model_name': 'Pony', 'index': index}) def test_add_other_index_type(self): project_state = self.set_up_test_model(self.app_label, index=False) table_name = '%s_pony' % self.app_label new_state = project_state.clone() operation = AddIndexConcurrently( 'Pony', BrinIndex(fields=['pink'], name='pony_pink_brin_idx'), ) self.assertIndexNotExists(table_name, ['pink']) # Add index. with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertIndexExists(table_name, ['pink'], index_type='brin') # Reversal. 
with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertIndexNotExists(table_name, ['pink']) def test_add_with_options(self): project_state = self.set_up_test_model(self.app_label, index=False) table_name = '%s_pony' % self.app_label new_state = project_state.clone() index = BTreeIndex(fields=['pink'], name='pony_pink_btree_idx', fillfactor=70) operation = AddIndexConcurrently('Pony', index) self.assertIndexNotExists(table_name, ['pink']) # Add index. with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertIndexExists(table_name, ['pink'], index_type='btree') # Reversal. with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertIndexNotExists(table_name, ['pink']) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.') @modify_settings(INSTALLED_APPS={'append': 'migrations'}) class RemoveIndexConcurrentlyTests(OperationTestBase): app_label = 'test_rm_concurrently' def test_requires_atomic_false(self): project_state = self.set_up_test_model(self.app_label, index=True) new_state = project_state.clone() operation = RemoveIndexConcurrently('Pony', 'pony_pink_idx') msg = ( 'The RemoveIndexConcurrently operation cannot be executed inside ' 'a transaction (set atomic = False on the migration).' ) with self.assertRaisesMessage(NotSupportedError, msg): with connection.schema_editor(atomic=True) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) def test_remove(self): project_state = self.set_up_test_model(self.app_label, index=True) table_name = '%s_pony' % self.app_label self.assertTableExists(table_name) new_state = project_state.clone() operation = RemoveIndexConcurrently('Pony', 'pony_pink_idx') self.assertEqual( operation.describe(), 'Concurrently remove index pony_pink_idx from Pony', ) operation.state_forwards(self.app_label, new_state) self.assertEqual(len(new_state.models[self.app_label, 'pony'].options['indexes']), 0) self.assertIndexExists(table_name, ['pink']) # Remove index. with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertIndexNotExists(table_name, ['pink']) # Reversal. with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertIndexExists(table_name, ['pink']) # Deconstruction. name, args, kwargs = operation.deconstruct() self.assertEqual(name, 'RemoveIndexConcurrently') self.assertEqual(args, []) self.assertEqual(kwargs, {'model_name': 'Pony', 'name': 'pony_pink_idx'}) class NoMigrationRouter(): def allow_migrate(self, db, app_label, **hints): return False @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.') class CreateExtensionTests(PostgreSQLTestCase): app_label = 'test_allow_create_extention' @override_settings(DATABASE_ROUTERS=[NoMigrationRouter()]) def test_no_allow_migrate(self): operation = CreateExtension('tablefunc') project_state = ProjectState() new_state = project_state.clone() # Don't create an extension. 
with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 0) # Reversal. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertEqual(len(captured_queries), 0) def test_allow_migrate(self): operation = CreateExtension('tablefunc') self.assertEqual(operation.migration_name_fragment, 'create_extension_tablefunc') project_state = ProjectState() new_state = project_state.clone() # Create an extension. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 4) self.assertIn('CREATE EXTENSION IF NOT EXISTS', captured_queries[1]['sql']) # Reversal. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertEqual(len(captured_queries), 2) self.assertIn('DROP EXTENSION IF EXISTS', captured_queries[1]['sql']) def test_create_existing_extension(self): operation = BloomExtension() self.assertEqual(operation.migration_name_fragment, 'create_extension_bloom') project_state = ProjectState() new_state = project_state.clone() # Don't create an existing extension. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 3) self.assertIn('SELECT', captured_queries[0]['sql']) def test_drop_nonexistent_extension(self): operation = CreateExtension('tablefunc') project_state = ProjectState() new_state = project_state.clone() # Don't drop a nonexistent extension. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 1) self.assertIn('SELECT', captured_queries[0]['sql']) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.') class CreateCollationTests(PostgreSQLTestCase): app_label = 'test_allow_create_collation' @override_settings(DATABASE_ROUTERS=[NoMigrationRouter()]) def test_no_allow_migrate(self): operation = CreateCollation('C_test', locale='C') project_state = ProjectState() new_state = project_state.clone() # Don't create a collation. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 0) # Reversal. 
with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertEqual(len(captured_queries), 0) def test_create(self): operation = CreateCollation('C_test', locale='C') self.assertEqual(operation.migration_name_fragment, 'create_collation_c_test') self.assertEqual(operation.describe(), 'Create collation C_test') project_state = ProjectState() new_state = project_state.clone() # Create a collation. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 1) self.assertIn('CREATE COLLATION', captured_queries[0]['sql']) # Creating the same collation raises an exception. with self.assertRaisesMessage(ProgrammingError, 'already exists'): with connection.schema_editor(atomic=True) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) # Reversal. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertEqual(len(captured_queries), 1) self.assertIn('DROP COLLATION', captured_queries[0]['sql']) # Deconstruction. name, args, kwargs = operation.deconstruct() self.assertEqual(name, 'CreateCollation') self.assertEqual(args, []) self.assertEqual(kwargs, {'name': 'C_test', 'locale': 'C'}) @skipUnlessDBFeature('supports_non_deterministic_collations') def test_create_non_deterministic_collation(self): operation = CreateCollation( 'case_insensitive_test', 'und-u-ks-level2', provider='icu', deterministic=False, ) project_state = ProjectState() new_state = project_state.clone() # Create a collation. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 1) self.assertIn('CREATE COLLATION', captured_queries[0]['sql']) # Reversal. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertEqual(len(captured_queries), 1) self.assertIn('DROP COLLATION', captured_queries[0]['sql']) # Deconstruction. name, args, kwargs = operation.deconstruct() self.assertEqual(name, 'CreateCollation') self.assertEqual(args, []) self.assertEqual(kwargs, { 'name': 'case_insensitive_test', 'locale': 'und-u-ks-level2', 'provider': 'icu', 'deterministic': False, }) @skipUnlessDBFeature('supports_alternate_collation_providers') def test_create_collation_alternate_provider(self): operation = CreateCollation( 'german_phonebook_test', provider='icu', locale='de-u-co-phonebk', ) project_state = ProjectState() new_state = project_state.clone() # Create an collation. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 1) self.assertIn('CREATE COLLATION', captured_queries[0]['sql']) # Reversal. 
with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertEqual(len(captured_queries), 1) self.assertIn('DROP COLLATION', captured_queries[0]['sql']) def test_nondeterministic_collation_not_supported(self): operation = CreateCollation( 'case_insensitive_test', provider='icu', locale='und-u-ks-level2', deterministic=False, ) project_state = ProjectState() new_state = project_state.clone() msg = 'Non-deterministic collations require PostgreSQL 12+.' with connection.schema_editor(atomic=False) as editor: with mock.patch( 'django.db.backends.postgresql.features.DatabaseFeatures.' 'supports_non_deterministic_collations', False, ): with self.assertRaisesMessage(NotSupportedError, msg): operation.database_forwards(self.app_label, editor, project_state, new_state) def test_collation_with_icu_provider_raises_error(self): operation = CreateCollation( 'german_phonebook', provider='icu', locale='de-u-co-phonebk', ) project_state = ProjectState() new_state = project_state.clone() msg = 'Non-libc providers require PostgreSQL 10+.' with connection.schema_editor(atomic=False) as editor: with mock.patch( 'django.db.backends.postgresql.features.DatabaseFeatures.' 'supports_alternate_collation_providers', False, ): with self.assertRaisesMessage(NotSupportedError, msg): operation.database_forwards(self.app_label, editor, project_state, new_state) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.') class RemoveCollationTests(PostgreSQLTestCase): app_label = 'test_allow_remove_collation' @override_settings(DATABASE_ROUTERS=[NoMigrationRouter()]) def test_no_allow_migrate(self): operation = RemoveCollation('C_test', locale='C') project_state = ProjectState() new_state = project_state.clone() # Don't create a collation. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 0) # Reversal. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertEqual(len(captured_queries), 0) def test_remove(self): operation = CreateCollation('C_test', locale='C') project_state = ProjectState() new_state = project_state.clone() with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) operation = RemoveCollation('C_test', locale='C') self.assertEqual(operation.migration_name_fragment, 'remove_collation_c_test') self.assertEqual(operation.describe(), 'Remove collation C_test') project_state = ProjectState() new_state = project_state.clone() # Remove a collation. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertEqual(len(captured_queries), 1) self.assertIn('DROP COLLATION', captured_queries[0]['sql']) # Removing a nonexistent collation raises an exception. with self.assertRaisesMessage(ProgrammingError, 'does not exist'): with connection.schema_editor(atomic=True) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) # Reversal. 
with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertEqual(len(captured_queries), 1) self.assertIn('CREATE COLLATION', captured_queries[0]['sql']) # Deconstruction. name, args, kwargs = operation.deconstruct() self.assertEqual(name, 'RemoveCollation') self.assertEqual(args, []) self.assertEqual(kwargs, {'name': 'C_test', 'locale': 'C'})
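# For reference, the atomic=False requirement enforced by the operation tests
# above is what a user-facing migration has to declare. A minimal sketch
# (the app label and dependency are illustrative, not taken from these tests):
#
#     from django.contrib.postgres.operations import AddIndexConcurrently
#     from django.db import migrations
#     from django.db.models import Index
#
#     class Migration(migrations.Migration):
#         atomic = False  # CREATE/DROP INDEX CONCURRENTLY cannot run inside a transaction.
#         dependencies = [('myapp', '0001_initial')]
#         operations = [
#             AddIndexConcurrently('Pony', Index(fields=['pink'], name='pony_pink_idx')),
#         ]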
import datetime from unittest import mock from django.db import ( IntegrityError, NotSupportedError, connection, transaction, ) from django.db.models import ( CheckConstraint, Deferrable, F, Func, Q, UniqueConstraint, ) from django.db.models.fields.json import KeyTextTransform from django.db.models.functions import Left from django.test import skipUnlessDBFeature from django.utils import timezone from . import PostgreSQLTestCase from .models import HotelReservation, RangesModel, Room, Scene try: from psycopg2.extras import DateRange, NumericRange from django.contrib.postgres.constraints import ExclusionConstraint from django.contrib.postgres.fields import ( DateTimeRangeField, RangeBoundary, RangeOperators, ) except ImportError: pass class SchemaTests(PostgreSQLTestCase): get_opclass_query = ''' SELECT opcname, c.relname FROM pg_opclass AS oc JOIN pg_index as i on oc.oid = ANY(i.indclass) JOIN pg_class as c on c.oid = i.indexrelid WHERE c.relname = %s ''' def get_constraints(self, table): """Get the constraints on the table using a new cursor.""" with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def test_check_constraint_range_value(self): constraint_name = 'ints_between' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = CheckConstraint( check=Q(ints__contained_by=NumericRange(10, 30)), name=constraint_name, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(20, 50)) RangesModel.objects.create(ints=(10, 30)) def test_check_constraint_daterange_contains(self): constraint_name = 'dates_contains' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = CheckConstraint( check=Q(dates__contains=F('dates_inner')), name=constraint_name, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) date_1 = datetime.date(2016, 1, 1) date_2 = datetime.date(2016, 1, 4) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create( dates=(date_1, date_2), dates_inner=(date_1, date_2.replace(day=5)), ) RangesModel.objects.create( dates=(date_1, date_2), dates_inner=(date_1, date_2), ) def test_check_constraint_datetimerange_contains(self): constraint_name = 'timestamps_contains' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = CheckConstraint( check=Q(timestamps__contains=F('timestamps_inner')), name=constraint_name, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) datetime_1 = datetime.datetime(2016, 1, 1) datetime_2 = datetime.datetime(2016, 1, 2, 12) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create( timestamps=(datetime_1, datetime_2), timestamps_inner=(datetime_1, datetime_2.replace(hour=13)), ) RangesModel.objects.create( timestamps=(datetime_1, datetime_2), timestamps_inner=(datetime_1, datetime_2), ) def test_opclass(self): constraint = UniqueConstraint( name='test_opclass', fields=['scene'], opclasses=['varchar_pattern_ops'], ) with connection.schema_editor() as editor: editor.add_constraint(Scene, 
constraint) self.assertIn(constraint.name, self.get_constraints(Scene._meta.db_table)) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query, [constraint.name]) self.assertEqual( cursor.fetchall(), [('varchar_pattern_ops', constraint.name)], ) # Drop the constraint. with connection.schema_editor() as editor: editor.remove_constraint(Scene, constraint) self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table)) def test_opclass_multiple_columns(self): constraint = UniqueConstraint( name='test_opclass_multiple', fields=['scene', 'setting'], opclasses=['varchar_pattern_ops', 'text_pattern_ops'], ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query, [constraint.name]) expected_opclasses = ( ('varchar_pattern_ops', constraint.name), ('text_pattern_ops', constraint.name), ) self.assertCountEqual(cursor.fetchall(), expected_opclasses) def test_opclass_partial(self): constraint = UniqueConstraint( name='test_opclass_partial', fields=['scene'], opclasses=['varchar_pattern_ops'], condition=Q(setting__contains="Sir Bedemir's Castle"), ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query, [constraint.name]) self.assertCountEqual( cursor.fetchall(), [('varchar_pattern_ops', constraint.name)], ) @skipUnlessDBFeature('supports_covering_indexes') def test_opclass_include(self): constraint = UniqueConstraint( name='test_opclass_include', fields=['scene'], opclasses=['varchar_pattern_ops'], include=['setting'], ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query, [constraint.name]) self.assertCountEqual( cursor.fetchall(), [('varchar_pattern_ops', constraint.name)], ) class ExclusionConstraintTests(PostgreSQLTestCase): def get_constraints(self, table): """Get the constraints on the table using a new cursor.""" with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def test_invalid_condition(self): msg = 'ExclusionConstraint.condition must be a Q instance.' with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type='GIST', name='exclude_invalid_condition', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], condition=F('invalid'), ) def test_invalid_index_type(self): msg = 'Exclusion constraints only support GiST or SP-GiST indexes.' with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type='gin', name='exclude_invalid_index_type', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], ) def test_invalid_expressions(self): msg = 'The expressions must be a list of 2-tuples.' for expressions in (['foo'], [('foo')], [('foo_1', 'foo_2', 'foo_3')]): with self.subTest(expressions), self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type='GIST', name='exclude_invalid_expressions', expressions=expressions, ) def test_empty_expressions(self): msg = 'At least one expression is required to define an exclusion constraint.' 
for empty_expressions in (None, []): with self.subTest(empty_expressions), self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type='GIST', name='exclude_empty_expressions', expressions=empty_expressions, ) def test_invalid_deferrable(self): msg = 'ExclusionConstraint.deferrable must be a Deferrable instance.' with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name='exclude_invalid_deferrable', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], deferrable='invalid', ) def test_deferrable_with_condition(self): msg = 'ExclusionConstraint with conditions cannot be deferred.' with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name='exclude_invalid_condition', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], condition=Q(cancelled=False), deferrable=Deferrable.DEFERRED, ) def test_invalid_include_type(self): msg = 'ExclusionConstraint.include must be a list or tuple.' with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name='exclude_invalid_include', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], include='invalid', ) def test_invalid_include_index_type(self): msg = 'Covering exclusion constraints only support GiST indexes.' with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name='exclude_invalid_index_type', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], include=['cancelled'], index_type='spgist', ) def test_invalid_opclasses_type(self): msg = 'ExclusionConstraint.opclasses must be a list or tuple.' with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name='exclude_invalid_opclasses', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], opclasses='invalid', ) def test_opclasses_and_expressions_same_length(self): msg = ( 'ExclusionConstraint.expressions and ' 'ExclusionConstraint.opclasses must have the same number of ' 'elements.' 
) with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name='exclude_invalid_expressions_opclasses_length', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], opclasses=['foo', 'bar'], ) def test_repr(self): constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[ (F('datespan'), RangeOperators.OVERLAPS), (F('room'), RangeOperators.EQUAL), ], ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type=GIST, expressions=[" "(F(datespan), '&&'), (F(room), '=')]>", ) constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)], condition=Q(cancelled=False), index_type='SPGiST', ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type=SPGiST, expressions=[" "(F(datespan), '-|-')], condition=(AND: ('cancelled', False))>", ) constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)], deferrable=Deferrable.IMMEDIATE, ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type=GIST, expressions=[" "(F(datespan), '-|-')], deferrable=Deferrable.IMMEDIATE>", ) constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)], include=['cancelled', 'room'], ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type=GIST, expressions=[" "(F(datespan), '-|-')], include=('cancelled', 'room')>", ) constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)], opclasses=['range_ops'], ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type=GIST, expressions=[" "(F(datespan), '-|-')], opclasses=['range_ops']>", ) def test_eq(self): constraint_1 = ExclusionConstraint( name='exclude_overlapping', expressions=[ (F('datespan'), RangeOperators.OVERLAPS), (F('room'), RangeOperators.EQUAL), ], condition=Q(cancelled=False), ) constraint_2 = ExclusionConstraint( name='exclude_overlapping', expressions=[ ('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL), ], ) constraint_3 = ExclusionConstraint( name='exclude_overlapping', expressions=[('datespan', RangeOperators.OVERLAPS)], condition=Q(cancelled=False), ) constraint_4 = ExclusionConstraint( name='exclude_overlapping', expressions=[ ('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL), ], deferrable=Deferrable.DEFERRED, ) constraint_5 = ExclusionConstraint( name='exclude_overlapping', expressions=[ ('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL), ], deferrable=Deferrable.IMMEDIATE, ) constraint_6 = ExclusionConstraint( name='exclude_overlapping', expressions=[ ('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL), ], deferrable=Deferrable.IMMEDIATE, include=['cancelled'], ) constraint_7 = ExclusionConstraint( name='exclude_overlapping', expressions=[ ('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL), ], include=['cancelled'], ) constraint_8 = ExclusionConstraint( name='exclude_overlapping', expressions=[ ('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL), ], include=['cancelled'], opclasses=['range_ops', 'range_ops'] ) constraint_9 = ExclusionConstraint( name='exclude_overlapping', expressions=[ ('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL), ], opclasses=['range_ops', 'range_ops'] ) self.assertEqual(constraint_1, constraint_1) self.assertEqual(constraint_1, mock.ANY) 
self.assertNotEqual(constraint_1, constraint_2) self.assertNotEqual(constraint_1, constraint_3) self.assertNotEqual(constraint_1, constraint_4) self.assertNotEqual(constraint_2, constraint_3) self.assertNotEqual(constraint_2, constraint_4) self.assertNotEqual(constraint_2, constraint_7) self.assertNotEqual(constraint_2, constraint_9) self.assertNotEqual(constraint_4, constraint_5) self.assertNotEqual(constraint_5, constraint_6) self.assertNotEqual(constraint_7, constraint_8) self.assertNotEqual(constraint_1, object()) def test_deconstruct(self): constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, { 'name': 'exclude_overlapping', 'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], }) def test_deconstruct_index_type(self): constraint = ExclusionConstraint( name='exclude_overlapping', index_type='SPGIST', expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, { 'name': 'exclude_overlapping', 'index_type': 'SPGIST', 'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], }) def test_deconstruct_condition(self): constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], condition=Q(cancelled=False), ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, { 'name': 'exclude_overlapping', 'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], 'condition': Q(cancelled=False), }) def test_deconstruct_deferrable(self): constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[('datespan', RangeOperators.OVERLAPS)], deferrable=Deferrable.DEFERRED, ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, { 'name': 'exclude_overlapping', 'expressions': [('datespan', RangeOperators.OVERLAPS)], 'deferrable': Deferrable.DEFERRED, }) def test_deconstruct_include(self): constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[('datespan', RangeOperators.OVERLAPS)], include=['cancelled', 'room'], ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, { 'name': 'exclude_overlapping', 'expressions': [('datespan', RangeOperators.OVERLAPS)], 'include': ('cancelled', 'room'), }) def test_deconstruct_opclasses(self): constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[('datespan', RangeOperators.OVERLAPS)], opclasses=['range_ops'], ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, { 'name': 'exclude_overlapping', 'expressions': [('datespan', RangeOperators.OVERLAPS)], 'opclasses': 
['range_ops'], }) def _test_range_overlaps(self, constraint): # Create exclusion constraint. self.assertNotIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table)) with connection.schema_editor() as editor: editor.add_constraint(HotelReservation, constraint) self.assertIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table)) # Add initial reservations. room101 = Room.objects.create(number=101) room102 = Room.objects.create(number=102) datetimes = [ timezone.datetime(2018, 6, 20), timezone.datetime(2018, 6, 24), timezone.datetime(2018, 6, 26), timezone.datetime(2018, 6, 28), timezone.datetime(2018, 6, 29), ] HotelReservation.objects.create( datespan=DateRange(datetimes[0].date(), datetimes[1].date()), start=datetimes[0], end=datetimes[1], room=room102, ) HotelReservation.objects.create( datespan=DateRange(datetimes[1].date(), datetimes[3].date()), start=datetimes[1], end=datetimes[3], room=room102, ) # Overlap dates. with self.assertRaises(IntegrityError), transaction.atomic(): reservation = HotelReservation( datespan=(datetimes[1].date(), datetimes[2].date()), start=datetimes[1], end=datetimes[2], room=room102, ) reservation.save() # Valid range. HotelReservation.objects.bulk_create([ # Other room. HotelReservation( datespan=(datetimes[1].date(), datetimes[2].date()), start=datetimes[1], end=datetimes[2], room=room101, ), # Cancelled reservation. HotelReservation( datespan=(datetimes[1].date(), datetimes[1].date()), start=datetimes[1], end=datetimes[2], room=room102, cancelled=True, ), # Other adjacent dates. HotelReservation( datespan=(datetimes[3].date(), datetimes[4].date()), start=datetimes[3], end=datetimes[4], room=room102, ), ]) def test_range_overlaps_custom(self): class TsTzRange(Func): function = 'TSTZRANGE' output_field = DateTimeRangeField() constraint = ExclusionConstraint( name='exclude_overlapping_reservations_custom', expressions=[ (TsTzRange('start', 'end', RangeBoundary()), RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL) ], condition=Q(cancelled=False), opclasses=['range_ops', 'gist_int4_ops'], ) self._test_range_overlaps(constraint) def test_range_overlaps(self): constraint = ExclusionConstraint( name='exclude_overlapping_reservations', expressions=[ (F('datespan'), RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL) ], condition=Q(cancelled=False), ) self._test_range_overlaps(constraint) def test_range_adjacent(self): constraint_name = 'ints_adjacent' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) RangesModel.objects.create(ints=(20, 50)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(10, 20)) RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) # Drop the constraint. 
with connection.schema_editor() as editor: editor.remove_constraint(RangesModel, constraint) self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) def test_expressions_with_params(self): constraint_name = 'scene_left_equal' self.assertNotIn(constraint_name, self.get_constraints(Scene._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[(Left('scene', 4), RangeOperators.EQUAL)], ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) self.assertIn(constraint_name, self.get_constraints(Scene._meta.db_table)) def test_expressions_with_key_transform(self): constraint_name = 'exclude_overlapping_reservations_smoking' constraint = ExclusionConstraint( name=constraint_name, expressions=[ (F('datespan'), RangeOperators.OVERLAPS), (KeyTextTransform('smoking', 'requirements'), RangeOperators.EQUAL), ], ) with connection.schema_editor() as editor: editor.add_constraint(HotelReservation, constraint) self.assertIn( constraint_name, self.get_constraints(HotelReservation._meta.db_table), ) def test_range_adjacent_initially_deferred(self): constraint_name = 'ints_adjacent_deferred' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], deferrable=Deferrable.DEFERRED, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) RangesModel.objects.create(ints=(20, 50)) adjacent_range = RangesModel.objects.create(ints=(10, 20)) # Constraint behavior can be changed with SET CONSTRAINTS. with self.assertRaises(IntegrityError): with transaction.atomic(), connection.cursor() as cursor: quoted_name = connection.ops.quote_name(constraint_name) cursor.execute('SET CONSTRAINTS %s IMMEDIATE' % quoted_name) # Remove adjacent range before the end of transaction. 
adjacent_range.delete() RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) @skipUnlessDBFeature('supports_covering_gist_indexes') def test_range_adjacent_include(self): constraint_name = 'ints_adjacent_include' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], include=['decimals', 'ints'], index_type='gist', ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) RangesModel.objects.create(ints=(20, 50)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(10, 20)) RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) @skipUnlessDBFeature('supports_covering_gist_indexes') def test_range_adjacent_include_condition(self): constraint_name = 'ints_adjacent_include_condition' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], include=['decimals'], condition=Q(id__gte=100), ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) @skipUnlessDBFeature('supports_covering_gist_indexes') def test_range_adjacent_include_deferrable(self): constraint_name = 'ints_adjacent_include_deferrable' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], include=['decimals'], deferrable=Deferrable.DEFERRED, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) def test_include_not_supported(self): constraint_name = 'ints_adjacent_include_not_supported' constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], include=['id'], ) msg = 'Covering exclusion constraints requires PostgreSQL 12+.' with connection.schema_editor() as editor: with mock.patch( 'django.db.backends.postgresql.features.DatabaseFeatures.supports_covering_gist_indexes', False, ): with self.assertRaisesMessage(NotSupportedError, msg): editor.add_constraint(RangesModel, constraint) def test_range_adjacent_opclasses(self): constraint_name = 'ints_adjacent_opclasses' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], opclasses=['range_ops'], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) RangesModel.objects.create(ints=(20, 50)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(10, 20)) RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) # Drop the constraint. 
with connection.schema_editor() as editor: editor.remove_constraint(RangesModel, constraint) self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) def test_range_adjacent_opclasses_condition(self): constraint_name = 'ints_adjacent_opclasses_condition' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], opclasses=['range_ops'], condition=Q(id__gte=100), ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) def test_range_adjacent_opclasses_deferrable(self): constraint_name = 'ints_adjacent_opclasses_deferrable' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], opclasses=['range_ops'], deferrable=Deferrable.DEFERRED, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) @skipUnlessDBFeature('supports_covering_gist_indexes') def test_range_adjacent_opclasses_include(self): constraint_name = 'ints_adjacent_opclasses_include' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], opclasses=['range_ops'], include=['decimals'], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
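# Outside of the schema-editor calls used in these tests, the same exclusion
# constraints are normally declared on a model's Meta. A minimal sketch (the
# Meta placement is standard constraint usage, not something spelled out by
# these tests; the expressions mirror test_range_overlaps above):
#
#     class HotelReservation(models.Model):
#         ...
#         class Meta:
#             constraints = [
#                 ExclusionConstraint(
#                     name='exclude_overlapping_reservations',
#                     expressions=[
#                         ('datespan', RangeOperators.OVERLAPS),
#                         ('room', RangeOperators.EQUAL),
#                     ],
#                     condition=Q(cancelled=False),
#                 ),
#             ]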
import datetime from xml.dom import minidom from django.contrib.sites.models import Site from django.contrib.syndication import views from django.core.exceptions import ImproperlyConfigured from django.test import TestCase, override_settings from django.test.utils import requires_tz_support from django.utils import timezone from django.utils.feedgenerator import rfc2822_date, rfc3339_date from .models import Article, Entry TZ = timezone.get_default_timezone() class FeedTestCase(TestCase): @classmethod def setUpTestData(cls): cls.e1 = Entry.objects.create( title='My first entry', updated=datetime.datetime(1980, 1, 1, 12, 30), published=datetime.datetime(1986, 9, 25, 20, 15, 00) ) cls.e2 = Entry.objects.create( title='My second entry', updated=datetime.datetime(2008, 1, 2, 12, 30), published=datetime.datetime(2006, 3, 17, 18, 0) ) cls.e3 = Entry.objects.create( title='My third entry', updated=datetime.datetime(2008, 1, 2, 13, 30), published=datetime.datetime(2005, 6, 14, 10, 45) ) cls.e4 = Entry.objects.create( title='A & B < C > D', updated=datetime.datetime(2008, 1, 3, 13, 30), published=datetime.datetime(2005, 11, 25, 12, 11, 23) ) cls.e5 = Entry.objects.create( title='My last entry', updated=datetime.datetime(2013, 1, 20, 0, 0), published=datetime.datetime(2013, 3, 25, 20, 0) ) cls.a1 = Article.objects.create( title='My first article', entry=cls.e1, updated=datetime.datetime(1986, 11, 21, 9, 12, 18), published=datetime.datetime(1986, 10, 21, 9, 12, 18), ) def assertChildNodes(self, elem, expected): actual = {n.nodeName for n in elem.childNodes} expected = set(expected) self.assertEqual(actual, expected) def assertChildNodeContent(self, elem, expected): for k, v in expected.items(): self.assertEqual( elem.getElementsByTagName(k)[0].firstChild.wholeText, v) def assertCategories(self, elem, expected): self.assertEqual( {i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'}, set(expected) ) @override_settings(ROOT_URLCONF='syndication_tests.urls') class SyndicationFeedTest(FeedTestCase): """ Tests for the high-level syndication feed framework. """ @classmethod def setUpClass(cls): super().setUpClass() # This cleanup is necessary because contrib.sites cache # makes tests interfere with each other, see #11505 Site.objects.clear_cache() def test_rss2_feed(self): """ Test the structure and content of feeds generated by Rss201rev2Feed. """ response = self.client.get('/syndication/rss2/') doc = minidom.parseString(response.content) # Making sure there's only 1 `rss` element and that the correct # RSS version was specified. feed_elem = doc.getElementsByTagName('rss') self.assertEqual(len(feed_elem), 1) feed = feed_elem[0] self.assertEqual(feed.getAttribute('version'), '2.0') self.assertEqual(feed.getElementsByTagName('language')[0].firstChild.nodeValue, 'en') # Making sure there's only one `channel` element w/in the # `rss` element. 
chan_elem = feed.getElementsByTagName('channel') self.assertEqual(len(chan_elem), 1) chan = chan_elem[0] # Find the last build date d = Entry.objects.latest('published').published last_build_date = rfc2822_date(timezone.make_aware(d, TZ)) self.assertChildNodes( chan, [ 'title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category', ] ) self.assertChildNodeContent(chan, { 'title': 'My blog', 'description': 'A more thorough description of my blog.', 'link': 'http://example.com/blog/', 'language': 'en', 'lastBuildDate': last_build_date, 'ttl': '600', 'copyright': 'Copyright (c) 2007, Sally Smith', }) self.assertCategories(chan, ['python', 'django']) # Ensure the content of the channel is correct self.assertChildNodeContent(chan, { 'title': 'My blog', 'link': 'http://example.com/blog/', }) # Check feed_url is passed self.assertEqual( chan.getElementsByTagName('atom:link')[0].getAttribute('href'), 'http://example.com/syndication/rss2/' ) # Find the pubdate of the first feed item d = Entry.objects.get(pk=self.e1.pk).published pub_date = rfc2822_date(timezone.make_aware(d, TZ)) items = chan.getElementsByTagName('item') self.assertEqual(len(items), Entry.objects.count()) self.assertChildNodeContent(items[0], { 'title': 'My first entry', 'description': 'Overridden description: My first entry', 'link': 'http://example.com/blog/%s/' % self.e1.pk, 'guid': 'http://example.com/blog/%s/' % self.e1.pk, 'pubDate': pub_date, 'author': '[email protected] (Sally Smith)', 'comments': '/blog/%s/comments' % self.e1.pk, }) self.assertCategories(items[0], ['python', 'testing']) for item in items: self.assertChildNodes(item, [ 'title', 'link', 'description', 'guid', 'category', 'pubDate', 'author', 'comments', ]) # Assert that <guid> does not have any 'isPermaLink' attribute self.assertIsNone(item.getElementsByTagName( 'guid')[0].attributes.get('isPermaLink')) def test_rss2_feed_guid_permalink_false(self): """ Test if the 'isPermaLink' attribute of <guid> element of an item in the RSS feed is 'false'. """ response = self.client.get( '/syndication/rss2/guid_ispermalink_false/') doc = minidom.parseString(response.content) chan = doc.getElementsByTagName( 'rss')[0].getElementsByTagName('channel')[0] items = chan.getElementsByTagName('item') for item in items: self.assertEqual( item.getElementsByTagName('guid')[0].attributes.get( 'isPermaLink').value, "false") def test_rss2_feed_guid_permalink_true(self): """ Test if the 'isPermaLink' attribute of <guid> element of an item in the RSS feed is 'true'. 
""" response = self.client.get( '/syndication/rss2/guid_ispermalink_true/') doc = minidom.parseString(response.content) chan = doc.getElementsByTagName( 'rss')[0].getElementsByTagName('channel')[0] items = chan.getElementsByTagName('item') for item in items: self.assertEqual( item.getElementsByTagName('guid')[0].attributes.get( 'isPermaLink').value, "true") def test_rss2_single_enclosure(self): response = self.client.get('/syndication/rss2/single-enclosure/') doc = minidom.parseString(response.content) chan = doc.getElementsByTagName('rss')[0].getElementsByTagName('channel')[0] items = chan.getElementsByTagName('item') for item in items: enclosures = item.getElementsByTagName('enclosure') self.assertEqual(len(enclosures), 1) def test_rss2_multiple_enclosures(self): with self.assertRaisesMessage( ValueError, "RSS feed items may only have one enclosure, see " "http://www.rssboard.org/rss-profile#element-channel-item-enclosure" ): self.client.get('/syndication/rss2/multiple-enclosure/') def test_rss091_feed(self): """ Test the structure and content of feeds generated by RssUserland091Feed. """ response = self.client.get('/syndication/rss091/') doc = minidom.parseString(response.content) # Making sure there's only 1 `rss` element and that the correct # RSS version was specified. feed_elem = doc.getElementsByTagName('rss') self.assertEqual(len(feed_elem), 1) feed = feed_elem[0] self.assertEqual(feed.getAttribute('version'), '0.91') # Making sure there's only one `channel` element w/in the # `rss` element. chan_elem = feed.getElementsByTagName('channel') self.assertEqual(len(chan_elem), 1) chan = chan_elem[0] self.assertChildNodes( chan, [ 'title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category', ] ) # Ensure the content of the channel is correct self.assertChildNodeContent(chan, { 'title': 'My blog', 'link': 'http://example.com/blog/', }) self.assertCategories(chan, ['python', 'django']) # Check feed_url is passed self.assertEqual( chan.getElementsByTagName('atom:link')[0].getAttribute('href'), 'http://example.com/syndication/rss091/' ) items = chan.getElementsByTagName('item') self.assertEqual(len(items), Entry.objects.count()) self.assertChildNodeContent(items[0], { 'title': 'My first entry', 'description': 'Overridden description: My first entry', 'link': 'http://example.com/blog/%s/' % self.e1.pk, }) for item in items: self.assertChildNodes(item, ['title', 'link', 'description']) self.assertCategories(item, []) def test_atom_feed(self): """ Test the structure and content of feeds generated by Atom1Feed. 
""" response = self.client.get('/syndication/atom/') feed = minidom.parseString(response.content).firstChild self.assertEqual(feed.nodeName, 'feed') self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom') self.assertChildNodes( feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'] ) for link in feed.getElementsByTagName('link'): if link.getAttribute('rel') == 'self': self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/') entries = feed.getElementsByTagName('entry') self.assertEqual(len(entries), Entry.objects.count()) for entry in entries: self.assertChildNodes(entry, [ 'title', 'link', 'id', 'summary', 'category', 'updated', 'published', 'rights', 'author', ]) summary = entry.getElementsByTagName('summary')[0] self.assertEqual(summary.getAttribute('type'), 'html') def test_atom_feed_published_and_updated_elements(self): """ The published and updated elements are not the same and now adhere to RFC 4287. """ response = self.client.get('/syndication/atom/') feed = minidom.parseString(response.content).firstChild entries = feed.getElementsByTagName('entry') published = entries[0].getElementsByTagName('published')[0].firstChild.wholeText updated = entries[0].getElementsByTagName('updated')[0].firstChild.wholeText self.assertNotEqual(published, updated) def test_atom_single_enclosure(self): response = self.client.get('/syndication/atom/single-enclosure/') feed = minidom.parseString(response.content).firstChild items = feed.getElementsByTagName('entry') for item in items: links = item.getElementsByTagName('link') links = [link for link in links if link.getAttribute('rel') == 'enclosure'] self.assertEqual(len(links), 1) def test_atom_multiple_enclosures(self): response = self.client.get('/syndication/atom/multiple-enclosure/') feed = minidom.parseString(response.content).firstChild items = feed.getElementsByTagName('entry') for item in items: links = item.getElementsByTagName('link') links = [link for link in links if link.getAttribute('rel') == 'enclosure'] self.assertEqual(len(links), 2) def test_latest_post_date(self): """ Both the published and updated dates are considered when determining the latest post date. 
""" # this feed has a `published` element with the latest date response = self.client.get('/syndication/atom/') feed = minidom.parseString(response.content).firstChild updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText d = Entry.objects.latest('published').published latest_published = rfc3339_date(timezone.make_aware(d, TZ)) self.assertEqual(updated, latest_published) # this feed has an `updated` element with the latest date response = self.client.get('/syndication/latest/') feed = minidom.parseString(response.content).firstChild updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText d = Entry.objects.exclude(title='My last entry').latest('updated').updated latest_updated = rfc3339_date(timezone.make_aware(d, TZ)) self.assertEqual(updated, latest_updated) def test_custom_feed_generator(self): response = self.client.get('/syndication/custom/') feed = minidom.parseString(response.content).firstChild self.assertEqual(feed.nodeName, 'feed') self.assertEqual(feed.getAttribute('django'), 'rocks') self.assertChildNodes( feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'] ) entries = feed.getElementsByTagName('entry') self.assertEqual(len(entries), Entry.objects.count()) for entry in entries: self.assertEqual(entry.getAttribute('bacon'), 'yum') self.assertChildNodes(entry, [ 'title', 'link', 'id', 'summary', 'ministry', 'rights', 'author', 'updated', 'published', 'category', ]) summary = entry.getElementsByTagName('summary')[0] self.assertEqual(summary.getAttribute('type'), 'html') def test_feed_generator_language_attribute(self): response = self.client.get('/syndication/language/') feed = minidom.parseString(response.content).firstChild self.assertEqual(feed.firstChild.getElementsByTagName('language')[0].firstChild.nodeValue, 'de') def test_title_escaping(self): """ Titles are escaped correctly in RSS feeds. """ response = self.client.get('/syndication/rss2/') doc = minidom.parseString(response.content) for item in doc.getElementsByTagName('item'): link = item.getElementsByTagName('link')[0] if link.firstChild.wholeText == 'http://example.com/blog/4/': title = item.getElementsByTagName('title')[0] self.assertEqual(title.firstChild.wholeText, 'A &amp; B &lt; C &gt; D') def test_naive_datetime_conversion(self): """ Datetimes are correctly converted to the local time zone. """ # Naive date times passed in get converted to the local time zone, so # check the received zone offset against the local offset. response = self.client.get('/syndication/naive-dates/') doc = minidom.parseString(response.content) updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText d = Entry.objects.latest('published').published latest = rfc3339_date(timezone.make_aware(d, TZ)) self.assertEqual(updated, latest) def test_aware_datetime_conversion(self): """ Datetimes with timezones don't get trodden on. """ response = self.client.get('/syndication/aware-dates/') doc = minidom.parseString(response.content) published = doc.getElementsByTagName('published')[0].firstChild.wholeText self.assertEqual(published[-6:], '+00:42') @requires_tz_support def test_feed_last_modified_time_naive_date(self): """ Tests the Last-Modified header with naive publication dates. """ response = self.client.get('/syndication/naive-dates/') self.assertEqual(response.headers['Last-Modified'], 'Tue, 26 Mar 2013 01:00:00 GMT') def test_feed_last_modified_time(self): """ Tests the Last-Modified header with aware publication dates. 
""" response = self.client.get('/syndication/aware-dates/') self.assertEqual(response.headers['Last-Modified'], 'Mon, 25 Mar 2013 19:18:00 GMT') # No last-modified when feed has no item_pubdate response = self.client.get('/syndication/no_pubdate/') self.assertFalse(response.has_header('Last-Modified')) def test_feed_url(self): """ The feed_url can be overridden. """ response = self.client.get('/syndication/feedurl/') doc = minidom.parseString(response.content) for link in doc.getElementsByTagName('link'): if link.getAttribute('rel') == 'self': self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/') def test_secure_urls(self): """ Test URLs are prefixed with https:// when feed is requested over HTTPS. """ response = self.client.get('/syndication/rss2/', **{ 'wsgi.url_scheme': 'https', }) doc = minidom.parseString(response.content) chan = doc.getElementsByTagName('channel')[0] self.assertEqual( chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5], 'https' ) atom_link = chan.getElementsByTagName('atom:link')[0] self.assertEqual(atom_link.getAttribute('href')[0:5], 'https') for link in doc.getElementsByTagName('link'): if link.getAttribute('rel') == 'self': self.assertEqual(link.getAttribute('href')[0:5], 'https') def test_item_link_error(self): """ An ImproperlyConfigured is raised if no link could be found for the item(s). """ msg = ( 'Give your Article class a get_absolute_url() method, or define ' 'an item_link() method in your Feed class.' ) with self.assertRaisesMessage(ImproperlyConfigured, msg): self.client.get('/syndication/articles/') def test_template_feed(self): """ The item title and description can be overridden with templates. """ response = self.client.get('/syndication/template/') doc = minidom.parseString(response.content) feed = doc.getElementsByTagName('rss')[0] chan = feed.getElementsByTagName('channel')[0] items = chan.getElementsByTagName('item') self.assertChildNodeContent(items[0], { 'title': 'Title in your templates: My first entry\n', 'description': 'Description in your templates: My first entry\n', 'link': 'http://example.com/blog/%s/' % self.e1.pk, }) def test_template_context_feed(self): """ Custom context data can be passed to templates for title and description. """ response = self.client.get('/syndication/template_context/') doc = minidom.parseString(response.content) feed = doc.getElementsByTagName('rss')[0] chan = feed.getElementsByTagName('channel')[0] items = chan.getElementsByTagName('item') self.assertChildNodeContent(items[0], { 'title': 'My first entry (foo is bar)\n', 'description': 'My first entry (foo is bar)\n', }) def test_add_domain(self): """ add_domain() prefixes domains onto the correct URLs. 
""" prefix_domain_mapping = ( (('example.com', '/foo/?arg=value'), 'http://example.com/foo/?arg=value'), (('example.com', '/foo/?arg=value', True), 'https://example.com/foo/?arg=value'), (('example.com', 'http://djangoproject.com/doc/'), 'http://djangoproject.com/doc/'), (('example.com', 'https://djangoproject.com/doc/'), 'https://djangoproject.com/doc/'), (('example.com', 'mailto:[email protected]'), 'mailto:[email protected]'), (('example.com', '//example.com/foo/?arg=value'), 'http://example.com/foo/?arg=value'), ) for prefix in prefix_domain_mapping: with self.subTest(prefix=prefix): self.assertEqual(views.add_domain(*prefix[0]), prefix[1]) def test_get_object(self): response = self.client.get('/syndication/rss2/articles/%s/' % self.e1.pk) doc = minidom.parseString(response.content) feed = doc.getElementsByTagName('rss')[0] chan = feed.getElementsByTagName('channel')[0] items = chan.getElementsByTagName('item') self.assertChildNodeContent(items[0], { 'comments': '/blog/%s/article/%s/comments' % (self.e1.pk, self.a1.pk), 'description': 'Article description: My first article', 'link': 'http://example.com/blog/%s/article/%s/' % (self.e1.pk, self.a1.pk), 'title': 'Title: My first article', 'pubDate': rfc2822_date(timezone.make_aware(self.a1.published, TZ)), }) def test_get_non_existent_object(self): response = self.client.get('/syndication/rss2/articles/0/') self.assertEqual(response.status_code, 404)
from django.db import models


class Entry(models.Model):
    title = models.CharField(max_length=200)
    updated = models.DateTimeField()
    published = models.DateTimeField()

    class Meta:
        ordering = ('updated',)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return "/blog/%s/" % self.pk


class Article(models.Model):
    title = models.CharField(max_length=200)
    entry = models.ForeignKey(Entry, models.CASCADE)
    updated = models.DateTimeField()
    published = models.DateTimeField()

    class Meta:
        ordering = ['updated']
from django.contrib.syndication import views from django.utils import feedgenerator from django.utils.timezone import get_fixed_timezone from .models import Article, Entry class TestRss2Feed(views.Feed): title = 'My blog' description = 'A more thorough description of my blog.' link = '/blog/' feed_guid = '/foo/bar/1234' author_name = 'Sally Smith' author_email = '[email protected]' author_link = 'http://www.example.com/' categories = ('python', 'django') feed_copyright = 'Copyright (c) 2007, Sally Smith' ttl = 600 def items(self): return Entry.objects.all() def item_description(self, item): return "Overridden description: %s" % item def item_pubdate(self, item): return item.published def item_updateddate(self, item): return item.updated def item_comments(self, item): return "%scomments" % item.get_absolute_url() item_author_name = 'Sally Smith' item_author_email = '[email protected]' item_author_link = 'http://www.example.com/' item_categories = ('python', 'testing') item_copyright = 'Copyright (c) 2007, Sally Smith' class TestRss2FeedWithGuidIsPermaLinkTrue(TestRss2Feed): def item_guid_is_permalink(self, item): return True class TestRss2FeedWithGuidIsPermaLinkFalse(TestRss2Feed): def item_guid(self, item): return str(item.pk) def item_guid_is_permalink(self, item): return False class TestRss091Feed(TestRss2Feed): feed_type = feedgenerator.RssUserland091Feed class TestNoPubdateFeed(views.Feed): title = 'Test feed' link = '/feed/' def items(self): return Entry.objects.all() class TestAtomFeed(TestRss2Feed): feed_type = feedgenerator.Atom1Feed subtitle = TestRss2Feed.description class TestLatestFeed(TestRss2Feed): """ A feed where the latest entry date is an `updated` element. """ feed_type = feedgenerator.Atom1Feed subtitle = TestRss2Feed.description def items(self): return Entry.objects.exclude(title='My last entry') class ArticlesFeed(TestRss2Feed): """ A feed to test no link being defined. Articles have no get_absolute_url() method, and item_link() is not defined. """ def items(self): return Article.objects.all() class TestSingleEnclosureRSSFeed(TestRss2Feed): """ A feed to test that RSS feeds work with a single enclosure. """ def item_enclosure_url(self, item): return 'http://example.com' def item_enclosure_size(self, item): return 0 def item_mime_type(self, item): return 'image/png' class TestMultipleEnclosureRSSFeed(TestRss2Feed): """ A feed to test that RSS feeds raise an exception with multiple enclosures. """ def item_enclosures(self, item): return [ feedgenerator.Enclosure('http://example.com/hello.png', 0, 'image/png'), feedgenerator.Enclosure('http://example.com/goodbye.png', 0, 'image/png'), ] class TemplateFeed(TestRss2Feed): """ A feed to test defining item titles and descriptions with templates. """ title_template = 'syndication/title.html' description_template = 'syndication/description.html' # Defining a template overrides any item_title definition def item_title(self): return "Not in a template" class TemplateContextFeed(TestRss2Feed): """ A feed to test custom context data in templates for title or description. 
""" title_template = 'syndication/title_context.html' description_template = 'syndication/description_context.html' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['foo'] = 'bar' return context class TestLanguageFeed(TestRss2Feed): language = 'de' class TestGetObjectFeed(TestRss2Feed): def get_object(self, request, entry_id): return Entry.objects.get(pk=entry_id) def items(self, obj): return Article.objects.filter(entry=obj) def item_link(self, item): return '%sarticle/%s/' % (item.entry.get_absolute_url(), item.pk) def item_comments(self, item): return '%scomments' % self.item_link(item) def item_description(self, item): return 'Article description: %s' % item.title def item_title(self, item): return 'Title: %s' % item.title class NaiveDatesFeed(TestAtomFeed): """ A feed with naive (non-timezone-aware) dates. """ def item_pubdate(self, item): return item.published class TZAwareDatesFeed(TestAtomFeed): """ A feed with timezone-aware dates. """ def item_pubdate(self, item): # Provide a weird offset so that the test can know it's getting this # specific offset and not accidentally getting on from # settings.TIME_ZONE. return item.published.replace(tzinfo=get_fixed_timezone(42)) class TestFeedUrlFeed(TestAtomFeed): feed_url = 'http://example.com/customfeedurl/' class MyCustomAtom1Feed(feedgenerator.Atom1Feed): """ Test of a custom feed generator class. """ def root_attributes(self): attrs = super().root_attributes() attrs['django'] = 'rocks' return attrs def add_root_elements(self, handler): super().add_root_elements(handler) handler.addQuickElement('spam', 'eggs') def item_attributes(self, item): attrs = super().item_attributes(item) attrs['bacon'] = 'yum' return attrs def add_item_elements(self, handler, item): super().add_item_elements(handler, item) handler.addQuickElement('ministry', 'silly walks') class TestCustomFeed(TestAtomFeed): feed_type = MyCustomAtom1Feed class TestSingleEnclosureAtomFeed(TestAtomFeed): """ A feed to test that Atom feeds work with a single enclosure. """ def item_enclosure_url(self, item): return 'http://example.com' def item_enclosure_size(self, item): return 0 def item_mime_type(self, item): return 'image/png' class TestMultipleEnclosureAtomFeed(TestAtomFeed): """ A feed to test that Atom feeds work with multiple enclosures. """ def item_enclosures(self, item): return [ feedgenerator.Enclosure('http://example.com/hello.png', '0', 'image/png'), feedgenerator.Enclosure('http://example.com/goodbye.png', '0', 'image/png'), ]
from django.urls import path

from . import feeds

urlpatterns = [
    path('syndication/rss2/', feeds.TestRss2Feed()),
    path('syndication/rss2/articles/<int:entry_id>/', feeds.TestGetObjectFeed()),
    path(
        'syndication/rss2/guid_ispermalink_true/',
        feeds.TestRss2FeedWithGuidIsPermaLinkTrue()),
    path(
        'syndication/rss2/guid_ispermalink_false/',
        feeds.TestRss2FeedWithGuidIsPermaLinkFalse()),
    path('syndication/rss091/', feeds.TestRss091Feed()),
    path('syndication/no_pubdate/', feeds.TestNoPubdateFeed()),
    path('syndication/atom/', feeds.TestAtomFeed()),
    path('syndication/latest/', feeds.TestLatestFeed()),
    path('syndication/custom/', feeds.TestCustomFeed()),
    path('syndication/language/', feeds.TestLanguageFeed()),
    path('syndication/naive-dates/', feeds.NaiveDatesFeed()),
    path('syndication/aware-dates/', feeds.TZAwareDatesFeed()),
    path('syndication/feedurl/', feeds.TestFeedUrlFeed()),
    path('syndication/articles/', feeds.ArticlesFeed()),
    path('syndication/template/', feeds.TemplateFeed()),
    path('syndication/template_context/', feeds.TemplateContextFeed()),
    path('syndication/rss2/single-enclosure/', feeds.TestSingleEnclosureRSSFeed()),
    path('syndication/rss2/multiple-enclosure/', feeds.TestMultipleEnclosureRSSFeed()),
    path('syndication/atom/single-enclosure/', feeds.TestSingleEnclosureAtomFeed()),
    path('syndication/atom/multiple-enclosure/', feeds.TestMultipleEnclosureAtomFeed()),
]
import datetime from decimal import Decimal from django.core.exceptions import FieldDoesNotExist, FieldError from django.db import connection from django.db.models import ( BooleanField, Case, Count, DateTimeField, Exists, ExpressionWrapper, F, FloatField, Func, IntegerField, Max, NullBooleanField, OuterRef, Q, Subquery, Sum, Value, When, ) from django.db.models.expressions import RawSQL from django.db.models.functions import Coalesce, Length, Lower from django.test import TestCase, skipUnlessDBFeature from .models import ( Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket, ) def cxOracle_py3_bug(func): """ There's a bug in Django/cx_Oracle with respect to string handling under Python 3 (essentially, they treat Python 3 strings as Python 2 strings rather than unicode). This makes some tests here fail under Python 3, so we mark them as expected failures until someone fixes them in #23843. """ from unittest import expectedFailure from django.db import connection return expectedFailure(func) if connection.vendor == 'oracle' else func class NonAggregateAnnotationTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = Book.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15) ) cls.b6 = Book.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, rating=5.0, 
price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15) ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def test_basic_annotation(self): books = Book.objects.annotate(is_book=Value(1)) for book in books: self.assertEqual(book.is_book, 1) def test_basic_f_annotation(self): books = Book.objects.annotate(another_rating=F('rating')) for book in books: self.assertEqual(book.another_rating, book.rating) def test_joined_annotation(self): books = Book.objects.select_related('publisher').annotate( num_awards=F('publisher__num_awards')) for book in books: self.assertEqual(book.num_awards, book.publisher.num_awards) def test_mixed_type_annotation_date_interval(self): active = datetime.datetime(2015, 3, 20, 14, 0, 0) duration = datetime.timedelta(hours=1) expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration Ticket.objects.create(active_at=active, duration=duration) t = Ticket.objects.annotate( expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField()) ).first() self.assertEqual(t.expires, expires) def test_mixed_type_annotation_numbers(self): test = self.b1 b = Book.objects.annotate( combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField()) ).get(isbn=test.isbn) combined = int(test.pages + test.rating) self.assertEqual(b.combined, combined) def test_empty_expression_annotation(self): books = Book.objects.annotate( selected=ExpressionWrapper(Q(pk__in=[]), output_field=BooleanField()) ) self.assertEqual(len(books), Book.objects.count()) self.assertTrue(all(not book.selected for book in books)) books = Book.objects.annotate( selected=ExpressionWrapper(Q(pk__in=Book.objects.none()), output_field=BooleanField()) ) self.assertEqual(len(books), Book.objects.count()) self.assertTrue(all(not book.selected for book in books)) def test_annotate_with_aggregation(self): books = Book.objects.annotate(is_book=Value(1), rating_count=Count('rating')) for book in books: self.assertEqual(book.is_book, 1) self.assertEqual(book.rating_count, 1) def test_combined_expression_annotation_with_aggregation(self): book = Book.objects.annotate( combined=ExpressionWrapper(Value(3) * Value(4), output_field=IntegerField()), rating_count=Count('rating'), ).first() self.assertEqual(book.combined, 12) self.assertEqual(book.rating_count, 1) def test_combined_f_expression_annotation_with_aggregation(self): book = Book.objects.filter(isbn='159059725').annotate( combined=ExpressionWrapper(F('price') * F('pages'), output_field=FloatField()), rating_count=Count('rating'), ).first() self.assertEqual(book.combined, 13410.0) self.assertEqual(book.rating_count, 1) @skipUnlessDBFeature('supports_boolean_expr_in_select_clause') def test_q_expression_annotation_with_aggregation(self): book = 
Book.objects.filter(isbn='159059725').annotate( isnull_pubdate=ExpressionWrapper( Q(pubdate__isnull=True), output_field=BooleanField(), ), rating_count=Count('rating'), ).first() self.assertEqual(book.isnull_pubdate, False) self.assertEqual(book.rating_count, 1) def test_aggregate_over_annotation(self): agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age')) other_agg = Author.objects.aggregate(age_sum=Sum('age')) self.assertEqual(agg['otherage_sum'], other_agg['age_sum']) @skipUnlessDBFeature('can_distinct_on_fields') def test_distinct_on_with_annotation(self): store = Store.objects.create( name='test store', original_opening=datetime.datetime.now(), friday_night_closing=datetime.time(21, 00, 00), ) names = [ 'Theodore Roosevelt', 'Eleanor Roosevelt', 'Franklin Roosevelt', 'Ned Stark', 'Catelyn Stark', ] for name in names: Employee.objects.create( store=store, first_name=name.split()[0], last_name=name.split()[1], age=30, salary=2000, ) people = Employee.objects.annotate( name_lower=Lower('last_name'), ).distinct('name_lower') self.assertEqual({p.last_name for p in people}, {'Stark', 'Roosevelt'}) self.assertEqual(len(people), 2) people2 = Employee.objects.annotate( test_alias=F('store__name'), ).distinct('test_alias') self.assertEqual(len(people2), 1) lengths = Employee.objects.annotate( name_len=Length('first_name'), ).distinct('name_len').values_list('name_len', flat=True) self.assertCountEqual(lengths, [3, 7, 8]) def test_filter_annotation(self): books = Book.objects.annotate(is_book=Value(1)).filter(is_book=1) for book in books: self.assertEqual(book.is_book, 1) def test_filter_annotation_with_f(self): books = Book.objects.annotate( other_rating=F('rating') ).filter(other_rating=3.5) for book in books: self.assertEqual(book.other_rating, 3.5) def test_filter_annotation_with_double_f(self): books = Book.objects.annotate( other_rating=F('rating') ).filter(other_rating=F('rating')) for book in books: self.assertEqual(book.other_rating, book.rating) def test_filter_agg_with_double_f(self): books = Book.objects.annotate( sum_rating=Sum('rating') ).filter(sum_rating=F('sum_rating')) for book in books: self.assertEqual(book.sum_rating, book.rating) def test_filter_wrong_annotation(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Book.objects.annotate( sum_rating=Sum('rating') ).filter(sum_rating=F('nope'))) def test_decimal_annotation(self): salary = Decimal(10) ** -Employee._meta.get_field('salary').decimal_places Employee.objects.create( first_name='Max', last_name='Paine', store=Store.objects.first(), age=23, salary=salary, ) self.assertEqual( Employee.objects.annotate(new_salary=F('salary') / 10).get().new_salary, salary / 10, ) def test_filter_decimal_annotation(self): qs = Book.objects.annotate(new_price=F('price') + 1).filter(new_price=Decimal(31)).values_list('new_price') self.assertEqual(qs.get(), (Decimal(31),)) def test_combined_annotation_commutative(self): book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk) book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk) self.assertEqual(book1.adjusted_rating, book2.adjusted_rating) book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk) book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk) self.assertEqual(book1.adjusted_rating, book2.adjusted_rating) def test_update_with_annotation(self): book_preupdate = Book.objects.get(pk=self.b2.pk) 
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating')) book_postupdate = Book.objects.get(pk=self.b2.pk) self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating) def test_annotation_with_m2m(self): books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age') self.assertEqual(books[0].author_age, 34) self.assertEqual(books[1].author_age, 35) def test_annotation_reverse_m2m(self): books = Book.objects.annotate( store_name=F('store__name'), ).filter( name='Practical Django Projects', ).order_by('store_name') self.assertQuerysetEqual( books, [ 'Amazon.com', 'Books.com', 'Mamma and Pappa\'s Books' ], lambda b: b.store_name ) def test_values_annotation(self): """ Annotations can reference fields in a values clause, and contribute to an existing values clause. """ # annotate references a field in values() qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1) book = qs.get(pk=self.b1.pk) self.assertEqual(book['rating'] - 1, book['other_rating']) # filter refs the annotated value book = qs.get(other_rating=4) self.assertEqual(book['other_rating'], 4) # can annotate an existing values with a new field book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4) self.assertEqual(book['other_rating'], 4) self.assertEqual(book['other_isbn'], '155860191') def test_values_with_pk_annotation(self): # annotate references a field in values() with pk publishers = Publisher.objects.values('id', 'book__rating').annotate(total=Sum('book__rating')) for publisher in publishers.filter(pk=self.p1.pk): self.assertEqual(publisher['book__rating'], publisher['total']) @skipUnlessDBFeature('allows_group_by_pk') def test_rawsql_group_by_collapse(self): raw = RawSQL('SELECT MIN(id) FROM annotations_book', []) qs = Author.objects.values('id').annotate( min_book_id=raw, count_friends=Count('friends'), ).order_by() _, _, group_by = qs.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(group_by), 1) self.assertNotEqual(raw, group_by[0]) def test_defer_annotation(self): """ Deferred attributes can be referenced by an annotation, but they are not themselves deferred, and cannot be deferred. """ qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1) with self.assertNumQueries(2): book = qs.get(other_rating=4) self.assertEqual(book.rating, 5) self.assertEqual(book.other_rating, 4) with self.assertRaisesMessage(FieldDoesNotExist, "Book has no field named 'other_rating'"): book = qs.defer('other_rating').get(other_rating=4) def test_mti_annotations(self): """ Fields on an inherited model can be referenced by an annotated field. 
""" d = DepartmentStore.objects.create( name='Angus & Robinson', original_opening=datetime.date(2014, 3, 8), friday_night_closing=datetime.time(21, 00, 00), chain='Westfield' ) books = Book.objects.filter(rating__gt=4) for b in books: d.books.add(b) qs = DepartmentStore.objects.annotate( other_name=F('name'), other_chain=F('chain'), is_open=Value(True, BooleanField()), book_isbn=F('books__isbn') ).order_by('book_isbn').filter(chain='Westfield') self.assertQuerysetEqual( qs, [ ('Angus & Robinson', 'Westfield', True, '155860191'), ('Angus & Robinson', 'Westfield', True, '159059725') ], lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn) ) def test_null_annotation(self): """ Annotating None onto a model round-trips """ book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first() self.assertIsNone(book.no_value) def test_order_by_annotation(self): authors = Author.objects.annotate(other_age=F('age')).order_by('other_age') self.assertQuerysetEqual( authors, [ 25, 29, 29, 34, 35, 37, 45, 46, 57, ], lambda a: a.other_age ) def test_order_by_aggregate(self): authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age') self.assertQuerysetEqual( authors, [ (25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2), ], lambda a: (a['age'], a['age_count']) ) def test_raw_sql_with_inherited_field(self): DepartmentStore.objects.create( name='Angus & Robinson', original_opening=datetime.date(2014, 3, 8), friday_night_closing=datetime.time(21), chain='Westfield', area=123, ) tests = ( ('name', 'Angus & Robinson'), ('surface', 123), ("case when name='Angus & Robinson' then chain else name end", 'Westfield'), ) for sql, expected_result in tests: with self.subTest(sql=sql): self.assertSequenceEqual( DepartmentStore.objects.annotate( annotation=RawSQL(sql, ()), ).values_list('annotation', flat=True), [expected_result], ) def test_annotate_exists(self): authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1) self.assertFalse(authors.exists()) def test_column_field_ordering(self): """ Columns are aligned in the correct order for resolve_columns. This test will fail on MySQL if column ordering is out. Column fields should be aligned as: 1. extra_select 2. model_fields 3. annotation_fields 4. 
model_related_fields """ store = Store.objects.first() Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine', store=store, age=23, salary=Decimal(50000.00)) Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers', store=store, age=18, salary=Decimal(40000.00)) qs = Employee.objects.extra( select={'random_value': '42'} ).select_related('store').annotate( annotated_value=Value(17), ) rows = [ (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17), (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17) ] self.assertQuerysetEqual( qs.order_by('id'), rows, lambda e: ( e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age, e.salary, e.store.name, e.annotated_value)) def test_column_field_ordering_with_deferred(self): store = Store.objects.first() Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine', store=store, age=23, salary=Decimal(50000.00)) Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers', store=store, age=18, salary=Decimal(40000.00)) qs = Employee.objects.extra( select={'random_value': '42'} ).select_related('store').annotate( annotated_value=Value(17), ) rows = [ (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17), (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17) ] # and we respect deferred columns! self.assertQuerysetEqual( qs.defer('age').order_by('id'), rows, lambda e: ( e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age, e.salary, e.store.name, e.annotated_value)) @cxOracle_py3_bug def test_custom_functions(self): Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save() Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save() Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save() Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save() qs = Company.objects.annotate( tagline=Func( F('motto'), F('ticker_name'), F('description'), Value('No Tag'), function='COALESCE' ) ).order_by('name') self.assertQuerysetEqual( qs, [ ('Apple', 'APPL'), ('Django Software Foundation', 'No Tag'), ('Google', 'Do No Evil'), ('Yahoo', 'Internet Company') ], lambda c: (c.name, c.tagline) ) @cxOracle_py3_bug def test_custom_functions_can_ref_other_functions(self): Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save() Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save() Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save() Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save() class Lower(Func): function = 'LOWER' qs = Company.objects.annotate( tagline=Func( F('motto'), F('ticker_name'), F('description'), Value('No Tag'), function='COALESCE', ) ).annotate( tagline_lower=Lower(F('tagline')), ).order_by('name') # LOWER function supported by: # oracle, postgres, mysql, sqlite, sqlserver self.assertQuerysetEqual( qs, [ ('Apple', 'APPL'.lower()), ('Django Software Foundation', 'No Tag'.lower()), ('Google', 'Do No Evil'.lower()), ('Yahoo', 'Internet Company'.lower()) ], lambda c: (c.name, c.tagline_lower) ) def test_boolean_value_annotation(self): books = Book.objects.annotate( is_book=Value(True, output_field=BooleanField()), is_pony=Value(False, output_field=BooleanField()), 
is_none=Value(None, output_field=BooleanField(null=True)), is_none_old=Value(None, output_field=NullBooleanField()), ) self.assertGreater(len(books), 0) for book in books: self.assertIs(book.is_book, True) self.assertIs(book.is_pony, False) self.assertIsNone(book.is_none) self.assertIsNone(book.is_none_old) def test_annotation_in_f_grouped_by_annotation(self): qs = ( Publisher.objects.annotate(multiplier=Value(3)) # group by option => sum of value * multiplier .values('name') .annotate(multiplied_value_sum=Sum(F('multiplier') * F('num_awards'))) .order_by() ) self.assertCountEqual( qs, [ {'multiplied_value_sum': 9, 'name': 'Apress'}, {'multiplied_value_sum': 0, 'name': "Jonno's House of Books"}, {'multiplied_value_sum': 27, 'name': 'Morgan Kaufmann'}, {'multiplied_value_sum': 21, 'name': 'Prentice Hall'}, {'multiplied_value_sum': 3, 'name': 'Sams'}, ] ) def test_arguments_must_be_expressions(self): msg = 'QuerySet.annotate() received non-expression(s): %s.' with self.assertRaisesMessage(TypeError, msg % BooleanField()): Book.objects.annotate(BooleanField()) with self.assertRaisesMessage(TypeError, msg % True): Book.objects.annotate(is_book=True) with self.assertRaisesMessage(TypeError, msg % ', '.join([str(BooleanField()), 'True'])): Book.objects.annotate(BooleanField(), Value(False), is_book=True) def test_chaining_annotation_filter_with_m2m(self): qs = Author.objects.filter( name='Adrian Holovaty', friends__age=35, ).annotate( jacob_name=F('friends__name'), ).filter( friends__age=29, ).annotate( james_name=F('friends__name'), ).values('jacob_name', 'james_name') self.assertCountEqual( qs, [{'jacob_name': 'Jacob Kaplan-Moss', 'james_name': 'James Bennett'}], ) def test_annotation_filter_with_subquery(self): long_books_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=400, ).values('publisher').annotate(count=Count('pk')).values('count') publisher_books_qs = Publisher.objects.annotate( total_books=Count('book'), ).filter( total_books=Subquery(long_books_qs, output_field=IntegerField()), ).values('name') self.assertCountEqual(publisher_books_qs, [{'name': 'Sams'}, {'name': 'Morgan Kaufmann'}]) def test_annotation_exists_aggregate_values_chaining(self): qs = Book.objects.values('publisher').annotate( has_authors=Exists(Book.authors.through.objects.filter(book=OuterRef('pk'))), max_pubdate=Max('pubdate'), ).values_list('max_pubdate', flat=True).order_by('max_pubdate') self.assertCountEqual(qs, [ datetime.date(1991, 10, 15), datetime.date(2008, 3, 3), datetime.date(2008, 6, 23), datetime.date(2008, 11, 3), ]) def test_annotation_aggregate_with_m2o(self): if connection.vendor == 'mysql' and 'ONLY_FULL_GROUP_BY' in connection.sql_mode: self.skipTest( 'GROUP BY optimization does not work properly when ' 'ONLY_FULL_GROUP_BY mode is enabled on MySQL, see #31331.' ) qs = Author.objects.filter(age__lt=30).annotate( max_pages=Case( When(book_contact_set__isnull=True, then=Value(0)), default=Max(F('book__pages')), ), ).values('name', 'max_pages') self.assertCountEqual(qs, [ {'name': 'James Bennett', 'max_pages': 300}, {'name': 'Paul Bissex', 'max_pages': 0}, {'name': 'Wesley J. 
Chun', 'max_pages': 0}, ]) class AliasTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='James Bennett', age=34) cls.a4 = Author.objects.create(name='Peter Norvig', age=57) cls.a5 = Author.objects.create(name='Stuart Russell', age=46) p1 = Publisher.objects.create(name='Apress', num_awards=3) cls.b1 = Book.objects.create( isbn='159059725', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=p1, pubdate=datetime.date(2007, 12, 6), name='The Definitive Guide to Django: Web Development Done Right', ) cls.b2 = Book.objects.create( isbn='159059996', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a3, publisher=p1, pubdate=datetime.date(2008, 6, 23), name='Practical Django Projects', ) cls.b3 = Book.objects.create( isbn='013790395', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a4, publisher=p1, pubdate=datetime.date(1995, 1, 15), name='Artificial Intelligence: A Modern Approach', ) cls.b4 = Book.objects.create( isbn='155860191', pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a4, publisher=p1, pubdate=datetime.date(1991, 10, 15), name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4, cls.a5) cls.b4.authors.add(cls.a4) Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 59, 59) ) Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) def test_basic_alias(self): qs = Book.objects.alias(is_book=Value(1)) self.assertIs(hasattr(qs.first(), 'is_book'), False) def test_basic_alias_annotation(self): qs = Book.objects.alias( is_book_alias=Value(1), ).annotate(is_book=F('is_book_alias')) self.assertIs(hasattr(qs.first(), 'is_book_alias'), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.is_book, 1) def test_basic_alias_f_annotation(self): qs = Book.objects.alias( another_rating_alias=F('rating') ).annotate(another_rating=F('another_rating_alias')) self.assertIs(hasattr(qs.first(), 'another_rating_alias'), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.another_rating, book.rating) def test_alias_after_annotation(self): qs = Book.objects.annotate( is_book=Value(1), ).alias(is_book_alias=F('is_book')) book = qs.first() self.assertIs(hasattr(book, 'is_book'), True) self.assertIs(hasattr(book, 'is_book_alias'), False) def test_overwrite_annotation_with_alias(self): qs = Book.objects.annotate(is_book=Value(1)).alias(is_book=F('is_book')) self.assertIs(hasattr(qs.first(), 'is_book'), False) def test_overwrite_alias_with_annotation(self): qs = Book.objects.alias(is_book=Value(1)).annotate(is_book=F('is_book')) for book in qs: with self.subTest(book=book): self.assertEqual(book.is_book, 1) def test_alias_annotation_expression(self): qs = Book.objects.alias( is_book_alias=Value(1), ).annotate(is_book=Coalesce('is_book_alias', 0)) self.assertIs(hasattr(qs.first(), 'is_book_alias'), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.is_book, 1) def test_alias_default_alias_expression(self): qs = Author.objects.alias( Sum('book__pages'), ).filter(book__pages__sum__gt=2000) self.assertIs(hasattr(qs.first(), 
'book__pages__sum'), False) self.assertSequenceEqual(qs, [self.a4]) def test_joined_alias_annotation(self): qs = Book.objects.select_related('publisher').alias( num_awards_alias=F('publisher__num_awards'), ).annotate(num_awards=F('num_awards_alias')) self.assertIs(hasattr(qs.first(), 'num_awards_alias'), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.num_awards, book.publisher.num_awards) def test_alias_annotate_with_aggregation(self): qs = Book.objects.alias( is_book_alias=Value(1), rating_count_alias=Count('rating'), ).annotate( is_book=F('is_book_alias'), rating_count=F('rating_count_alias'), ) book = qs.first() self.assertIs(hasattr(book, 'is_book_alias'), False) self.assertIs(hasattr(book, 'rating_count_alias'), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.is_book, 1) self.assertEqual(book.rating_count, 1) def test_filter_alias_with_f(self): qs = Book.objects.alias( other_rating=F('rating'), ).filter(other_rating=4.5) self.assertIs(hasattr(qs.first(), 'other_rating'), False) self.assertSequenceEqual(qs, [self.b1]) def test_filter_alias_with_double_f(self): qs = Book.objects.alias( other_rating=F('rating'), ).filter(other_rating=F('rating')) self.assertIs(hasattr(qs.first(), 'other_rating'), False) self.assertEqual(qs.count(), Book.objects.count()) def test_filter_alias_agg_with_double_f(self): qs = Book.objects.alias( sum_rating=Sum('rating'), ).filter(sum_rating=F('sum_rating')) self.assertIs(hasattr(qs.first(), 'sum_rating'), False) self.assertEqual(qs.count(), Book.objects.count()) def test_update_with_alias(self): Book.objects.alias( other_rating=F('rating') - 1, ).update(rating=F('other_rating')) self.b1.refresh_from_db() self.assertEqual(self.b1.rating, 3.5) def test_order_by_alias(self): qs = Author.objects.alias(other_age=F('age')).order_by('other_age') self.assertIs(hasattr(qs.first(), 'other_age'), False) self.assertQuerysetEqual(qs, [34, 34, 35, 46, 57], lambda a: a.age) def test_order_by_alias_aggregate(self): qs = Author.objects.values('age').alias(age_count=Count('age')).order_by('age_count', 'age') self.assertIs(hasattr(qs.first(), 'age_count'), False) self.assertQuerysetEqual(qs, [35, 46, 57, 34], lambda a: a['age']) def test_dates_alias(self): qs = Book.objects.alias( pubdate_alias=F('pubdate'), ).dates('pubdate_alias', 'month') self.assertCountEqual(qs, [ datetime.date(1991, 10, 1), datetime.date(1995, 1, 1), datetime.date(2007, 12, 1), datetime.date(2008, 6, 1), ]) def test_datetimes_alias(self): qs = Store.objects.alias( original_opening_alias=F('original_opening'), ).datetimes('original_opening_alias', 'year') self.assertCountEqual(qs, [ datetime.datetime(1994, 1, 1), datetime.datetime(2001, 1, 1), ]) def test_aggregate_alias(self): msg = ( "Cannot aggregate over the 'other_age' alias. Use annotate() to " "promote it." ) with self.assertRaisesMessage(FieldError, msg): Author.objects.alias( other_age=F('age'), ).aggregate(otherage_sum=Sum('other_age')) def test_defer_only_alias(self): qs = Book.objects.alias(rating_alias=F('rating') - 1) msg = "Book has no field named 'rating_alias'" for operation in ['defer', 'only']: with self.subTest(operation=operation): with self.assertRaisesMessage(FieldDoesNotExist, msg): getattr(qs, operation)('rating_alias').first() @skipUnlessDBFeature('can_distinct_on_fields') def test_distinct_on_alias(self): qs = Book.objects.alias(rating_alias=F('rating') - 1) msg = "Cannot resolve keyword 'rating_alias' into field." 
with self.assertRaisesMessage(FieldError, msg): qs.distinct('rating_alias').first() def test_values_alias(self): qs = Book.objects.alias(rating_alias=F('rating') - 1) msg = ( "Cannot select the 'rating_alias' alias. Use annotate() to " "promote it." ) for operation in ['values', 'values_list']: with self.subTest(operation=operation): with self.assertRaisesMessage(FieldError, msg): getattr(qs, operation)('rating_alias')
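def _alias_vs_annotate_sketch():
    # Illustrative sketch (hypothetical helper, assumes the test fixtures above):
    # the distinction the AliasTests rely on. alias() defines an expression that
    # later filter()/order_by() clauses can reference but never selects it, so the
    # attribute does not appear on model instances; annotate() both defines and
    # selects it. The 'points' name is made up for this example.
    aliased = Book.objects.alias(points=F('rating') * 2).filter(points__gt=8)
    assert not hasattr(aliased.first(), 'points')

    annotated = Book.objects.annotate(points=F('rating') * 2).filter(points__gt=8)
    assert all(book.points == book.rating * 2 for book in annotated)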
import os import re from io import StringIO from unittest import mock, skipUnless from django.core.management import call_command from django.db import connection from django.db.backends.base.introspection import TableInfo from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature from .models import PeopleMoreData, test_collation def inspectdb_tables_only(table_name): """ Limit introspection to tables created for models of this app. Some databases such as Oracle are extremely slow at introspection. """ return table_name.startswith('inspectdb_') def inspectdb_views_only(table_name): return ( table_name.startswith('inspectdb_') and table_name.endswith(('_materialized', '_view')) ) def special_table_only(table_name): return table_name.startswith('inspectdb_special') class InspectDBTestCase(TestCase): unique_re = re.compile(r'.*unique_together = \((.+),\).*') def test_stealth_table_name_filter_option(self): out = StringIO() call_command('inspectdb', table_name_filter=inspectdb_tables_only, stdout=out) error_message = "inspectdb has examined a table that should have been filtered out." # contrib.contenttypes is one of the apps always installed when running # the Django test suite, check that one of its tables hasn't been # inspected self.assertNotIn("class DjangoContentType(models.Model):", out.getvalue(), msg=error_message) def test_table_option(self): """ inspectdb can inspect a subset of tables by passing the table names as arguments. """ out = StringIO() call_command('inspectdb', 'inspectdb_people', stdout=out) output = out.getvalue() self.assertIn('class InspectdbPeople(models.Model):', output) self.assertNotIn("InspectdbPeopledata", output) def make_field_type_asserter(self): """Call inspectdb and return a function to validate a field type in its output""" out = StringIO() call_command('inspectdb', 'inspectdb_columntypes', stdout=out) output = out.getvalue() def assertFieldType(name, definition): out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE)[1] self.assertEqual(definition, out_def) return assertFieldType def test_field_types(self): """Test introspection of various Django field types""" assertFieldType = self.make_field_type_asserter() introspected_field_types = connection.features.introspected_field_types char_field_type = introspected_field_types['CharField'] # Inspecting Oracle DB doesn't produce correct results (#19884): # - it reports fields as blank=True when they aren't. 
if not connection.features.interprets_empty_strings_as_nulls and char_field_type == 'CharField': assertFieldType('char_field', "models.CharField(max_length=10)") assertFieldType('null_char_field', "models.CharField(max_length=10, blank=True, null=True)") assertFieldType('email_field', "models.CharField(max_length=254)") assertFieldType('file_field', "models.CharField(max_length=100)") assertFieldType('file_path_field', "models.CharField(max_length=100)") assertFieldType('slug_field', "models.CharField(max_length=50)") assertFieldType('text_field', "models.TextField()") assertFieldType('url_field', "models.CharField(max_length=200)") if char_field_type == 'TextField': assertFieldType('char_field', 'models.TextField()') assertFieldType('null_char_field', 'models.TextField(blank=True, null=True)') assertFieldType('email_field', 'models.TextField()') assertFieldType('file_field', 'models.TextField()') assertFieldType('file_path_field', 'models.TextField()') assertFieldType('slug_field', 'models.TextField()') assertFieldType('text_field', 'models.TextField()') assertFieldType('url_field', 'models.TextField()') assertFieldType('date_field', "models.DateField()") assertFieldType('date_time_field', "models.DateTimeField()") if introspected_field_types['GenericIPAddressField'] == 'GenericIPAddressField': assertFieldType('gen_ip_address_field', "models.GenericIPAddressField()") elif not connection.features.interprets_empty_strings_as_nulls: assertFieldType('gen_ip_address_field', "models.CharField(max_length=39)") assertFieldType('time_field', 'models.%s()' % introspected_field_types['TimeField']) if connection.features.has_native_uuid_field: assertFieldType('uuid_field', "models.UUIDField()") elif not connection.features.interprets_empty_strings_as_nulls: assertFieldType('uuid_field', "models.CharField(max_length=32)") @skipUnlessDBFeature('can_introspect_json_field', 'supports_json_field') def test_json_field(self): out = StringIO() call_command('inspectdb', 'inspectdb_jsonfieldcolumntype', stdout=out) output = out.getvalue() if not connection.features.interprets_empty_strings_as_nulls: self.assertIn('json_field = models.JSONField()', output) self.assertIn('null_json_field = models.JSONField(blank=True, null=True)', output) @skipUnlessDBFeature('supports_collation_on_charfield') @skipUnless(test_collation, 'Language collations are not supported.') def test_char_field_db_collation(self): out = StringIO() call_command('inspectdb', 'inspectdb_charfielddbcollation', stdout=out) output = out.getvalue() if not connection.features.interprets_empty_strings_as_nulls: self.assertIn( "char_field = models.CharField(max_length=10, " "db_collation='%s')" % test_collation, output, ) else: self.assertIn( "char_field = models.CharField(max_length=10, " "db_collation='%s', blank=True, null=True)" % test_collation, output, ) @skipUnlessDBFeature('supports_collation_on_textfield') @skipUnless(test_collation, 'Language collations are not supported.') def test_text_field_db_collation(self): out = StringIO() call_command('inspectdb', 'inspectdb_textfielddbcollation', stdout=out) output = out.getvalue() if not connection.features.interprets_empty_strings_as_nulls: self.assertIn( "text_field = models.TextField(db_collation='%s')" % test_collation, output, ) else: self.assertIn( "text_field = models.TextField(db_collation='%s, blank=True, " "null=True)" % test_collation, output, ) def test_number_field_types(self): """Test introspection of various Django field types""" assertFieldType = self.make_field_type_asserter() 
introspected_field_types = connection.features.introspected_field_types auto_field_type = connection.features.introspected_field_types['AutoField'] if auto_field_type != 'AutoField': assertFieldType('id', "models.%s(primary_key=True) # AutoField?" % auto_field_type) assertFieldType('big_int_field', 'models.%s()' % introspected_field_types['BigIntegerField']) bool_field_type = introspected_field_types['BooleanField'] assertFieldType('bool_field', "models.{}()".format(bool_field_type)) assertFieldType('null_bool_field', 'models.{}(blank=True, null=True)'.format(bool_field_type)) if connection.vendor != 'sqlite': assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)") else: # Guessed arguments on SQLite, see #5014 assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) " "# max_digits and decimal_places have been guessed, " "as this database handles decimal fields as float") assertFieldType('float_field', "models.FloatField()") assertFieldType('int_field', 'models.%s()' % introspected_field_types['IntegerField']) assertFieldType('pos_int_field', 'models.%s()' % introspected_field_types['PositiveIntegerField']) assertFieldType('pos_big_int_field', 'models.%s()' % introspected_field_types['PositiveBigIntegerField']) assertFieldType('pos_small_int_field', 'models.%s()' % introspected_field_types['PositiveSmallIntegerField']) assertFieldType('small_int_field', 'models.%s()' % introspected_field_types['SmallIntegerField']) @skipUnlessDBFeature('can_introspect_foreign_keys') def test_attribute_name_not_python_keyword(self): out = StringIO() call_command('inspectdb', table_name_filter=inspectdb_tables_only, stdout=out) output = out.getvalue() error_message = "inspectdb generated an attribute name which is a Python keyword" # Recursive foreign keys should be set to 'self' self.assertIn("parent = models.ForeignKey('self', models.DO_NOTHING)", output) self.assertNotIn( "from = models.ForeignKey(InspectdbPeople, models.DO_NOTHING)", output, msg=error_message, ) # As InspectdbPeople model is defined after InspectdbMessage, it should be quoted self.assertIn( "from_field = models.ForeignKey('InspectdbPeople', models.DO_NOTHING, db_column='from_id')", output, ) self.assertIn( 'people_pk = models.OneToOneField(InspectdbPeople, models.DO_NOTHING, primary_key=True)', output, ) self.assertIn( 'people_unique = models.OneToOneField(InspectdbPeople, models.DO_NOTHING)', output, ) def test_digits_column_name_introspection(self): """Introspection of column names consist/start with digits (#16536/#17676)""" char_field_type = connection.features.introspected_field_types['CharField'] out = StringIO() call_command('inspectdb', 'inspectdb_digitsincolumnname', stdout=out) output = out.getvalue() error_message = "inspectdb generated a model field name which is a number" self.assertNotIn(' 123 = models.%s' % char_field_type, output, msg=error_message) self.assertIn('number_123 = models.%s' % char_field_type, output) error_message = "inspectdb generated a model field name which starts with a digit" self.assertNotIn(' 4extra = models.%s' % char_field_type, output, msg=error_message) self.assertIn('number_4extra = models.%s' % char_field_type, output) self.assertNotIn(' 45extra = models.%s' % char_field_type, output, msg=error_message) self.assertIn('number_45extra = models.%s' % char_field_type, output) def test_special_column_name_introspection(self): """ Introspection of column names containing special characters, unsuitable for Python identifiers """ out = 
StringIO() call_command('inspectdb', table_name_filter=special_table_only, stdout=out) output = out.getvalue() base_name = connection.introspection.identifier_converter('Field') integer_field_type = connection.features.introspected_field_types['IntegerField'] self.assertIn("field = models.%s()" % integer_field_type, output) self.assertIn("field_field = models.%s(db_column='%s_')" % (integer_field_type, base_name), output) self.assertIn("field_field_0 = models.%s(db_column='%s__')" % (integer_field_type, base_name), output) self.assertIn("field_field_1 = models.%s(db_column='__field')" % integer_field_type, output) self.assertIn("prc_x = models.{}(db_column='prc(%) x')".format(integer_field_type), output) self.assertIn("tamaño = models.%s()" % integer_field_type, output) def test_table_name_introspection(self): """ Introspection of table names containing special characters, unsuitable for Python identifiers """ out = StringIO() call_command('inspectdb', table_name_filter=special_table_only, stdout=out) output = out.getvalue() self.assertIn("class InspectdbSpecialTableName(models.Model):", output) def test_managed_models(self): """By default the command generates models with `Meta.managed = False` (#14305)""" out = StringIO() call_command('inspectdb', 'inspectdb_columntypes', stdout=out) output = out.getvalue() self.longMessage = False self.assertIn(" managed = False", output, msg='inspectdb should generate unmanaged models.') def test_unique_together_meta(self): out = StringIO() call_command('inspectdb', 'inspectdb_uniquetogether', stdout=out) output = out.getvalue() self.assertIn(" unique_together = (('", output) unique_together_match = self.unique_re.findall(output) # There should be one unique_together tuple. self.assertEqual(len(unique_together_match), 1) fields = unique_together_match[0] # Fields with db_column = field name. self.assertIn("('field1', 'field2')", fields) # Fields from columns whose names are Python keywords. self.assertIn("('field1', 'field2')", fields) # Fields whose names normalize to the same Python field name and hence # are given an integer suffix. 
self.assertIn("('non_unique_column', 'non_unique_column_0')", fields) @skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific SQL') def test_unsupported_unique_together(self): """Unsupported index types (COALESCE here) are skipped.""" with connection.cursor() as c: c.execute( 'CREATE UNIQUE INDEX Findex ON %s ' '(id, people_unique_id, COALESCE(message_id, -1))' % PeopleMoreData._meta.db_table ) try: out = StringIO() call_command( 'inspectdb', table_name_filter=lambda tn: tn.startswith(PeopleMoreData._meta.db_table), stdout=out, ) output = out.getvalue() self.assertIn('# A unique constraint could not be introspected.', output) self.assertEqual(self.unique_re.findall(output), ["('id', 'people_unique')"]) finally: with connection.cursor() as c: c.execute('DROP INDEX Findex') @skipUnless(connection.vendor == 'sqlite', "Only patched sqlite's DatabaseIntrospection.data_types_reverse for this test") def test_custom_fields(self): """ Introspection of columns with a custom field (#21090) """ out = StringIO() orig_data_types_reverse = connection.introspection.data_types_reverse try: connection.introspection.data_types_reverse = { 'text': 'myfields.TextField', 'bigint': 'BigIntegerField', } call_command('inspectdb', 'inspectdb_columntypes', stdout=out) output = out.getvalue() self.assertIn("text_field = myfields.TextField()", output) self.assertIn("big_int_field = models.BigIntegerField()", output) finally: connection.introspection.data_types_reverse = orig_data_types_reverse def test_introspection_errors(self): """ Introspection errors should not crash the command, and the error should be visible in the output. """ out = StringIO() with mock.patch('django.db.connection.introspection.get_table_list', return_value=[TableInfo(name='nonexistent', type='t')]): call_command('inspectdb', stdout=out) output = out.getvalue() self.assertIn("# Unable to inspect table 'nonexistent'", output) # The error message depends on the backend self.assertIn("# The error was:", output) class InspectDBTransactionalTests(TransactionTestCase): available_apps = ['inspectdb'] def test_include_views(self): """inspectdb --include-views creates models for database views.""" with connection.cursor() as cursor: cursor.execute( 'CREATE VIEW inspectdb_people_view AS ' 'SELECT id, name FROM inspectdb_people' ) out = StringIO() view_model = 'class InspectdbPeopleView(models.Model):' view_managed = 'managed = False # Created from a view.' try: call_command( 'inspectdb', table_name_filter=inspectdb_views_only, stdout=out, ) no_views_output = out.getvalue() self.assertNotIn(view_model, no_views_output) self.assertNotIn(view_managed, no_views_output) call_command( 'inspectdb', table_name_filter=inspectdb_views_only, include_views=True, stdout=out, ) with_views_output = out.getvalue() self.assertIn(view_model, with_views_output) self.assertIn(view_managed, with_views_output) finally: with connection.cursor() as cursor: cursor.execute('DROP VIEW inspectdb_people_view') @skipUnlessDBFeature('can_introspect_materialized_views') def test_include_materialized_views(self): """inspectdb --include-views creates models for materialized views.""" with connection.cursor() as cursor: cursor.execute( 'CREATE MATERIALIZED VIEW inspectdb_people_materialized AS ' 'SELECT id, name FROM inspectdb_people' ) out = StringIO() view_model = 'class InspectdbPeopleMaterialized(models.Model):' view_managed = 'managed = False # Created from a view.' 
try: call_command( 'inspectdb', table_name_filter=inspectdb_views_only, stdout=out, ) no_views_output = out.getvalue() self.assertNotIn(view_model, no_views_output) self.assertNotIn(view_managed, no_views_output) call_command( 'inspectdb', table_name_filter=inspectdb_views_only, include_views=True, stdout=out, ) with_views_output = out.getvalue() self.assertIn(view_model, with_views_output) self.assertIn(view_managed, with_views_output) finally: with connection.cursor() as cursor: cursor.execute('DROP MATERIALIZED VIEW inspectdb_people_materialized') @skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific SQL') @skipUnlessDBFeature('supports_table_partitions') def test_include_partitions(self): """inspectdb --include-partitions creates models for partitions.""" with connection.cursor() as cursor: cursor.execute('''\ CREATE TABLE inspectdb_partition_parent (name text not null) PARTITION BY LIST (left(upper(name), 1)) ''') cursor.execute('''\ CREATE TABLE inspectdb_partition_child PARTITION OF inspectdb_partition_parent FOR VALUES IN ('A', 'B', 'C') ''') out = StringIO() partition_model_parent = 'class InspectdbPartitionParent(models.Model):' partition_model_child = 'class InspectdbPartitionChild(models.Model):' partition_managed = 'managed = False # Created from a partition.' try: call_command('inspectdb', table_name_filter=inspectdb_tables_only, stdout=out) no_partitions_output = out.getvalue() self.assertIn(partition_model_parent, no_partitions_output) self.assertNotIn(partition_model_child, no_partitions_output) self.assertNotIn(partition_managed, no_partitions_output) call_command('inspectdb', table_name_filter=inspectdb_tables_only, include_partitions=True, stdout=out) with_partitions_output = out.getvalue() self.assertIn(partition_model_parent, with_partitions_output) self.assertIn(partition_model_child, with_partitions_output) self.assertIn(partition_managed, with_partitions_output) finally: with connection.cursor() as cursor: cursor.execute('DROP TABLE IF EXISTS inspectdb_partition_child') cursor.execute('DROP TABLE IF EXISTS inspectdb_partition_parent') @skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific SQL') def test_foreign_data_wrapper(self): with connection.cursor() as cursor: cursor.execute('CREATE EXTENSION IF NOT EXISTS file_fdw') cursor.execute('CREATE SERVER inspectdb_server FOREIGN DATA WRAPPER file_fdw') cursor.execute('''\ CREATE FOREIGN TABLE inspectdb_iris_foreign_table ( petal_length real, petal_width real, sepal_length real, sepal_width real ) SERVER inspectdb_server OPTIONS ( filename %s ) ''', [os.devnull]) out = StringIO() foreign_table_model = 'class InspectdbIrisForeignTable(models.Model):' foreign_table_managed = 'managed = False' try: call_command( 'inspectdb', table_name_filter=inspectdb_tables_only, stdout=out, ) output = out.getvalue() self.assertIn(foreign_table_model, output) self.assertIn(foreign_table_managed, output) finally: with connection.cursor() as cursor: cursor.execute('DROP FOREIGN TABLE IF EXISTS inspectdb_iris_foreign_table') cursor.execute('DROP SERVER IF EXISTS inspectdb_server') cursor.execute('DROP EXTENSION IF EXISTS file_fdw')
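

# Illustrative helper (not part of the test suite above; the function and its
# table_prefix/include_views parameters are invented here): the tests drive
# inspectdb programmatically through call_command() and read the generated
# model source from a StringIO standing in for stdout. Assuming Django
# settings are already configured for the target database, the same pattern
# can be reused outside the tests like this.
from io import StringIO

from django.core.management import call_command


def generated_models_source(table_prefix='inspectdb_', include_views=False):
    """Return the model source that inspectdb generates for matching tables."""
    out = StringIO()
    call_command(
        'inspectdb',
        table_name_filter=lambda table_name: table_name.startswith(table_prefix),
        include_views=include_views,
        stdout=out,
    )
    return out.getvalue()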
db07c1d8260ad31d2761c5082db96e22772c86dc89aff2edf57792bed4a48e15
from django.db import connection, models class People(models.Model): name = models.CharField(max_length=255) parent = models.ForeignKey('self', models.CASCADE) class Message(models.Model): from_field = models.ForeignKey(People, models.CASCADE, db_column='from_id') class PeopleData(models.Model): people_pk = models.ForeignKey(People, models.CASCADE, primary_key=True) ssn = models.CharField(max_length=11) class PeopleMoreData(models.Model): people_unique = models.ForeignKey(People, models.CASCADE, unique=True) message = models.ForeignKey(Message, models.CASCADE, blank=True, null=True) license = models.CharField(max_length=255) class DigitsInColumnName(models.Model): all_digits = models.CharField(max_length=11, db_column='123') leading_digit = models.CharField(max_length=11, db_column='4extra') leading_digits = models.CharField(max_length=11, db_column='45extra') class SpecialName(models.Model): field = models.IntegerField(db_column='field') # Underscores field_field_0 = models.IntegerField(db_column='Field_') field_field_1 = models.IntegerField(db_column='Field__') field_field_2 = models.IntegerField(db_column='__field') # Other chars prc_x = models.IntegerField(db_column='prc(%) x') non_ascii = models.IntegerField(db_column='tamaño') class Meta: db_table = "inspectdb_special.table name" class ColumnTypes(models.Model): id = models.AutoField(primary_key=True) big_int_field = models.BigIntegerField() bool_field = models.BooleanField(default=False) null_bool_field = models.BooleanField(null=True) char_field = models.CharField(max_length=10) null_char_field = models.CharField(max_length=10, blank=True, null=True) date_field = models.DateField() date_time_field = models.DateTimeField() decimal_field = models.DecimalField(max_digits=6, decimal_places=1) email_field = models.EmailField() file_field = models.FileField(upload_to="unused") file_path_field = models.FilePathField() float_field = models.FloatField() int_field = models.IntegerField() gen_ip_address_field = models.GenericIPAddressField(protocol="ipv4") pos_big_int_field = models.PositiveBigIntegerField() pos_int_field = models.PositiveIntegerField() pos_small_int_field = models.PositiveSmallIntegerField() slug_field = models.SlugField() small_int_field = models.SmallIntegerField() text_field = models.TextField() time_field = models.TimeField() url_field = models.URLField() uuid_field = models.UUIDField() class JSONFieldColumnType(models.Model): json_field = models.JSONField() null_json_field = models.JSONField(blank=True, null=True) class Meta: required_db_features = { 'can_introspect_json_field', 'supports_json_field', } test_collation = connection.features.test_collations.get('non_default') class CharFieldDbCollation(models.Model): char_field = models.CharField(max_length=10, db_collation=test_collation) class Meta: required_db_features = {'supports_collation_on_charfield'} class TextFieldDbCollation(models.Model): text_field = models.TextField(db_collation=test_collation) class Meta: required_db_features = {'supports_collation_on_textfield'} class UniqueTogether(models.Model): field1 = models.IntegerField() field2 = models.CharField(max_length=10) from_field = models.IntegerField(db_column='from') non_unique = models.IntegerField(db_column='non__unique_column') non_unique_0 = models.IntegerField(db_column='non_unique__column') class Meta: unique_together = [ ('field1', 'field2'), ('from_field', 'field1'), ('non_unique', 'non_unique_0'), ]
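

# Illustrative reference only (not used by the fixtures above): how the test
# module's assertions expect inspectdb to normalize the awkward column names
# declared here — Python keywords gain a '_field' suffix, names that collide
# after cleaning receive integer suffixes, and db_column preserves the
# original spelling in every case.
EXPECTED_INSPECTDB_FIELD_NAMES = {
    'from': 'from_field',        # Python keyword gains a '_field' suffix
    'Field_': 'field_field',
    'Field__': 'field_field_0',  # collision after cleaning -> integer suffix
    '__field': 'field_field_1',
    'prc(%) x': 'prc_x',
    # 'non__unique_column' and 'non_unique__column' both clean to
    # 'non_unique_column'; one of the two ends up as 'non_unique_column_0'.
}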
01f5deace90414a6b330fa846f437d2700c57154297fd5b08f05bce8309be6d1
from django.test import SimpleTestCase, override_settings from django.test.utils import require_jinja2 @override_settings(ROOT_URLCONF='shortcuts.urls') class RenderTests(SimpleTestCase): def test_render(self): response = self.client.get('/render/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'FOO.BAR../render/\n') self.assertEqual(response.headers['Content-Type'], 'text/html; charset=utf-8') self.assertFalse(hasattr(response.context.request, 'current_app')) def test_render_with_multiple_templates(self): response = self.client.get('/render/multiple_templates/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'FOO.BAR../render/multiple_templates/\n') def test_render_with_content_type(self): response = self.client.get('/render/content_type/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'FOO.BAR../render/content_type/\n') self.assertEqual(response.headers['Content-Type'], 'application/x-rendertest') def test_render_with_status(self): response = self.client.get('/render/status/') self.assertEqual(response.status_code, 403) self.assertEqual(response.content, b'FOO.BAR../render/status/\n') @require_jinja2 def test_render_with_using(self): response = self.client.get('/render/using/') self.assertEqual(response.content, b'DTL\n') response = self.client.get('/render/using/?using=django') self.assertEqual(response.content, b'DTL\n') response = self.client.get('/render/using/?using=jinja2') self.assertEqual(response.content, b'Jinja2\n')
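

# Illustrative sketch (the suite's actual shortcuts/views.py is not shown in
# this section; the view name, template name, and context values below are
# placeholders): the tests above exercise keyword arguments that
# django.shortcuts.render() accepts directly, so a view along these lines
# would produce the content type, status code, and template engine they
# assert.
from django.shortcuts import render


def render_example(request):
    return render(
        request,
        'shortcuts/example.html',       # placeholder template name
        {'foo': 'FOO', 'bar': 'BAR'},   # placeholder context
        content_type='application/x-rendertest',
        status=403,
        using='jinja2',
    )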
46c6331b262673f7b06e2d4b2b430688745f4ab5a25074f7aee787b9873d16db
import time from django.core.exceptions import ImproperlyConfigured from django.http import HttpResponse from django.test import RequestFactory, SimpleTestCase, override_settings from django.test.utils import require_jinja2 from django.urls import resolve from django.views.generic import RedirectView, TemplateView, View from . import views class SimpleView(View): """ A simple view with a docstring. """ def get(self, request): return HttpResponse('This is a simple view') class SimplePostView(SimpleView): post = SimpleView.get class PostOnlyView(View): def post(self, request): return HttpResponse('This view only accepts POST') class CustomizableView(SimpleView): parameter = {} def decorator(view): view.is_decorated = True return view class DecoratedDispatchView(SimpleView): @decorator def dispatch(self, request, *args, **kwargs): return super().dispatch(request, *args, **kwargs) class AboutTemplateView(TemplateView): def get(self, request): return self.render_to_response({}) def get_template_names(self): return ['generic_views/about.html'] class AboutTemplateAttributeView(TemplateView): template_name = 'generic_views/about.html' def get(self, request): return self.render_to_response(context={}) class InstanceView(View): def get(self, request): return self class ViewTest(SimpleTestCase): rf = RequestFactory() def _assert_simple(self, response): self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'This is a simple view') def test_no_init_kwargs(self): """ A view can't be accidentally instantiated before deployment """ msg = 'This method is available only on the class, not on instances.' with self.assertRaisesMessage(AttributeError, msg): SimpleView(key='value').as_view() def test_no_init_args(self): """ A view can't be accidentally instantiated before deployment """ msg = 'as_view() takes 1 positional argument but 2 were given' with self.assertRaisesMessage(TypeError, msg): SimpleView.as_view('value') def test_pathological_http_method(self): """ The edge case of a http request that spoofs an existing method name is caught. """ self.assertEqual(SimpleView.as_view()( self.rf.get('/', REQUEST_METHOD='DISPATCH') ).status_code, 405) def test_get_only(self): """ Test a view which only allows GET doesn't allow other methods. """ self._assert_simple(SimpleView.as_view()(self.rf.get('/'))) self.assertEqual(SimpleView.as_view()(self.rf.post('/')).status_code, 405) self.assertEqual(SimpleView.as_view()( self.rf.get('/', REQUEST_METHOD='FAKE') ).status_code, 405) def test_get_and_head(self): """ Test a view which supplies a GET method also responds correctly to HEAD. """ self._assert_simple(SimpleView.as_view()(self.rf.get('/'))) response = SimpleView.as_view()(self.rf.head('/')) self.assertEqual(response.status_code, 200) def test_setup_get_and_head(self): view_instance = SimpleView() self.assertFalse(hasattr(view_instance, 'head')) view_instance.setup(self.rf.get('/')) self.assertTrue(hasattr(view_instance, 'head')) self.assertEqual(view_instance.head, view_instance.get) def test_head_no_get(self): """ Test a view which supplies no GET method responds to HEAD with HTTP 405. """ response = PostOnlyView.as_view()(self.rf.head('/')) self.assertEqual(response.status_code, 405) def test_get_and_post(self): """ Test a view which only allows both GET and POST. 
""" self._assert_simple(SimplePostView.as_view()(self.rf.get('/'))) self._assert_simple(SimplePostView.as_view()(self.rf.post('/'))) self.assertEqual(SimplePostView.as_view()( self.rf.get('/', REQUEST_METHOD='FAKE') ).status_code, 405) def test_invalid_keyword_argument(self): """ View arguments must be predefined on the class and can't be named like a HTTP method. """ msg = ( 'The method name %s is not accepted as a keyword argument to ' 'SimpleView().' ) # Check each of the allowed method names for method in SimpleView.http_method_names: with self.assertRaisesMessage(TypeError, msg % method): SimpleView.as_view(**{method: 'value'}) # Check the case view argument is ok if predefined on the class... CustomizableView.as_view(parameter="value") # ...but raises errors otherwise. msg = ( "CustomizableView() received an invalid keyword 'foobar'. " "as_view only accepts arguments that are already attributes of " "the class." ) with self.assertRaisesMessage(TypeError, msg): CustomizableView.as_view(foobar="value") def test_calling_more_than_once(self): """ Test a view can only be called once. """ request = self.rf.get('/') view = InstanceView.as_view() self.assertNotEqual(view(request), view(request)) def test_class_attributes(self): """ The callable returned from as_view() has proper docstring, name and module. """ self.assertEqual(SimpleView.__doc__, SimpleView.as_view().__doc__) self.assertEqual(SimpleView.__name__, SimpleView.as_view().__name__) self.assertEqual(SimpleView.__module__, SimpleView.as_view().__module__) def test_dispatch_decoration(self): """ Attributes set by decorators on the dispatch method are also present on the closure. """ self.assertTrue(DecoratedDispatchView.as_view().is_decorated) def test_options(self): """ Views respond to HTTP OPTIONS requests with an Allow header appropriate for the methods implemented by the view class. """ request = self.rf.options('/') view = SimpleView.as_view() response = view(request) self.assertEqual(200, response.status_code) self.assertTrue(response.headers['Allow']) def test_options_for_get_view(self): """ A view implementing GET allows GET and HEAD. """ request = self.rf.options('/') view = SimpleView.as_view() response = view(request) self._assert_allows(response, 'GET', 'HEAD') def test_options_for_get_and_post_view(self): """ A view implementing GET and POST allows GET, HEAD, and POST. """ request = self.rf.options('/') view = SimplePostView.as_view() response = view(request) self._assert_allows(response, 'GET', 'HEAD', 'POST') def test_options_for_post_view(self): """ A view implementing POST allows POST. """ request = self.rf.options('/') view = PostOnlyView.as_view() response = view(request) self._assert_allows(response, 'POST') def _assert_allows(self, response, *expected_methods): "Assert allowed HTTP methods reported in the Allow response header" response_allows = set(response.headers['Allow'].split(', ')) self.assertEqual(set(expected_methods + ('OPTIONS',)), response_allows) def test_args_kwargs_request_on_self(self): """ Test a view only has args, kwargs & request once `as_view` has been called. 
""" bare_view = InstanceView() view = InstanceView.as_view()(self.rf.get('/')) for attribute in ('args', 'kwargs', 'request'): self.assertNotIn(attribute, dir(bare_view)) self.assertIn(attribute, dir(view)) def test_overridden_setup(self): class SetAttributeMixin: def setup(self, request, *args, **kwargs): self.attr = True super().setup(request, *args, **kwargs) class CheckSetupView(SetAttributeMixin, SimpleView): def dispatch(self, request, *args, **kwargs): assert hasattr(self, 'attr') return super().dispatch(request, *args, **kwargs) response = CheckSetupView.as_view()(self.rf.get('/')) self.assertEqual(response.status_code, 200) def test_not_calling_parent_setup_error(self): class TestView(View): def setup(self, request, *args, **kwargs): pass # Not calling super().setup() msg = ( "TestView instance has no 'request' attribute. Did you override " "setup() and forget to call super()?" ) with self.assertRaisesMessage(AttributeError, msg): TestView.as_view()(self.rf.get('/')) def test_setup_adds_args_kwargs_request(self): request = self.rf.get('/') args = ('arg 1', 'arg 2') kwargs = {'kwarg_1': 1, 'kwarg_2': 'year'} view = View() view.setup(request, *args, **kwargs) self.assertEqual(request, view.request) self.assertEqual(args, view.args) self.assertEqual(kwargs, view.kwargs) def test_direct_instantiation(self): """ It should be possible to use the view by directly instantiating it without going through .as_view() (#21564). """ view = PostOnlyView() response = view.dispatch(self.rf.head('/')) self.assertEqual(response.status_code, 405) @override_settings(ROOT_URLCONF='generic_views.urls') class TemplateViewTest(SimpleTestCase): rf = RequestFactory() def _assert_about(self, response): response.render() self.assertContains(response, '<h1>About</h1>') def test_get(self): """ Test a view that simply renders a template on GET """ self._assert_about(AboutTemplateView.as_view()(self.rf.get('/about/'))) def test_head(self): """ Test a TemplateView responds correctly to HEAD """ response = AboutTemplateView.as_view()(self.rf.head('/about/')) self.assertEqual(response.status_code, 200) def test_get_template_attribute(self): """ Test a view that renders a template on GET with the template name as an attribute on the class. """ self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get('/about/'))) def test_get_generic_template(self): """ Test a completely generic view that renders a template on GET with the template name as an argument at instantiation. """ self._assert_about(TemplateView.as_view(template_name='generic_views/about.html')(self.rf.get('/about/'))) def test_template_name_required(self): """ A template view must provide a template name. """ msg = ( "TemplateResponseMixin requires either a definition of " "'template_name' or an implementation of 'get_template_names()'" ) with self.assertRaisesMessage(ImproperlyConfigured, msg): self.client.get('/template/no_template/') @require_jinja2 def test_template_engine(self): """ A template view may provide a template engine. 
""" request = self.rf.get('/using/') view = TemplateView.as_view(template_name='generic_views/using.html') self.assertEqual(view(request).render().content, b'DTL\n') view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='django') self.assertEqual(view(request).render().content, b'DTL\n') view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='jinja2') self.assertEqual(view(request).render().content, b'Jinja2\n') def test_template_params(self): """ A generic template view passes kwargs as context. """ response = self.client.get('/template/simple/bar/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['foo'], 'bar') self.assertIsInstance(response.context['view'], View) def test_extra_template_params(self): """ A template view can be customized to return extra context. """ response = self.client.get('/template/custom/bar/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['foo'], 'bar') self.assertEqual(response.context['key'], 'value') self.assertIsInstance(response.context['view'], View) def test_cached_views(self): """ A template view can be cached """ response = self.client.get('/template/cached/bar/') self.assertEqual(response.status_code, 200) time.sleep(1.0) response2 = self.client.get('/template/cached/bar/') self.assertEqual(response2.status_code, 200) self.assertEqual(response.content, response2.content) time.sleep(2.0) # Let the cache expire and test again response2 = self.client.get('/template/cached/bar/') self.assertEqual(response2.status_code, 200) self.assertNotEqual(response.content, response2.content) def test_content_type(self): response = self.client.get('/template/content_type/') self.assertEqual(response.headers['Content-Type'], 'text/plain') def test_resolve_view(self): match = resolve('/template/content_type/') self.assertIs(match.func.view_class, TemplateView) self.assertEqual(match.func.view_initkwargs['content_type'], 'text/plain') def test_resolve_login_required_view(self): match = resolve('/template/login_required/') self.assertIs(match.func.view_class, TemplateView) def test_extra_context(self): response = self.client.get('/template/extra_context/') self.assertEqual(response.context['title'], 'Title') @override_settings(ROOT_URLCONF='generic_views.urls') class RedirectViewTest(SimpleTestCase): rf = RequestFactory() def test_no_url(self): "Without any configuration, returns HTTP 410 GONE" response = RedirectView.as_view()(self.rf.get('/foo/')) self.assertEqual(response.status_code, 410) def test_default_redirect(self): "Default is a temporary redirect" response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/') def test_permanent_redirect(self): "Permanent redirects are an option" response = RedirectView.as_view(url='/bar/', permanent=True)(self.rf.get('/foo/')) self.assertEqual(response.status_code, 301) self.assertEqual(response.url, '/bar/') def test_temporary_redirect(self): "Temporary redirects are an option" response = RedirectView.as_view(url='/bar/', permanent=False)(self.rf.get('/foo/')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/') def test_include_args(self): "GET arguments can be included in the redirected URL" response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/') response = RedirectView.as_view(url='/bar/', 
query_string=True)(self.rf.get('/foo/?pork=spam')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/?pork=spam') def test_include_urlencoded_args(self): "GET arguments can be URL-encoded when included in the redirected URL" response = RedirectView.as_view(url='/bar/', query_string=True)( self.rf.get('/foo/?unicode=%E2%9C%93')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/?unicode=%E2%9C%93') def test_parameter_substitution(self): "Redirection URLs can be parameterized" response = RedirectView.as_view(url='/bar/%(object_id)d/')(self.rf.get('/foo/42/'), object_id=42) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/42/') def test_named_url_pattern(self): "Named pattern parameter should reverse to the matching pattern" response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), pk=1) self.assertEqual(response.status_code, 302) self.assertEqual(response.headers['Location'], '/detail/artist/1/') def test_named_url_pattern_using_args(self): response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), 1) self.assertEqual(response.status_code, 302) self.assertEqual(response.headers['Location'], '/detail/artist/1/') def test_redirect_POST(self): "Default is a temporary redirect" response = RedirectView.as_view(url='/bar/')(self.rf.post('/foo/')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/') def test_redirect_HEAD(self): "Default is a temporary redirect" response = RedirectView.as_view(url='/bar/')(self.rf.head('/foo/')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/') def test_redirect_OPTIONS(self): "Default is a temporary redirect" response = RedirectView.as_view(url='/bar/')(self.rf.options('/foo/')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/') def test_redirect_PUT(self): "Default is a temporary redirect" response = RedirectView.as_view(url='/bar/')(self.rf.put('/foo/')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/') def test_redirect_PATCH(self): "Default is a temporary redirect" response = RedirectView.as_view(url='/bar/')(self.rf.patch('/foo/')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/') def test_redirect_DELETE(self): "Default is a temporary redirect" response = RedirectView.as_view(url='/bar/')(self.rf.delete('/foo/')) self.assertEqual(response.status_code, 302) self.assertEqual(response.url, '/bar/') def test_redirect_when_meta_contains_no_query_string(self): "regression for #16705" # we can't use self.rf.get because it always sets QUERY_STRING response = RedirectView.as_view(url='/bar/')(self.rf.request(PATH_INFO='/foo/')) self.assertEqual(response.status_code, 302) def test_direct_instantiation(self): """ It should be possible to use the view without going through .as_view() (#21564). 
""" view = RedirectView() response = view.dispatch(self.rf.head('/foo/')) self.assertEqual(response.status_code, 410) class GetContextDataTest(SimpleTestCase): def test_get_context_data_super(self): test_view = views.CustomContextView() context = test_view.get_context_data(kwarg_test='kwarg_value') # the test_name key is inserted by the test classes parent self.assertIn('test_name', context) self.assertEqual(context['kwarg_test'], 'kwarg_value') self.assertEqual(context['custom_key'], 'custom_value') # test that kwarg overrides values assigned higher up context = test_view.get_context_data(test_name='test_value') self.assertEqual(context['test_name'], 'test_value') def test_object_at_custom_name_in_context_data(self): # Checks 'pony' key presence in dict returned by get_context_date test_view = views.CustomSingleObjectView() test_view.context_object_name = 'pony' context = test_view.get_context_data() self.assertEqual(context['pony'], test_view.object) def test_object_in_get_context_data(self): # Checks 'object' key presence in dict returned by get_context_date #20234 test_view = views.CustomSingleObjectView() context = test_view.get_context_data() self.assertEqual(context['object'], test_view.object) class UseMultipleObjectMixinTest(SimpleTestCase): rf = RequestFactory() def test_use_queryset_from_view(self): test_view = views.CustomMultipleObjectMixinView() test_view.get(self.rf.get('/')) # Don't pass queryset as argument context = test_view.get_context_data() self.assertEqual(context['object_list'], test_view.queryset) def test_overwrite_queryset(self): test_view = views.CustomMultipleObjectMixinView() test_view.get(self.rf.get('/')) queryset = [{'name': 'Lennon'}, {'name': 'Ono'}] self.assertNotEqual(test_view.queryset, queryset) # Overwrite the view's queryset with queryset from kwarg context = test_view.get_context_data(object_list=queryset) self.assertEqual(context['object_list'], queryset) class SingleObjectTemplateResponseMixinTest(SimpleTestCase): def test_template_mixin_without_template(self): """ We want to makes sure that if you use a template mixin, but forget the template, it still tells you it's ImproperlyConfigured instead of TemplateDoesNotExist. """ view = views.TemplateResponseWithoutTemplate() msg = ( "TemplateResponseMixin requires either a definition of " "'template_name' or an implementation of 'get_template_names()'" ) with self.assertRaisesMessage(ImproperlyConfigured, msg): view.get_template_names()
34a3ecb2b096e6acff614ca5c9e422cae8b30d489eb36bda8bed15998eddb5e2
from django.contrib.auth import views as auth_views from django.contrib.auth.decorators import login_required from django.urls import path, re_path from django.views.decorators.cache import cache_page from django.views.generic import TemplateView, dates from . import views from .models import Book urlpatterns = [ # TemplateView path('template/no_template/', TemplateView.as_view()), path('template/login_required/', login_required(TemplateView.as_view())), path('template/simple/<foo>/', TemplateView.as_view(template_name='generic_views/about.html')), path('template/custom/<foo>/', views.CustomTemplateView.as_view(template_name='generic_views/about.html')), path( 'template/content_type/', TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain'), ), path( 'template/cached/<foo>/', cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html')), ), path( 'template/extra_context/', TemplateView.as_view(template_name='generic_views/about.html', extra_context={'title': 'Title'}), ), # DetailView path('detail/obj/', views.ObjectDetail.as_view()), path('detail/artist/<int:pk>/', views.ArtistDetail.as_view(), name='artist_detail'), path('detail/author/<int:pk>/', views.AuthorDetail.as_view(), name='author_detail'), path('detail/author/bycustompk/<foo>/', views.AuthorDetail.as_view(pk_url_kwarg='foo')), path('detail/author/byslug/<slug>/', views.AuthorDetail.as_view()), path('detail/author/bycustomslug/<foo>/', views.AuthorDetail.as_view(slug_url_kwarg='foo')), path('detail/author/bypkignoreslug/<int:pk>-<slug>/', views.AuthorDetail.as_view()), path('detail/author/bypkandslug/<int:pk>-<slug>/', views.AuthorDetail.as_view(query_pk_and_slug=True)), path('detail/author/<int:pk>/template_name_suffix/', views.AuthorDetail.as_view(template_name_suffix='_view')), path( 'detail/author/<int:pk>/template_name/', views.AuthorDetail.as_view(template_name='generic_views/about.html'), ), path('detail/author/<int:pk>/context_object_name/', views.AuthorDetail.as_view(context_object_name='thingy')), path('detail/author/<int:pk>/custom_detail/', views.AuthorCustomDetail.as_view()), path('detail/author/<int:pk>/dupe_context_object_name/', views.AuthorDetail.as_view(context_object_name='object')), path('detail/page/<int:pk>/field/', views.PageDetail.as_view()), path(r'detail/author/invalid/url/', views.AuthorDetail.as_view()), path('detail/author/invalid/qs/', views.AuthorDetail.as_view(queryset=None)), path('detail/nonmodel/1/', views.NonModelDetail.as_view()), path('detail/doesnotexist/<pk>/', views.ObjectDoesNotExistDetail.as_view()), # FormView path('contact/', views.ContactView.as_view()), path('late-validation/', views.LateValidationView.as_view()), # Create/UpdateView path('edit/artists/create/', views.ArtistCreate.as_view()), path('edit/artists/<int:pk>/update/', views.ArtistUpdate.as_view()), path('edit/authors/create/naive/', views.NaiveAuthorCreate.as_view()), path('edit/authors/create/redirect/', views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')), path( 'edit/authors/create/interpolate_redirect/', views.NaiveAuthorCreate.as_view(success_url='/edit/author/{id}/update/'), ), path( 'edit/authors/create/interpolate_redirect_nonascii/', views.NaiveAuthorCreate.as_view(success_url='/%C3%A9dit/author/{id}/update/'), ), path('edit/authors/create/restricted/', views.AuthorCreateRestricted.as_view()), re_path('^[eé]dit/authors/create/$', views.AuthorCreate.as_view()), path('edit/authors/create/special/', views.SpecializedAuthorCreate.as_view()), 
path('edit/author/<int:pk>/update/naive/', views.NaiveAuthorUpdate.as_view()), path( 'edit/author/<int:pk>/update/redirect/', views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/') ), path( 'edit/author/<int:pk>/update/interpolate_redirect/', views.NaiveAuthorUpdate.as_view(success_url='/edit/author/{id}/update/') ), path( 'edit/author/<int:pk>/update/interpolate_redirect_nonascii/', views.NaiveAuthorUpdate.as_view(success_url='/%C3%A9dit/author/{id}/update/'), ), re_path('^[eé]dit/author/(?P<pk>[0-9]+)/update/$', views.AuthorUpdate.as_view()), path('edit/author/update/', views.OneAuthorUpdate.as_view()), path('edit/author/<int:pk>/update/special/', views.SpecializedAuthorUpdate.as_view()), path('edit/author/<int:pk>/delete/naive/', views.NaiveAuthorDelete.as_view()), path( 'edit/author/<int:pk>/delete/redirect/', views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/'), ), path( 'edit/author/<int:pk>/delete/interpolate_redirect/', views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/?deleted={id}') ), path( 'edit/author/<int:pk>/delete/interpolate_redirect_nonascii/', views.NaiveAuthorDelete.as_view(success_url='/%C3%A9dit/authors/create/?deleted={id}') ), path('edit/author/<int:pk>/delete/', views.AuthorDelete.as_view()), path('edit/author/<int:pk>/delete/special/', views.SpecializedAuthorDelete.as_view()), # ArchiveIndexView path('dates/books/', views.BookArchive.as_view()), path('dates/books/context_object_name/', views.BookArchive.as_view(context_object_name='thingies')), path('dates/books/allow_empty/', views.BookArchive.as_view(allow_empty=True)), path('dates/books/template_name/', views.BookArchive.as_view(template_name='generic_views/list.html')), path('dates/books/template_name_suffix/', views.BookArchive.as_view(template_name_suffix='_detail')), path('dates/books/invalid/', views.BookArchive.as_view(queryset=None)), path('dates/books/paginated/', views.BookArchive.as_view(paginate_by=10)), path('dates/books/reverse/', views.BookArchive.as_view(queryset=Book.objects.order_by('pubdate'))), path('dates/books/by_month/', views.BookArchive.as_view(date_list_period='month')), path('dates/booksignings/', views.BookSigningArchive.as_view()), path('dates/books/sortedbyname/', views.BookArchive.as_view(ordering='name')), path('dates/books/sortedbynamedec/', views.BookArchive.as_view(ordering='-name')), path('dates/books/without_date_field/', views.BookArchiveWithoutDateField.as_view()), # ListView path('list/dict/', views.DictList.as_view()), path('list/dict/paginated/', views.DictList.as_view(paginate_by=1)), path('list/artists/', views.ArtistList.as_view(), name='artists_list'), path('list/authors/', views.AuthorList.as_view(), name='authors_list'), path('list/authors/paginated/', views.AuthorList.as_view(paginate_by=30)), path('list/authors/paginated/<int:page>/', views.AuthorList.as_view(paginate_by=30)), path('list/authors/paginated-orphaned/', views.AuthorList.as_view(paginate_by=30, paginate_orphans=2)), path('list/authors/notempty/', views.AuthorList.as_view(allow_empty=False)), path('list/authors/notempty/paginated/', views.AuthorList.as_view(allow_empty=False, paginate_by=2)), path('list/authors/template_name/', views.AuthorList.as_view(template_name='generic_views/list.html')), path('list/authors/template_name_suffix/', views.AuthorList.as_view(template_name_suffix='_objects')), path('list/authors/context_object_name/', views.AuthorList.as_view(context_object_name='author_list')), path('list/authors/dupe_context_object_name/', 
views.AuthorList.as_view(context_object_name='object_list')), path('list/authors/invalid/', views.AuthorList.as_view(queryset=None)), path( 'list/authors/get_queryset/', views.AuthorListGetQuerysetReturnsNone.as_view(), ), path( 'list/authors/paginated/custom_class/', views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator), ), path('list/authors/paginated/custom_page_kwarg/', views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')), path('list/authors/paginated/custom_constructor/', views.AuthorListCustomPaginator.as_view()), path('list/books/sorted/', views.BookList.as_view(ordering='name')), path('list/books/sortedbypagesandnamedec/', views.BookList.as_view(ordering=('pages', '-name'))), # YearArchiveView # Mixing keyword and positional captures below is intentional; the views # ought to be able to accept either. path('dates/books/<int:year>/', views.BookYearArchive.as_view()), path('dates/books/<int:year>/make_object_list/', views.BookYearArchive.as_view(make_object_list=True)), path('dates/books/<int:year>/allow_empty/', views.BookYearArchive.as_view(allow_empty=True)), path('dates/books/<int:year>/allow_future/', views.BookYearArchive.as_view(allow_future=True)), path('dates/books/<int:year>/paginated/', views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)), path( 'dates/books/<int:year>/sortedbyname/', views.BookYearArchive.as_view(make_object_list=True, ordering='name'), ), path( 'dates/books/<int:year>/sortedbypageandnamedec/', views.BookYearArchive.as_view(make_object_list=True, ordering=('pages', '-name')), ), path('dates/books/no_year/', views.BookYearArchive.as_view()), path('dates/books/<int:year>/reverse/', views.BookYearArchive.as_view(queryset=Book.objects.order_by('pubdate'))), path('dates/booksignings/<int:year>/', views.BookSigningYearArchive.as_view()), # MonthArchiveView path('dates/books/<int:year>/<int:month>/', views.BookMonthArchive.as_view(month_format='%m')), path('dates/books/<int:year>/<month>/', views.BookMonthArchive.as_view()), path('dates/books/without_month/<int:year>/', views.BookMonthArchive.as_view()), path('dates/books/<int:year>/<month>/allow_empty/', views.BookMonthArchive.as_view(allow_empty=True)), path('dates/books/<int:year>/<month>/allow_future/', views.BookMonthArchive.as_view(allow_future=True)), path('dates/books/<int:year>/<month>/paginated/', views.BookMonthArchive.as_view(paginate_by=30)), path('dates/books/<int:year>/no_month/', views.BookMonthArchive.as_view()), path('dates/booksignings/<int:year>/<month>/', views.BookSigningMonthArchive.as_view()), # WeekArchiveView path('dates/books/<int:year>/week/<int:week>/', views.BookWeekArchive.as_view()), path('dates/books/<int:year>/week/<int:week>/allow_empty/', views.BookWeekArchive.as_view(allow_empty=True)), path('dates/books/<int:year>/week/<int:week>/allow_future/', views.BookWeekArchive.as_view(allow_future=True)), path('dates/books/<int:year>/week/<int:week>/paginated/', views.BookWeekArchive.as_view(paginate_by=30)), path('dates/books/<int:year>/week/no_week/', views.BookWeekArchive.as_view()), path('dates/books/<int:year>/week/<int:week>/monday/', views.BookWeekArchive.as_view(week_format='%W')), path( 'dates/books/<int:year>/week/<int:week>/unknown_week_format/', views.BookWeekArchive.as_view(week_format='%T'), ), path( 'dates/books/<int:year>/week/<int:week>/iso_format/', views.BookWeekArchive.as_view(year_format='%G', week_format='%V'), ), path( 'dates/books/<int:year>/week/<int:week>/invalid_iso_week_year_format/', 
views.BookWeekArchive.as_view(week_format='%V'), ), path('dates/booksignings/<int:year>/week/<int:week>/', views.BookSigningWeekArchive.as_view()), # DayArchiveView path('dates/books/<int:year>/<int:month>/<int:day>/', views.BookDayArchive.as_view(month_format='%m')), path('dates/books/<int:year>/<month>/<int:day>/', views.BookDayArchive.as_view()), path('dates/books/<int:year>/<month>/<int:day>/allow_empty/', views.BookDayArchive.as_view(allow_empty=True)), path('dates/books/<int:year>/<month>/<int:day>/allow_future/', views.BookDayArchive.as_view(allow_future=True)), path( 'dates/books/<int:year>/<month>/<int:day>/allow_empty_and_future/', views.BookDayArchive.as_view(allow_empty=True, allow_future=True), ), path('dates/books/<int:year>/<month>/<int:day>/paginated/', views.BookDayArchive.as_view(paginate_by=True)), path('dates/books/<int:year>/<month>/no_day/', views.BookDayArchive.as_view()), path('dates/booksignings/<int:year>/<month>/<int:day>/', views.BookSigningDayArchive.as_view()), # TodayArchiveView path('dates/books/today/', views.BookTodayArchive.as_view()), path('dates/books/today/allow_empty/', views.BookTodayArchive.as_view(allow_empty=True)), path('dates/booksignings/today/', views.BookSigningTodayArchive.as_view()), # DateDetailView path('dates/books/<int:year>/<int:month>/<day>/<int:pk>/', views.BookDetail.as_view(month_format='%m')), path('dates/books/<int:year>/<month>/<day>/<int:pk>/', views.BookDetail.as_view()), path( 'dates/books/<int:year>/<month>/<int:day>/<int:pk>/allow_future/', views.BookDetail.as_view(allow_future=True), ), path('dates/books/<int:year>/<month>/<int:day>/nopk/', views.BookDetail.as_view()), path('dates/books/<int:year>/<month>/<int:day>/byslug/<slug:slug>/', views.BookDetail.as_view()), path( 'dates/books/get_object_custom_queryset/<int:year>/<month>/<int:day>/<int:pk>/', views.BookDetailGetObjectCustomQueryset.as_view(), ), path('dates/booksignings/<int:year>/<month>/<int:day>/<int:pk>/', views.BookSigningDetail.as_view()), # Useful for testing redirects path('accounts/login/', auth_views.LoginView.as_view()), path('BaseDateListViewTest/', dates.BaseDateListView.as_view()), ]
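

# Illustrative fragment (not wired into the urlpatterns above; the route names
# and the example_patterns list are invented here): the patterns in this module
# configure generic views purely through as_view() keyword arguments, and
# RedirectViewTest exercises the same mechanism. A comparable set of entries,
# assuming a reversible named pattern such as the 'artist_detail' route defined
# above, might look like this.
from django.urls import path
from django.views.generic import RedirectView, TemplateView

example_patterns = [
    # Fixed-target redirect that keeps the query string and answers 302.
    path('example/go/', RedirectView.as_view(url='/bar/', query_string=True)),
    # Redirect by reversing a named pattern; captured kwargs feed the reverse.
    path('example/artist/<int:pk>/', RedirectView.as_view(pattern_name='artist_detail')),
    # Permanent (301) redirect.
    path('example/moved/', RedirectView.as_view(url='/bar/', permanent=True)),
    # TemplateView configured entirely at URLconf level.
    path(
        'example/about/',
        TemplateView.as_view(
            template_name='generic_views/about.html',
            extra_context={'title': 'Title'},
        ),
    ),
]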
bc71f4ec6d7001e36e9d9c474683c12c1bca8bdb41a2709a7e58bfe8380d504b
import gzip import random import re import struct from io import BytesIO from urllib.parse import quote from django.conf import settings from django.core import mail from django.core.exceptions import PermissionDenied from django.http import ( FileResponse, HttpRequest, HttpResponse, HttpResponseNotFound, HttpResponsePermanentRedirect, HttpResponseRedirect, StreamingHttpResponse, ) from django.middleware.clickjacking import XFrameOptionsMiddleware from django.middleware.common import ( BrokenLinkEmailsMiddleware, CommonMiddleware, ) from django.middleware.gzip import GZipMiddleware from django.middleware.http import ConditionalGetMiddleware from django.test import RequestFactory, SimpleTestCase, override_settings int2byte = struct.Struct(">B").pack def get_response_empty(request): return HttpResponse() def get_response_404(request): return HttpResponseNotFound() @override_settings(ROOT_URLCONF='middleware.urls') class CommonMiddlewareTest(SimpleTestCase): rf = RequestFactory() @override_settings(APPEND_SLASH=True) def test_append_slash_have_slash(self): """ URLs with slashes should go unmolested. """ request = self.rf.get('/slash/') self.assertIsNone(CommonMiddleware(get_response_404).process_request(request)) self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_slashless_resource(self): """ Matches to explicit slashless URLs should go unmolested. """ def get_response(req): return HttpResponse("Here's the text of the Web page.") request = self.rf.get('/noslash') self.assertIsNone(CommonMiddleware(get_response).process_request(request)) self.assertEqual( CommonMiddleware(get_response)(request).content, b"Here's the text of the Web page.", ) @override_settings(APPEND_SLASH=True) def test_append_slash_slashless_unknown(self): """ APPEND_SLASH should not redirect to unknown resources. """ request = self.rf.get('/unknown') response = CommonMiddleware(get_response_404)(request) self.assertEqual(response.status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_redirect(self): """ APPEND_SLASH should redirect slashless URLs to a valid pattern. """ request = self.rf.get('/slash') r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) @override_settings(APPEND_SLASH=True) def test_append_slash_redirect_querystring(self): """ APPEND_SLASH should preserve querystrings when redirecting. """ request = self.rf.get('/slash?test=1') resp = CommonMiddleware(get_response_404)(request) self.assertEqual(resp.url, '/slash/?test=1') @override_settings(APPEND_SLASH=True) def test_append_slash_redirect_querystring_have_slash(self): """ APPEND_SLASH should append slash to path when redirecting a request with a querystring ending with slash. """ request = self.rf.get('/slash?test=slash/') resp = CommonMiddleware(get_response_404)(request) self.assertIsInstance(resp, HttpResponsePermanentRedirect) self.assertEqual(resp.url, '/slash/?test=slash/') @override_settings(APPEND_SLASH=True, DEBUG=True) def test_append_slash_no_redirect_on_POST_in_DEBUG(self): """ While in debug mode, an exception is raised with a warning when a failed attempt is made to POST, PUT, or PATCH to an URL which would normally be redirected to a slashed version. """ msg = "maintaining %s data. 
Change your form to point to testserver/slash/" request = self.rf.get('/slash') request.method = 'POST' with self.assertRaisesMessage(RuntimeError, msg % request.method): CommonMiddleware(get_response_404)(request) request = self.rf.get('/slash') request.method = 'PUT' with self.assertRaisesMessage(RuntimeError, msg % request.method): CommonMiddleware(get_response_404)(request) request = self.rf.get('/slash') request.method = 'PATCH' with self.assertRaisesMessage(RuntimeError, msg % request.method): CommonMiddleware(get_response_404)(request) @override_settings(APPEND_SLASH=False) def test_append_slash_disabled(self): """ Disabling append slash functionality should leave slashless URLs alone. """ request = self.rf.get('/slash') self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_opt_out(self): """ Views marked with @no_append_slash should be left alone. """ request = self.rf.get('/sensitive_fbv') self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) request = self.rf.get('/sensitive_cbv') self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_quoted(self): """ URLs which require quoting should be redirected to their slash version. """ request = self.rf.get(quote('/needsquoting#')) r = CommonMiddleware(get_response_404)(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, '/needsquoting%23/') @override_settings(APPEND_SLASH=True) def test_append_slash_leading_slashes(self): """ Paths starting with two slashes are escaped to prevent open redirects. If there's a URL pattern that allows paths to start with two slashes, a request with path //evil.com must not redirect to //evil.com/ (appended slash) which is a schemaless absolute URL. The browser would navigate to evil.com/. """ # Use 4 slashes because of RequestFactory behavior. request = self.rf.get('////evil.com/security') r = CommonMiddleware(get_response_404).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, '/%2Fevil.com/security/') r = CommonMiddleware(get_response_404)(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, '/%2Fevil.com/security/') @override_settings(APPEND_SLASH=False, PREPEND_WWW=True) def test_prepend_www(self): request = self.rf.get('/path/') r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, 'http://www.testserver/path/') @override_settings(APPEND_SLASH=True, PREPEND_WWW=True) def test_prepend_www_append_slash_have_slash(self): request = self.rf.get('/slash/') r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, 'http://www.testserver/slash/') @override_settings(APPEND_SLASH=True, PREPEND_WWW=True) def test_prepend_www_append_slash_slashless(self): request = self.rf.get('/slash') r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, 'http://www.testserver/slash/') # The following tests examine expected behavior given a custom URLconf that # overrides the default one through the request object. @override_settings(APPEND_SLASH=True) def test_append_slash_have_slash_custom_urlconf(self): """ URLs with slashes should go unmolested. 
""" request = self.rf.get('/customurlconf/slash/') request.urlconf = 'middleware.extra_urls' self.assertIsNone(CommonMiddleware(get_response_404).process_request(request)) self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_slashless_resource_custom_urlconf(self): """ Matches to explicit slashless URLs should go unmolested. """ def get_response(req): return HttpResponse("Web content") request = self.rf.get('/customurlconf/noslash') request.urlconf = 'middleware.extra_urls' self.assertIsNone(CommonMiddleware(get_response).process_request(request)) self.assertEqual(CommonMiddleware(get_response)(request).content, b'Web content') @override_settings(APPEND_SLASH=True) def test_append_slash_slashless_unknown_custom_urlconf(self): """ APPEND_SLASH should not redirect to unknown resources. """ request = self.rf.get('/customurlconf/unknown') request.urlconf = 'middleware.extra_urls' self.assertIsNone(CommonMiddleware(get_response_404).process_request(request)) self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_redirect_custom_urlconf(self): """ APPEND_SLASH should redirect slashless URLs to a valid pattern. """ request = self.rf.get('/customurlconf/slash') request.urlconf = 'middleware.extra_urls' r = CommonMiddleware(get_response_404)(request) self.assertIsNotNone(r, "CommonMiddleware failed to return APPEND_SLASH redirect using request.urlconf") self.assertEqual(r.status_code, 301) self.assertEqual(r.url, '/customurlconf/slash/') @override_settings(APPEND_SLASH=True, DEBUG=True) def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self): """ While in debug mode, an exception is raised with a warning when a failed attempt is made to POST to an URL which would normally be redirected to a slashed version. """ request = self.rf.get('/customurlconf/slash') request.urlconf = 'middleware.extra_urls' request.method = 'POST' with self.assertRaisesMessage(RuntimeError, 'end in a slash'): CommonMiddleware(get_response_404)(request) @override_settings(APPEND_SLASH=False) def test_append_slash_disabled_custom_urlconf(self): """ Disabling append slash functionality should leave slashless URLs alone. """ request = self.rf.get('/customurlconf/slash') request.urlconf = 'middleware.extra_urls' self.assertIsNone(CommonMiddleware(get_response_404).process_request(request)) self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_quoted_custom_urlconf(self): """ URLs which require quoting should be redirected to their slash version. 
""" request = self.rf.get(quote('/customurlconf/needsquoting#')) request.urlconf = 'middleware.extra_urls' r = CommonMiddleware(get_response_404)(request) self.assertIsNotNone(r, "CommonMiddleware failed to return APPEND_SLASH redirect using request.urlconf") self.assertEqual(r.status_code, 301) self.assertEqual(r.url, '/customurlconf/needsquoting%23/') @override_settings(APPEND_SLASH=False, PREPEND_WWW=True) def test_prepend_www_custom_urlconf(self): request = self.rf.get('/customurlconf/path/') request.urlconf = 'middleware.extra_urls' r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, 'http://www.testserver/customurlconf/path/') @override_settings(APPEND_SLASH=True, PREPEND_WWW=True) def test_prepend_www_append_slash_have_slash_custom_urlconf(self): request = self.rf.get('/customurlconf/slash/') request.urlconf = 'middleware.extra_urls' r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, 'http://www.testserver/customurlconf/slash/') @override_settings(APPEND_SLASH=True, PREPEND_WWW=True) def test_prepend_www_append_slash_slashless_custom_urlconf(self): request = self.rf.get('/customurlconf/slash') request.urlconf = 'middleware.extra_urls' r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, 'http://www.testserver/customurlconf/slash/') # Tests for the Content-Length header def test_content_length_header_added(self): def get_response(req): response = HttpResponse('content') self.assertNotIn('Content-Length', response) return response response = CommonMiddleware(get_response)(self.rf.get('/')) self.assertEqual(int(response.headers['Content-Length']), len(response.content)) def test_content_length_header_not_added_for_streaming_response(self): def get_response(req): response = StreamingHttpResponse('content') self.assertNotIn('Content-Length', response) return response response = CommonMiddleware(get_response)(self.rf.get('/')) self.assertNotIn('Content-Length', response) def test_content_length_header_not_changed(self): bad_content_length = 500 def get_response(req): response = HttpResponse() response.headers['Content-Length'] = bad_content_length return response response = CommonMiddleware(get_response)(self.rf.get('/')) self.assertEqual(int(response.headers['Content-Length']), bad_content_length) # Other tests @override_settings(DISALLOWED_USER_AGENTS=[re.compile(r'foo')]) def test_disallowed_user_agents(self): request = self.rf.get('/slash') request.META['HTTP_USER_AGENT'] = 'foo' with self.assertRaisesMessage(PermissionDenied, 'Forbidden user agent'): CommonMiddleware(get_response_empty).process_request(request) def test_non_ascii_query_string_does_not_crash(self): """Regression test for #15152""" request = self.rf.get('/slash') request.META['QUERY_STRING'] = 'drink=café' r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) def test_response_redirect_class(self): request = self.rf.get('/slash') r = CommonMiddleware(get_response_404)(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, '/slash/') self.assertIsInstance(r, HttpResponsePermanentRedirect) def test_response_redirect_class_subclass(self): class MyCommonMiddleware(CommonMiddleware): response_redirect_class = HttpResponseRedirect request = self.rf.get('/slash') r = MyCommonMiddleware(get_response_404)(request) 
self.assertEqual(r.status_code, 302) self.assertEqual(r.url, '/slash/') self.assertIsInstance(r, HttpResponseRedirect) @override_settings( IGNORABLE_404_URLS=[re.compile(r'foo')], MANAGERS=[('PHD', '[email protected]')], ) class BrokenLinkEmailsMiddlewareTest(SimpleTestCase): rf = RequestFactory() def setUp(self): self.req = self.rf.get('/regular_url/that/does/not/exist') def get_response(self, req): return self.client.get(req.path) def test_404_error_reporting(self): self.req.META['HTTP_REFERER'] = '/another/url/' BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 1) self.assertIn('Broken', mail.outbox[0].subject) def test_404_error_reporting_no_referer(self): BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) def test_404_error_reporting_ignored_url(self): self.req.path = self.req.path_info = 'foo_url/that/does/not/exist' BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) def test_custom_request_checker(self): class SubclassedMiddleware(BrokenLinkEmailsMiddleware): ignored_user_agent_patterns = (re.compile(r'Spider.*'), re.compile(r'Robot.*')) def is_ignorable_request(self, request, uri, domain, referer): '''Check user-agent in addition to normal checks.''' if super().is_ignorable_request(request, uri, domain, referer): return True user_agent = request.META['HTTP_USER_AGENT'] return any(pattern.search(user_agent) for pattern in self.ignored_user_agent_patterns) self.req.META['HTTP_REFERER'] = '/another/url/' self.req.META['HTTP_USER_AGENT'] = 'Spider machine 3.4' SubclassedMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) self.req.META['HTTP_USER_AGENT'] = 'My user agent' SubclassedMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 1) def test_referer_equal_to_requested_url(self): """ Some bots set the referer to the current URL to avoid being blocked by an referer check (#25302). """ self.req.META['HTTP_REFERER'] = self.req.path BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) # URL with scheme and domain should also be ignored self.req.META['HTTP_REFERER'] = 'http://testserver%s' % self.req.path BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) # URL with a different scheme should be ignored as well because bots # tend to use http:// in referers even when browsing HTTPS websites. 
self.req.META['HTTP_X_PROTO'] = 'https' self.req.META['SERVER_PORT'] = 443 with self.settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_PROTO', 'https')): BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) def test_referer_equal_to_requested_url_on_another_domain(self): self.req.META['HTTP_REFERER'] = 'http://anotherserver%s' % self.req.path BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 1) @override_settings(APPEND_SLASH=True) def test_referer_equal_to_requested_url_without_trailing_slash_when_append_slash_is_set(self): self.req.path = self.req.path_info = '/regular_url/that/does/not/exist/' self.req.META['HTTP_REFERER'] = self.req.path_info[:-1] BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) @override_settings(APPEND_SLASH=False) def test_referer_equal_to_requested_url_without_trailing_slash_when_append_slash_is_unset(self): self.req.path = self.req.path_info = '/regular_url/that/does/not/exist/' self.req.META['HTTP_REFERER'] = self.req.path_info[:-1] BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 1) @override_settings(ROOT_URLCONF='middleware.cond_get_urls') class ConditionalGetMiddlewareTest(SimpleTestCase): request_factory = RequestFactory() def setUp(self): self.req = self.request_factory.get('/') self.resp_headers = {} def get_response(self, req): resp = self.client.get(req.path_info) for key, value in self.resp_headers.items(): resp[key] = value return resp # Tests for the ETag header def test_middleware_calculates_etag(self): resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) self.assertNotEqual('', resp['ETag']) def test_middleware_wont_overwrite_etag(self): self.resp_headers['ETag'] = 'eggs' resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) self.assertEqual('eggs', resp['ETag']) def test_no_etag_streaming_response(self): def get_response(req): return StreamingHttpResponse(['content']) self.assertFalse(ConditionalGetMiddleware(get_response)(self.req).has_header('ETag')) def test_no_etag_response_empty_content(self): def get_response(req): return HttpResponse() self.assertFalse(ConditionalGetMiddleware(get_response)(self.req).has_header('ETag')) def test_no_etag_no_store_cache(self): self.resp_headers['Cache-Control'] = 'No-Cache, No-Store, Max-age=0' self.assertFalse(ConditionalGetMiddleware(self.get_response)(self.req).has_header('ETag')) def test_etag_extended_cache_control(self): self.resp_headers['Cache-Control'] = 'my-directive="my-no-store"' self.assertTrue(ConditionalGetMiddleware(self.get_response)(self.req).has_header('ETag')) def test_if_none_match_and_no_etag(self): self.req.META['HTTP_IF_NONE_MATCH'] = 'spam' resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) def test_no_if_none_match_and_etag(self): self.resp_headers['ETag'] = 'eggs' resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) def test_if_none_match_and_same_etag(self): self.req.META['HTTP_IF_NONE_MATCH'] = '"spam"' self.resp_headers['ETag'] = '"spam"' resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 304) def test_if_none_match_and_different_etag(self): self.req.META['HTTP_IF_NONE_MATCH'] = 'spam' self.resp_headers['ETag'] = 'eggs' resp = ConditionalGetMiddleware(self.get_response)(self.req) 
self.assertEqual(resp.status_code, 200) def test_if_none_match_and_redirect(self): def get_response(req): resp = self.client.get(req.path_info) resp['ETag'] = 'spam' resp['Location'] = '/' resp.status_code = 301 return resp self.req.META['HTTP_IF_NONE_MATCH'] = 'spam' resp = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(resp.status_code, 301) def test_if_none_match_and_client_error(self): def get_response(req): resp = self.client.get(req.path_info) resp['ETag'] = 'spam' resp.status_code = 400 return resp self.req.META['HTTP_IF_NONE_MATCH'] = 'spam' resp = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(resp.status_code, 400) # Tests for the Last-Modified header def test_if_modified_since_and_no_last_modified(self): self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT' resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) def test_no_if_modified_since_and_last_modified(self): self.resp_headers['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT' resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) def test_if_modified_since_and_same_last_modified(self): self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT' self.resp_headers['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT' self.resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(self.resp.status_code, 304) def test_if_modified_since_and_last_modified_in_the_past(self): self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT' self.resp_headers['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT' resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 304) def test_if_modified_since_and_last_modified_in_the_future(self): self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT' self.resp_headers['Last-Modified'] = 'Sat, 12 Feb 2011 17:41:44 GMT' self.resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(self.resp.status_code, 200) def test_if_modified_since_and_redirect(self): def get_response(req): resp = self.client.get(req.path_info) resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT' resp['Location'] = '/' resp.status_code = 301 return resp self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT' resp = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(resp.status_code, 301) def test_if_modified_since_and_client_error(self): def get_response(req): resp = self.client.get(req.path_info) resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT' resp.status_code = 400 return resp self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT' resp = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(resp.status_code, 400) def test_not_modified_headers(self): """ The 304 Not Modified response should include only the headers required by section 4.1 of RFC 7232, Last-Modified, and the cookies. 
""" def get_response(req): resp = self.client.get(req.path_info) resp['Date'] = 'Sat, 12 Feb 2011 17:35:44 GMT' resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT' resp['Expires'] = 'Sun, 13 Feb 2011 17:35:44 GMT' resp['Vary'] = 'Cookie' resp['Cache-Control'] = 'public' resp['Content-Location'] = '/alt' resp['Content-Language'] = 'en' # shouldn't be preserved resp['ETag'] = '"spam"' resp.set_cookie('key', 'value') return resp self.req.META['HTTP_IF_NONE_MATCH'] = '"spam"' new_response = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(new_response.status_code, 304) base_response = get_response(self.req) for header in ('Cache-Control', 'Content-Location', 'Date', 'ETag', 'Expires', 'Last-Modified', 'Vary'): self.assertEqual(new_response.headers[header], base_response.headers[header]) self.assertEqual(new_response.cookies, base_response.cookies) self.assertNotIn('Content-Language', new_response) def test_no_unsafe(self): """ ConditionalGetMiddleware shouldn't return a conditional response on an unsafe request. A response has already been generated by the time ConditionalGetMiddleware is called, so it's too late to return a 412 Precondition Failed. """ def get_200_response(req): return HttpResponse(status=200) response = ConditionalGetMiddleware(self.get_response)(self.req) etag = response.headers['ETag'] put_request = self.request_factory.put('/', HTTP_IF_MATCH=etag) conditional_get_response = ConditionalGetMiddleware(get_200_response)(put_request) self.assertEqual(conditional_get_response.status_code, 200) # should never be a 412 def test_no_head(self): """ ConditionalGetMiddleware shouldn't compute and return an ETag on a HEAD request since it can't do so accurately without access to the response body of the corresponding GET. """ def get_200_response(req): return HttpResponse(status=200) request = self.request_factory.head('/') conditional_get_response = ConditionalGetMiddleware(get_200_response)(request) self.assertNotIn('ETag', conditional_get_response) class XFrameOptionsMiddlewareTest(SimpleTestCase): """ Tests for the X-Frame-Options clickjacking prevention middleware. """ def test_same_origin(self): """ The X_FRAME_OPTIONS setting can be set to SAMEORIGIN to have the middleware use that value for the HTTP header. """ with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'): r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'SAMEORIGIN') with override_settings(X_FRAME_OPTIONS='sameorigin'): r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'SAMEORIGIN') def test_deny(self): """ The X_FRAME_OPTIONS setting can be set to DENY to have the middleware use that value for the HTTP header. """ with override_settings(X_FRAME_OPTIONS='DENY'): r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'DENY') with override_settings(X_FRAME_OPTIONS='deny'): r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'DENY') def test_defaults_sameorigin(self): """ If the X_FRAME_OPTIONS setting is not set then it defaults to DENY. 
""" with override_settings(X_FRAME_OPTIONS=None): del settings.X_FRAME_OPTIONS # restored by override_settings r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'DENY') def test_dont_set_if_set(self): """ If the X-Frame-Options header is already set then the middleware does not attempt to override it. """ def same_origin_response(request): response = HttpResponse() response.headers['X-Frame-Options'] = 'SAMEORIGIN' return response def deny_response(request): response = HttpResponse() response.headers['X-Frame-Options'] = 'DENY' return response with override_settings(X_FRAME_OPTIONS='DENY'): r = XFrameOptionsMiddleware(same_origin_response)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'SAMEORIGIN') with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'): r = XFrameOptionsMiddleware(deny_response)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'DENY') def test_response_exempt(self): """ If the response has an xframe_options_exempt attribute set to False then it still sets the header, but if it's set to True then it doesn't. """ def xframe_exempt_response(request): response = HttpResponse() response.xframe_options_exempt = True return response def xframe_not_exempt_response(request): response = HttpResponse() response.xframe_options_exempt = False return response with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'): r = XFrameOptionsMiddleware(xframe_not_exempt_response)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'SAMEORIGIN') r = XFrameOptionsMiddleware(xframe_exempt_response)(HttpRequest()) self.assertIsNone(r.headers.get('X-Frame-Options')) def test_is_extendable(self): """ The XFrameOptionsMiddleware method that determines the X-Frame-Options header value can be overridden based on something in the request or response. """ class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware): # This is just an example for testing purposes... def get_xframe_options_value(self, request, response): if getattr(request, 'sameorigin', False): return 'SAMEORIGIN' if getattr(response, 'sameorigin', False): return 'SAMEORIGIN' return 'DENY' def same_origin_response(request): response = HttpResponse() response.sameorigin = True return response with override_settings(X_FRAME_OPTIONS='DENY'): r = OtherXFrameOptionsMiddleware(same_origin_response)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'SAMEORIGIN') request = HttpRequest() request.sameorigin = True r = OtherXFrameOptionsMiddleware(get_response_empty)(request) self.assertEqual(r.headers['X-Frame-Options'], 'SAMEORIGIN') with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'): r = OtherXFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'DENY') class GZipMiddlewareTest(SimpleTestCase): """ Tests the GZipMiddleware. """ short_string = b"This string is too short to be worth compressing." 
compressible_string = b'a' * 500 incompressible_string = b''.join(int2byte(random.randint(0, 255)) for _ in range(500)) sequence = [b'a' * 500, b'b' * 200, b'a' * 300] sequence_unicode = ['a' * 500, 'é' * 200, 'a' * 300] request_factory = RequestFactory() def setUp(self): self.req = self.request_factory.get('/') self.req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate' self.req.META['HTTP_USER_AGENT'] = 'Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1' self.resp = HttpResponse() self.resp.status_code = 200 self.resp.content = self.compressible_string self.resp['Content-Type'] = 'text/html; charset=UTF-8' def get_response(self, request): return self.resp @staticmethod def decompress(gzipped_string): with gzip.GzipFile(mode='rb', fileobj=BytesIO(gzipped_string)) as f: return f.read() @staticmethod def get_mtime(gzipped_string): with gzip.GzipFile(mode='rb', fileobj=BytesIO(gzipped_string)) as f: f.read() # must read the data before accessing the header return f.mtime def test_compress_response(self): """ Compression is performed on responses with compressible content. """ r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(self.decompress(r.content), self.compressible_string) self.assertEqual(r.get('Content-Encoding'), 'gzip') self.assertEqual(r.get('Content-Length'), str(len(r.content))) def test_compress_streaming_response(self): """ Compression is performed on responses with streaming content. """ def get_stream_response(request): resp = StreamingHttpResponse(self.sequence) resp['Content-Type'] = 'text/html; charset=UTF-8' return resp r = GZipMiddleware(get_stream_response)(self.req) self.assertEqual(self.decompress(b''.join(r)), b''.join(self.sequence)) self.assertEqual(r.get('Content-Encoding'), 'gzip') self.assertFalse(r.has_header('Content-Length')) def test_compress_streaming_response_unicode(self): """ Compression is performed on responses with streaming Unicode content. """ def get_stream_response_unicode(request): resp = StreamingHttpResponse(self.sequence_unicode) resp['Content-Type'] = 'text/html; charset=UTF-8' return resp r = GZipMiddleware(get_stream_response_unicode)(self.req) self.assertEqual( self.decompress(b''.join(r)), b''.join(x.encode() for x in self.sequence_unicode) ) self.assertEqual(r.get('Content-Encoding'), 'gzip') self.assertFalse(r.has_header('Content-Length')) def test_compress_file_response(self): """ Compression is performed on FileResponse. """ with open(__file__, 'rb') as file1: def get_response(req): file_resp = FileResponse(file1) file_resp['Content-Type'] = 'text/html; charset=UTF-8' return file_resp r = GZipMiddleware(get_response)(self.req) with open(__file__, 'rb') as file2: self.assertEqual(self.decompress(b''.join(r)), file2.read()) self.assertEqual(r.get('Content-Encoding'), 'gzip') self.assertIsNot(r.file_to_stream, file1) def test_compress_non_200_response(self): """ Compression is performed on responses with a status other than 200 (#10762). """ self.resp.status_code = 404 r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(self.decompress(r.content), self.compressible_string) self.assertEqual(r.get('Content-Encoding'), 'gzip') def test_no_compress_short_response(self): """ Compression isn't performed on responses with short content. 
""" self.resp.content = self.short_string r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(r.content, self.short_string) self.assertIsNone(r.get('Content-Encoding')) def test_no_compress_compressed_response(self): """ Compression isn't performed on responses that are already compressed. """ self.resp['Content-Encoding'] = 'deflate' r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(r.content, self.compressible_string) self.assertEqual(r.get('Content-Encoding'), 'deflate') def test_no_compress_incompressible_response(self): """ Compression isn't performed on responses with incompressible content. """ self.resp.content = self.incompressible_string r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(r.content, self.incompressible_string) self.assertIsNone(r.get('Content-Encoding')) def test_compress_deterministic(self): """ Compression results are the same for the same content and don't include a modification time (since that would make the results of compression non-deterministic and prevent ConditionalGetMiddleware from recognizing conditional matches on gzipped content). """ r1 = GZipMiddleware(self.get_response)(self.req) r2 = GZipMiddleware(self.get_response)(self.req) self.assertEqual(r1.content, r2.content) self.assertEqual(self.get_mtime(r1.content), 0) self.assertEqual(self.get_mtime(r2.content), 0) class ETagGZipMiddlewareTest(SimpleTestCase): """ ETags are handled properly by GZipMiddleware. """ rf = RequestFactory() compressible_string = b'a' * 500 def test_strong_etag_modified(self): """ GZipMiddleware makes a strong ETag weak. """ def get_response(req): response = HttpResponse(self.compressible_string) response.headers['ETag'] = '"eggs"' return response request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate') gzip_response = GZipMiddleware(get_response)(request) self.assertEqual(gzip_response.headers['ETag'], 'W/"eggs"') def test_weak_etag_not_modified(self): """ GZipMiddleware doesn't modify a weak ETag. """ def get_response(req): response = HttpResponse(self.compressible_string) response.headers['ETag'] = 'W/"eggs"' return response request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate') gzip_response = GZipMiddleware(get_response)(request) self.assertEqual(gzip_response.headers['ETag'], 'W/"eggs"') def test_etag_match(self): """ GZipMiddleware allows 304 Not Modified responses. """ def get_response(req): response = HttpResponse(self.compressible_string) return response def get_cond_response(req): return ConditionalGetMiddleware(get_response)(req) request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate') response = GZipMiddleware(get_cond_response)(request) gzip_etag = response.headers['ETag'] next_request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate', HTTP_IF_NONE_MATCH=gzip_etag) next_response = ConditionalGetMiddleware(get_response)(next_request) self.assertEqual(next_response.status_code, 304)
from django.http import HttpResponse from django.test import RequestFactory, SimpleTestCase from django.test.utils import override_settings class SecurityMiddlewareTest(SimpleTestCase): def middleware(self, *args, **kwargs): from django.middleware.security import SecurityMiddleware return SecurityMiddleware(self.response(*args, **kwargs)) @property def secure_request_kwargs(self): return {"wsgi.url_scheme": "https"} def response(self, *args, headers=None, **kwargs): def get_response(req): response = HttpResponse(*args, **kwargs) if headers: for k, v in headers.items(): response.headers[k] = v return response return get_response def process_response(self, *args, secure=False, request=None, **kwargs): request_kwargs = {} if secure: request_kwargs.update(self.secure_request_kwargs) if request is None: request = self.request.get("/some/url", **request_kwargs) ret = self.middleware(*args, **kwargs).process_request(request) if ret: return ret return self.middleware(*args, **kwargs)(request) request = RequestFactory() def process_request(self, method, *args, secure=False, **kwargs): if secure: kwargs.update(self.secure_request_kwargs) req = getattr(self.request, method.lower())(*args, **kwargs) return self.middleware().process_request(req) @override_settings(SECURE_HSTS_SECONDS=3600) def test_sts_on(self): """ With SECURE_HSTS_SECONDS=3600, the middleware adds "Strict-Transport-Security: max-age=3600" to the response. """ self.assertEqual( self.process_response(secure=True).headers['Strict-Transport-Security'], 'max-age=3600', ) @override_settings(SECURE_HSTS_SECONDS=3600) def test_sts_already_present(self): """ The middleware will not override a "Strict-Transport-Security" header already present in the response. """ response = self.process_response( secure=True, headers={"Strict-Transport-Security": "max-age=7200"}) self.assertEqual(response.headers["Strict-Transport-Security"], "max-age=7200") @override_settings(SECURE_HSTS_SECONDS=3600) def test_sts_only_if_secure(self): """ The "Strict-Transport-Security" header is not added to responses going over an insecure connection. """ self.assertNotIn( 'Strict-Transport-Security', self.process_response(secure=False).headers, ) @override_settings(SECURE_HSTS_SECONDS=0) def test_sts_off(self): """ With SECURE_HSTS_SECONDS=0, the middleware does not add a "Strict-Transport-Security" header to the response. """ self.assertNotIn( 'Strict-Transport-Security', self.process_response(secure=True).headers, ) @override_settings(SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True) def test_sts_include_subdomains(self): """ With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_INCLUDE_SUBDOMAINS True, the middleware adds a "Strict-Transport-Security" header with the "includeSubDomains" directive to the response. """ response = self.process_response(secure=True) self.assertEqual( response.headers['Strict-Transport-Security'], 'max-age=600; includeSubDomains', ) @override_settings(SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False) def test_sts_no_include_subdomains(self): """ With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_INCLUDE_SUBDOMAINS False, the middleware adds a "Strict-Transport-Security" header without the "includeSubDomains" directive to the response. 
""" response = self.process_response(secure=True) self.assertEqual(response.headers["Strict-Transport-Security"], "max-age=600") @override_settings(SECURE_HSTS_SECONDS=10886400, SECURE_HSTS_PRELOAD=True) def test_sts_preload(self): """ With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_PRELOAD True, the middleware adds a "Strict-Transport-Security" header with the "preload" directive to the response. """ response = self.process_response(secure=True) self.assertEqual( response.headers['Strict-Transport-Security'], 'max-age=10886400; preload', ) @override_settings(SECURE_HSTS_SECONDS=10886400, SECURE_HSTS_INCLUDE_SUBDOMAINS=True, SECURE_HSTS_PRELOAD=True) def test_sts_subdomains_and_preload(self): """ With SECURE_HSTS_SECONDS non-zero, SECURE_HSTS_INCLUDE_SUBDOMAINS and SECURE_HSTS_PRELOAD True, the middleware adds a "Strict-Transport-Security" header containing both the "includeSubDomains" and "preload" directives to the response. """ response = self.process_response(secure=True) self.assertEqual( response.headers['Strict-Transport-Security'], 'max-age=10886400; includeSubDomains; preload', ) @override_settings(SECURE_HSTS_SECONDS=10886400, SECURE_HSTS_PRELOAD=False) def test_sts_no_preload(self): """ With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_PRELOAD False, the middleware adds a "Strict-Transport-Security" header without the "preload" directive to the response. """ response = self.process_response(secure=True) self.assertEqual( response.headers['Strict-Transport-Security'], 'max-age=10886400', ) @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True) def test_content_type_on(self): """ With SECURE_CONTENT_TYPE_NOSNIFF set to True, the middleware adds "X-Content-Type-Options: nosniff" header to the response. """ self.assertEqual( self.process_response().headers['X-Content-Type-Options'], 'nosniff', ) @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True) def test_content_type_already_present(self): """ The middleware will not override an "X-Content-Type-Options" header already present in the response. """ response = self.process_response(secure=True, headers={"X-Content-Type-Options": "foo"}) self.assertEqual(response.headers["X-Content-Type-Options"], "foo") @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False) def test_content_type_off(self): """ With SECURE_CONTENT_TYPE_NOSNIFF False, the middleware does not add an "X-Content-Type-Options" header to the response. """ self.assertNotIn('X-Content-Type-Options', self.process_response().headers) @override_settings(SECURE_BROWSER_XSS_FILTER=True) def test_xss_filter_on(self): """ With SECURE_BROWSER_XSS_FILTER set to True, the middleware adds "s-xss-protection: 1; mode=block" header to the response. """ self.assertEqual( self.process_response().headers['X-XSS-Protection'], '1; mode=block', ) @override_settings(SECURE_BROWSER_XSS_FILTER=True) def test_xss_filter_already_present(self): """ The middleware will not override an "X-XSS-Protection" header already present in the response. """ response = self.process_response(secure=True, headers={"X-XSS-Protection": "foo"}) self.assertEqual(response.headers["X-XSS-Protection"], "foo") @override_settings(SECURE_BROWSER_XSS_FILTER=False) def test_xss_filter_off(self): """ With SECURE_BROWSER_XSS_FILTER set to False, the middleware does not add an "X-XSS-Protection" header to the response. 
""" self.assertNotIn('X-XSS-Protection', self.process_response().headers) @override_settings(SECURE_SSL_REDIRECT=True) def test_ssl_redirect_on(self): """ With SECURE_SSL_REDIRECT True, the middleware redirects any non-secure requests to the https:// version of the same URL. """ ret = self.process_request("get", "/some/url?query=string") self.assertEqual(ret.status_code, 301) self.assertEqual(ret["Location"], "https://testserver/some/url?query=string") @override_settings(SECURE_SSL_REDIRECT=True) def test_no_redirect_ssl(self): """ The middleware does not redirect secure requests. """ ret = self.process_request("get", "/some/url", secure=True) self.assertIsNone(ret) @override_settings(SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"]) def test_redirect_exempt(self): """ The middleware does not redirect requests with URL path matching an exempt pattern. """ ret = self.process_request("get", "/insecure/page") self.assertIsNone(ret) @override_settings(SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com") def test_redirect_ssl_host(self): """ The middleware redirects to SECURE_SSL_HOST if given. """ ret = self.process_request("get", "/some/url") self.assertEqual(ret.status_code, 301) self.assertEqual(ret["Location"], "https://secure.example.com/some/url") @override_settings(SECURE_SSL_REDIRECT=False) def test_ssl_redirect_off(self): """ With SECURE_SSL_REDIRECT False, the middleware does not redirect. """ ret = self.process_request("get", "/some/url") self.assertIsNone(ret) @override_settings(SECURE_REFERRER_POLICY=None) def test_referrer_policy_off(self): """ With SECURE_REFERRER_POLICY set to None, the middleware does not add a "Referrer-Policy" header to the response. """ self.assertNotIn('Referrer-Policy', self.process_response().headers) def test_referrer_policy_on(self): """ With SECURE_REFERRER_POLICY set to a valid value, the middleware adds a "Referrer-Policy" header to the response. """ tests = ( ('strict-origin', 'strict-origin'), ('strict-origin,origin', 'strict-origin,origin'), ('strict-origin, origin', 'strict-origin,origin'), (['strict-origin', 'origin'], 'strict-origin,origin'), (('strict-origin', 'origin'), 'strict-origin,origin'), ) for value, expected in tests: with self.subTest(value=value), override_settings(SECURE_REFERRER_POLICY=value): self.assertEqual( self.process_response().headers['Referrer-Policy'], expected, ) @override_settings(SECURE_REFERRER_POLICY='strict-origin') def test_referrer_policy_already_present(self): """ The middleware will not override a "Referrer-Policy" header already present in the response. """ response = self.process_response(headers={'Referrer-Policy': 'unsafe-url'}) self.assertEqual(response.headers['Referrer-Policy'], 'unsafe-url')
from django.urls import path, re_path from . import views urlpatterns = [ path('noslash', views.empty_view), path('slash/', views.empty_view), path('needsquoting#/', views.empty_view), # Accepts paths with two leading slashes. re_path(r'^(.+)/security/$', views.empty_view), # Should not append slash. path('sensitive_fbv/', views.sensitive_fbv), path('sensitive_cbv/', views.SensitiveCBV.as_view()), ]
from django.http import HttpResponse from django.utils.decorators import method_decorator from django.views.decorators.common import no_append_slash from django.views.generic import View def empty_view(request, *args, **kwargs): return HttpResponse() @no_append_slash def sensitive_fbv(request, *args, **kwargs): return HttpResponse() @method_decorator(no_append_slash, name='dispatch') class SensitiveCBV(View): def get(self, *args, **kwargs): return HttpResponse()
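

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of this module: @no_append_slash opts the two
# sensitive views above out of CommonMiddleware's APPEND_SLASH redirect, so a
# request for '/sensitive_fbv' (without the trailing slash) falls through to a
# plain 404 instead of being redirected to '/sensitive_fbv/'. The helper below
# checks for the marker the decorator leaves on a view; the attribute name is
# an assumption based on that behaviour, not documented API.
def _is_append_slash_exempt_sketch(view):
    """Return True if ``view`` has been marked to skip APPEND_SLASH redirects."""
    return getattr(view, 'should_append_slash', True) is False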
from django.contrib import admin from django.contrib.admin.sites import AdminSite from django.contrib.auth.models import User from django.contrib.contenttypes.admin import GenericTabularInline from django.contrib.contenttypes.models import ContentType from django.forms.formsets import DEFAULT_MAX_NUM from django.forms.models import ModelForm from django.test import ( RequestFactory, SimpleTestCase, TestCase, override_settings, ) from django.urls import reverse from .admin import MediaInline, MediaPermanentInline, site as admin_site from .models import Category, Episode, EpisodePermanent, Media, PhoneNumber class TestDataMixin: @classmethod def setUpTestData(cls): cls.superuser = User.objects.create_superuser(username='super', password='secret', email='[email protected]') @override_settings(ROOT_URLCONF='generic_inline_admin.urls') class GenericAdminViewTest(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) e = Episode.objects.create(name='This Week in Django') self.episode_pk = e.pk m = Media(content_object=e, url='http://example.com/podcast.mp3') m.save() self.mp3_media_pk = m.pk m = Media(content_object=e, url='http://example.com/logo.png') m.save() self.png_media_pk = m.pk def test_basic_add_GET(self): """ A smoke test to ensure GET on the add_view works. """ response = self.client.get(reverse('admin:generic_inline_admin_episode_add')) self.assertEqual(response.status_code, 200) def test_basic_edit_GET(self): """ A smoke test to ensure GET on the change_view works. """ response = self.client.get( reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,)) ) self.assertEqual(response.status_code, 200) def test_basic_add_POST(self): """ A smoke test to ensure POST on add_view works. """ post_data = { "name": "This Week in Django", # inline data "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1", "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0", "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0", } response = self.client.post(reverse('admin:generic_inline_admin_episode_add'), post_data) self.assertEqual(response.status_code, 302) # redirect somewhere def test_basic_edit_POST(self): """ A smoke test to ensure POST on edit_view works. """ post_data = { "name": "This Week in Django", # inline data "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3", "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2", "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0", "generic_inline_admin-media-content_type-object_id-0-id": str(self.mp3_media_pk), "generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3", "generic_inline_admin-media-content_type-object_id-1-id": str(self.png_media_pk), "generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png", "generic_inline_admin-media-content_type-object_id-2-id": "", "generic_inline_admin-media-content_type-object_id-2-url": "", } url = reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,)) response = self.client.post(url, post_data) self.assertEqual(response.status_code, 302) # redirect somewhere @override_settings(ROOT_URLCONF='generic_inline_admin.urls') class GenericInlineAdminParametersTest(TestDataMixin, TestCase): factory = RequestFactory() def setUp(self): self.client.force_login(self.superuser) def _create_object(self, model): """ Create a model with an attached Media object via GFK. 
We can't load content via a fixture (since the GenericForeignKey relies on content type IDs, which will vary depending on what other tests have been run), thus we do it here. """ e = model.objects.create(name='This Week in Django') Media.objects.create(content_object=e, url='http://example.com/podcast.mp3') return e def test_no_param(self): """ With one initial form, extra (default) at 3, there should be 4 forms. """ e = self._create_object(Episode) response = self.client.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,))) formset = response.context['inline_admin_formsets'][0].formset self.assertEqual(formset.total_form_count(), 4) self.assertEqual(formset.initial_form_count(), 1) def test_extra_param(self): """ With extra=0, there should be one form. """ class ExtraInline(GenericTabularInline): model = Media extra = 0 modeladmin = admin.ModelAdmin(Episode, admin_site) modeladmin.inlines = [ExtraInline] e = self._create_object(Episode) request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,))) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request, object_id=str(e.pk)) formset = response.context_data['inline_admin_formsets'][0].formset self.assertEqual(formset.total_form_count(), 1) self.assertEqual(formset.initial_form_count(), 1) def test_max_num_param(self): """ With extra=5 and max_num=2, there should be only 2 forms. """ class MaxNumInline(GenericTabularInline): model = Media extra = 5 max_num = 2 modeladmin = admin.ModelAdmin(Episode, admin_site) modeladmin.inlines = [MaxNumInline] e = self._create_object(Episode) request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,))) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request, object_id=str(e.pk)) formset = response.context_data['inline_admin_formsets'][0].formset self.assertEqual(formset.total_form_count(), 2) self.assertEqual(formset.initial_form_count(), 1) def test_min_num_param(self): """ With extra=3 and min_num=2, there should be five forms. 
""" class MinNumInline(GenericTabularInline): model = Media extra = 3 min_num = 2 modeladmin = admin.ModelAdmin(Episode, admin_site) modeladmin.inlines = [MinNumInline] e = self._create_object(Episode) request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,))) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request, object_id=str(e.pk)) formset = response.context_data['inline_admin_formsets'][0].formset self.assertEqual(formset.total_form_count(), 5) self.assertEqual(formset.initial_form_count(), 1) def test_get_extra(self): class GetExtraInline(GenericTabularInline): model = Media extra = 4 def get_extra(self, request, obj): return 2 modeladmin = admin.ModelAdmin(Episode, admin_site) modeladmin.inlines = [GetExtraInline] e = self._create_object(Episode) request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,))) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request, object_id=str(e.pk)) formset = response.context_data['inline_admin_formsets'][0].formset self.assertEqual(formset.extra, 2) def test_get_min_num(self): class GetMinNumInline(GenericTabularInline): model = Media min_num = 5 def get_min_num(self, request, obj): return 2 modeladmin = admin.ModelAdmin(Episode, admin_site) modeladmin.inlines = [GetMinNumInline] e = self._create_object(Episode) request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,))) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request, object_id=str(e.pk)) formset = response.context_data['inline_admin_formsets'][0].formset self.assertEqual(formset.min_num, 2) def test_get_max_num(self): class GetMaxNumInline(GenericTabularInline): model = Media extra = 5 def get_max_num(self, request, obj): return 2 modeladmin = admin.ModelAdmin(Episode, admin_site) modeladmin.inlines = [GetMaxNumInline] e = self._create_object(Episode) request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,))) request.user = User(username='super', is_superuser=True) response = modeladmin.changeform_view(request, object_id=str(e.pk)) formset = response.context_data['inline_admin_formsets'][0].formset self.assertEqual(formset.max_num, 2) @override_settings(ROOT_URLCONF='generic_inline_admin.urls') class GenericInlineAdminWithUniqueTogetherTest(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) def test_add(self): category_id = Category.objects.create(name='male').pk post_data = { "name": "John Doe", # inline data "generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1", "generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0", "generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0", "generic_inline_admin-phonenumber-content_type-object_id-0-id": "", "generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555", "generic_inline_admin-phonenumber-content_type-object_id-0-category": str(category_id), } response = self.client.get(reverse('admin:generic_inline_admin_contact_add')) self.assertEqual(response.status_code, 200) response = self.client.post(reverse('admin:generic_inline_admin_contact_add'), post_data) self.assertEqual(response.status_code, 302) # redirect somewhere def test_delete(self): from .models import Contact c = Contact.objects.create(name='foo') PhoneNumber.objects.create( 
object_id=c.id, content_type=ContentType.objects.get_for_model(Contact), phone_number="555-555-5555", ) response = self.client.post(reverse('admin:generic_inline_admin_contact_delete', args=[c.pk])) self.assertContains(response, 'Are you sure you want to delete') @override_settings(ROOT_URLCONF='generic_inline_admin.urls') class NoInlineDeletionTest(SimpleTestCase): def test_no_deletion(self): inline = MediaPermanentInline(EpisodePermanent, admin_site) fake_request = object() formset = inline.get_formset(fake_request) self.assertFalse(formset.can_delete) class MockRequest: pass class MockSuperUser: def has_perm(self, perm, obj=None): return True request = MockRequest() request.user = MockSuperUser() @override_settings(ROOT_URLCONF='generic_inline_admin.urls') class GenericInlineModelAdminTest(SimpleTestCase): def setUp(self): self.site = AdminSite() def test_get_formset_kwargs(self): media_inline = MediaInline(Media, AdminSite()) # Create a formset with default arguments formset = media_inline.get_formset(request) self.assertEqual(formset.max_num, DEFAULT_MAX_NUM) self.assertIs(formset.can_order, False) # Create a formset with custom keyword arguments formset = media_inline.get_formset(request, max_num=100, can_order=True) self.assertEqual(formset.max_num, 100) self.assertIs(formset.can_order, True) def test_custom_form_meta_exclude_with_readonly(self): """ The custom ModelForm's `Meta.exclude` is respected when used in conjunction with `GenericInlineModelAdmin.readonly_fields` and when no `ModelAdmin.exclude` is defined. """ class MediaForm(ModelForm): class Meta: model = Media exclude = ['url'] class MediaInline(GenericTabularInline): readonly_fields = ['description'] form = MediaForm model = Media class EpisodeAdmin(admin.ModelAdmin): inlines = [ MediaInline ] ma = EpisodeAdmin(Episode, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ['keywords', 'id', 'DELETE']) def test_custom_form_meta_exclude(self): """ The custom ModelForm's `Meta.exclude` is respected by `GenericInlineModelAdmin.get_formset`, and overridden if `ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined. Refs #15907. """ # First with `GenericInlineModelAdmin` ----------------- class MediaForm(ModelForm): class Meta: model = Media exclude = ['url'] class MediaInline(GenericTabularInline): exclude = ['description'] form = MediaForm model = Media class EpisodeAdmin(admin.ModelAdmin): inlines = [ MediaInline ] ma = EpisodeAdmin(Episode, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ['url', 'keywords', 'id', 'DELETE']) # Then, only with `ModelForm` ----------------- class MediaInline(GenericTabularInline): form = MediaForm model = Media class EpisodeAdmin(admin.ModelAdmin): inlines = [ MediaInline ] ma = EpisodeAdmin(Episode, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ['description', 'keywords', 'id', 'DELETE']) def test_get_fieldsets(self): # get_fieldsets is called when figuring out form fields. # Refs #18681. 
class MediaForm(ModelForm): class Meta: model = Media fields = '__all__' class MediaInline(GenericTabularInline): form = MediaForm model = Media can_delete = False def get_fieldsets(self, request, obj=None): return [(None, {'fields': ['url', 'description']})] ma = MediaInline(Media, self.site) form = ma.get_formset(None).form self.assertEqual(form._meta.fields, ['url', 'description']) def test_get_formsets_with_inlines_returns_tuples(self): """ get_formsets_with_inlines() returns the correct tuples. """ class MediaForm(ModelForm): class Meta: model = Media exclude = ['url'] class MediaInline(GenericTabularInline): form = MediaForm model = Media class AlternateInline(GenericTabularInline): form = MediaForm model = Media class EpisodeAdmin(admin.ModelAdmin): inlines = [ AlternateInline, MediaInline ] ma = EpisodeAdmin(Episode, self.site) inlines = ma.get_inline_instances(request) for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines): self.assertIsInstance(formset, other_inline.get_formset(request).__class__) def test_get_inline_instances_override_get_inlines(self): class MediaInline(GenericTabularInline): model = Media class AlternateInline(GenericTabularInline): model = Media class EpisodeAdmin(admin.ModelAdmin): inlines = (AlternateInline, MediaInline) def get_inlines(self, request, obj): if hasattr(request, 'name'): if request.name == 'alternate': return self.inlines[:1] elif request.name == 'media': return self.inlines[1:2] return [] ma = EpisodeAdmin(Episode, self.site) self.assertEqual(ma.get_inlines(request, None), []) self.assertEqual(ma.get_inline_instances(request), []) for name, inline_class in (('alternate', AlternateInline), ('media', MediaInline)): request.name = name self.assertEqual(ma.get_inlines(request, None), (inline_class,)), self.assertEqual(type(ma.get_inline_instances(request)[0]), inline_class)
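

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test suite above: the tests rely on an
# admin registration roughly like the one below, where Media hangs off Episode
# through a generic relation and is edited inline. This is a minimal, assumed
# stand-in for what the imported ``.admin`` module actually defines; the names
# prefixed with "Example" are made up here.
class ExampleMediaInline(GenericTabularInline):
    model = Media   # Media carries the content_type/object_id generic foreign key.
    extra = 3       # The stock default exercised by test_no_param above.


class ExampleEpisodeAdmin(admin.ModelAdmin):
    inlines = [ExampleMediaInline]

# A project would then register it with a site, e.g.:
#     admin.site.register(Episode, ExampleEpisodeAdmin)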
import os import shutil import sys import tempfile import unittest from io import StringIO from pathlib import Path from unittest import mock from django.conf import settings from django.contrib.staticfiles import finders, storage from django.contrib.staticfiles.management.commands.collectstatic import ( Command as CollectstaticCommand, ) from django.core.management import call_command from django.test import override_settings from .cases import CollectionTestCase from .settings import TEST_ROOT def hashed_file_path(test, path): fullpath = test.render_template(test.static_template_snippet(path)) return fullpath.replace(settings.STATIC_URL, '') class TestHashedFiles: hashed_file_path = hashed_file_path def setUp(self): self._max_post_process_passes = storage.staticfiles_storage.max_post_process_passes super().setUp() def tearDown(self): # Clear hashed files to avoid side effects among tests. storage.staticfiles_storage.hashed_files.clear() storage.staticfiles_storage.max_post_process_passes = self._max_post_process_passes def assertPostCondition(self): """ Assert post conditions for a test are met. Must be manually called at the end of each test. """ pass def test_template_tag_return(self): self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png") self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt") self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt", asvar=True) self.assertStaticRenders("cached/styles.css", "/static/cached/styles.5e0040571e1a.css") self.assertStaticRenders("path/", "/static/path/") self.assertStaticRenders("path/?query", "/static/path/?query") self.assertPostCondition() def test_template_tag_simple_content(self): relpath = self.hashed_file_path("cached/styles.css") self.assertEqual(relpath, "cached/styles.5e0040571e1a.css") with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertNotIn(b"cached/other.css", content) self.assertIn(b"other.d41d8cd98f00.css", content) self.assertPostCondition() def test_path_ignored_completely(self): relpath = self.hashed_file_path("cached/css/ignored.css") self.assertEqual(relpath, "cached/css/ignored.554da52152af.css") with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertIn(b'#foobar', content) self.assertIn(b'http:foobar', content) self.assertIn(b'https:foobar', content) self.assertIn(b'data:foobar', content) self.assertIn(b'chrome:foobar', content) self.assertIn(b'//foobar', content) self.assertPostCondition() def test_path_with_querystring(self): relpath = self.hashed_file_path("cached/styles.css?spam=eggs") self.assertEqual(relpath, "cached/styles.5e0040571e1a.css?spam=eggs") with storage.staticfiles_storage.open("cached/styles.5e0040571e1a.css") as relfile: content = relfile.read() self.assertNotIn(b"cached/other.css", content) self.assertIn(b"other.d41d8cd98f00.css", content) self.assertPostCondition() def test_path_with_fragment(self): relpath = self.hashed_file_path("cached/styles.css#eggs") self.assertEqual(relpath, "cached/styles.5e0040571e1a.css#eggs") with storage.staticfiles_storage.open("cached/styles.5e0040571e1a.css") as relfile: content = relfile.read() self.assertNotIn(b"cached/other.css", content) self.assertIn(b"other.d41d8cd98f00.css", content) self.assertPostCondition() def test_path_with_querystring_and_fragment(self): relpath = self.hashed_file_path("cached/css/fragments.css") self.assertEqual(relpath, "cached/css/fragments.a60c0e74834f.css") with 
storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertIn(b'fonts/font.b9b105392eb8.eot?#iefix', content) self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content) self.assertIn(b'fonts/font.b8d603e42714.svg#path/to/../../fonts/font.svg', content) self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content) self.assertIn(b'#default#VML', content) self.assertPostCondition() def test_template_tag_absolute(self): relpath = self.hashed_file_path("cached/absolute.css") self.assertEqual(relpath, "cached/absolute.eb04def9f9a4.css") with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertNotIn(b"/static/cached/styles.css", content) self.assertIn(b"/static/cached/styles.5e0040571e1a.css", content) self.assertNotIn(b"/static/styles_root.css", content) self.assertIn(b"/static/styles_root.401f2509a628.css", content) self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content) self.assertPostCondition() def test_template_tag_absolute_root(self): """ Like test_template_tag_absolute, but for a file in STATIC_ROOT (#26249). """ relpath = self.hashed_file_path("absolute_root.css") self.assertEqual(relpath, "absolute_root.f821df1b64f7.css") with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertNotIn(b"/static/styles_root.css", content) self.assertIn(b"/static/styles_root.401f2509a628.css", content) self.assertPostCondition() def test_template_tag_relative(self): relpath = self.hashed_file_path("cached/relative.css") self.assertEqual(relpath, "cached/relative.c3e9e1ea6f2e.css") with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertNotIn(b"../cached/styles.css", content) self.assertNotIn(b'@import "styles.css"', content) self.assertNotIn(b'url(img/relative.png)', content) self.assertIn(b'url("img/relative.acae32e4532b.png")', content) self.assertIn(b"../cached/styles.5e0040571e1a.css", content) self.assertPostCondition() def test_import_replacement(self): "See #18050" relpath = self.hashed_file_path("cached/import.css") self.assertEqual(relpath, "cached/import.f53576679e5a.css") with storage.staticfiles_storage.open(relpath) as relfile: self.assertIn(b"""import url("styles.5e0040571e1a.css")""", relfile.read()) self.assertPostCondition() def test_template_tag_deep_relative(self): relpath = self.hashed_file_path("cached/css/window.css") self.assertEqual(relpath, "cached/css/window.5d5c10836967.css") with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertNotIn(b'url(img/window.png)', content) self.assertIn(b'url("img/window.acae32e4532b.png")', content) self.assertPostCondition() def test_template_tag_url(self): relpath = self.hashed_file_path("cached/url.css") self.assertEqual(relpath, "cached/url.902310b73412.css") with storage.staticfiles_storage.open(relpath) as relfile: self.assertIn(b"https://", relfile.read()) self.assertPostCondition() @override_settings( STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'loop')], STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'], ) def test_import_loop(self): finders.get_finder.cache_clear() err = StringIO() with self.assertRaisesMessage(RuntimeError, 'Max post-process passes exceeded'): call_command('collectstatic', interactive=False, verbosity=0, stderr=err) self.assertEqual("Post-processing 'All' failed!\n\n", err.getvalue()) self.assertPostCondition() def 
test_post_processing(self): """ post_processing behaves correctly. Files that are alterable should always be post-processed; files that aren't should be skipped. collectstatic has already been called once in setUp() for this testcase, therefore we check by verifying behavior on a second run. """ collectstatic_args = { 'interactive': False, 'verbosity': 0, 'link': False, 'clear': False, 'dry_run': False, 'post_process': True, 'use_default_ignore_patterns': True, 'ignore_patterns': ['*.ignoreme'], } collectstatic_cmd = CollectstaticCommand() collectstatic_cmd.set_options(**collectstatic_args) stats = collectstatic_cmd.collect() self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed']) self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified']) self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed']) self.assertPostCondition() def test_css_import_case_insensitive(self): relpath = self.hashed_file_path("cached/styles_insensitive.css") self.assertEqual(relpath, "cached/styles_insensitive.3fa427592a53.css") with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertNotIn(b"cached/other.css", content) self.assertIn(b"other.d41d8cd98f00.css", content) self.assertPostCondition() @override_settings( STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')], STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'], ) def test_post_processing_failure(self): """ post_processing indicates the origin of the error when it fails. """ finders.get_finder.cache_clear() err = StringIO() with self.assertRaises(Exception): call_command('collectstatic', interactive=False, verbosity=0, stderr=err) self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue()) self.assertPostCondition() @override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.ExtraPatternsStorage') class TestExtraPatternsStorage(CollectionTestCase): def setUp(self): storage.staticfiles_storage.hashed_files.clear() # avoid cache interference super().setUp() def cached_file_path(self, path): fullpath = self.render_template(self.static_template_snippet(path)) return fullpath.replace(settings.STATIC_URL, '') def test_multi_extension_patterns(self): """ With storage classes having several file extension patterns, only the files matching a specific file pattern should be affected by the substitution (#19670). """ # CSS files shouldn't be touched by JS patterns. relpath = self.cached_file_path("cached/import.css") self.assertEqual(relpath, "cached/import.f53576679e5a.css") with storage.staticfiles_storage.open(relpath) as relfile: self.assertIn(b'import url("styles.5e0040571e1a.css")', relfile.read()) # Confirm JS patterns have been applied to JS files. 
        relpath = self.cached_file_path("cached/test.js")
        self.assertEqual(relpath, "cached/test.388d7a790d46.js")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b'JS_URL("import.f53576679e5a.css")', relfile.read())


@override_settings(
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.ManifestStaticFilesStorage',
)
class TestCollectionManifestStorage(TestHashedFiles, CollectionTestCase):
    """
    Tests for the Cache busting storage
    """
    def setUp(self):
        super().setUp()

        temp_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(temp_dir, 'test'))
        self._clear_filename = os.path.join(temp_dir, 'test', 'cleared.txt')
        with open(self._clear_filename, 'w') as f:
            f.write('to be deleted in one test')

        self.patched_settings = self.settings(
            STATICFILES_DIRS=settings.STATICFILES_DIRS + [temp_dir],
        )
        self.patched_settings.enable()
        self.addCleanup(shutil.rmtree, temp_dir)
        self._manifest_strict = storage.staticfiles_storage.manifest_strict

    def tearDown(self):
        self.patched_settings.disable()

        if os.path.exists(self._clear_filename):
            os.unlink(self._clear_filename)

        storage.staticfiles_storage.manifest_strict = self._manifest_strict
        super().tearDown()

    def assertPostCondition(self):
        hashed_files = storage.staticfiles_storage.hashed_files
        # The in-memory version of the manifest matches the one on disk
        # since a properly created manifest should cover all filenames.
        if hashed_files:
            manifest = storage.staticfiles_storage.load_manifest()
            self.assertEqual(hashed_files, manifest)

    def test_manifest_exists(self):
        filename = storage.staticfiles_storage.manifest_name
        path = storage.staticfiles_storage.path(filename)
        self.assertTrue(os.path.exists(path))

    def test_manifest_does_not_exist(self):
        storage.staticfiles_storage.manifest_name = 'does.not.exist.json'
        self.assertIsNone(storage.staticfiles_storage.read_manifest())

    def test_manifest_does_not_ignore_permission_error(self):
        with mock.patch('builtins.open', side_effect=PermissionError):
            with self.assertRaises(PermissionError):
                storage.staticfiles_storage.read_manifest()

    def test_loaded_cache(self):
        self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
        manifest_content = storage.staticfiles_storage.read_manifest()
        self.assertIn(
            '"version": "%s"' % storage.staticfiles_storage.manifest_version,
            manifest_content
        )

    def test_parse_cache(self):
        hashed_files = storage.staticfiles_storage.hashed_files
        manifest = storage.staticfiles_storage.load_manifest()
        self.assertEqual(hashed_files, manifest)

    def test_clear_empties_manifest(self):
        cleared_file_name = storage.staticfiles_storage.clean_name(os.path.join('test', 'cleared.txt'))
        # collect the additional file
        self.run_collectstatic()

        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertIn(cleared_file_name, hashed_files)

        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertIn(cleared_file_name, manifest_content)

        original_path = storage.staticfiles_storage.path(cleared_file_name)
        self.assertTrue(os.path.exists(original_path))

        # delete the original file from the app, collect with clear
        os.unlink(self._clear_filename)
        self.run_collectstatic(clear=True)

        self.assertFileNotFound(original_path)

        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertNotIn(cleared_file_name, hashed_files)

        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertNotIn(cleared_file_name, manifest_content)

    def test_missing_entry(self):
        missing_file_name = 'cached/missing.css'
        configured_storage = storage.staticfiles_storage
        self.assertNotIn(missing_file_name,
configured_storage.hashed_files) # File name not found in manifest with self.assertRaisesMessage(ValueError, "Missing staticfiles manifest entry for '%s'" % missing_file_name): self.hashed_file_path(missing_file_name) configured_storage.manifest_strict = False # File doesn't exist on disk err_msg = "The file '%s' could not be found with %r." % (missing_file_name, configured_storage._wrapped) with self.assertRaisesMessage(ValueError, err_msg): self.hashed_file_path(missing_file_name) content = StringIO() content.write('Found') configured_storage.save(missing_file_name, content) # File exists on disk self.hashed_file_path(missing_file_name) def test_intermediate_files(self): cached_files = os.listdir(os.path.join(settings.STATIC_ROOT, 'cached')) # Intermediate files shouldn't be created for reference. self.assertEqual( len([ cached_file for cached_file in cached_files if cached_file.startswith('relative.') ]), 2, ) @override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.NoneHashStorage') class TestCollectionNoneHashStorage(CollectionTestCase): hashed_file_path = hashed_file_path def test_hashed_name(self): relpath = self.hashed_file_path('cached/styles.css') self.assertEqual(relpath, 'cached/styles.css') @override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.SimpleStorage') class TestCollectionSimpleStorage(CollectionTestCase): hashed_file_path = hashed_file_path def setUp(self): storage.staticfiles_storage.hashed_files.clear() # avoid cache interference super().setUp() def test_template_tag_return(self): self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png") self.assertStaticRenders("test/file.txt", "/static/test/file.deploy12345.txt") self.assertStaticRenders("cached/styles.css", "/static/cached/styles.deploy12345.css") self.assertStaticRenders("path/", "/static/path/") self.assertStaticRenders("path/?query", "/static/path/?query") def test_template_tag_simple_content(self): relpath = self.hashed_file_path("cached/styles.css") self.assertEqual(relpath, "cached/styles.deploy12345.css") with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertNotIn(b"cached/other.css", content) self.assertIn(b"other.deploy12345.css", content) class CustomStaticFilesStorage(storage.StaticFilesStorage): """ Used in TestStaticFilePermissions """ def __init__(self, *args, **kwargs): kwargs['file_permissions_mode'] = 0o640 kwargs['directory_permissions_mode'] = 0o740 super().__init__(*args, **kwargs) @unittest.skipIf(sys.platform == 'win32', "Windows only partially supports chmod.") class TestStaticFilePermissions(CollectionTestCase): command_params = { 'interactive': False, 'verbosity': 0, 'ignore_patterns': ['*.ignoreme'], } def setUp(self): self.umask = 0o027 self.old_umask = os.umask(self.umask) super().setUp() def tearDown(self): os.umask(self.old_umask) super().tearDown() # Don't run collectstatic command in this test class. 
def run_collectstatic(self, **kwargs): pass @override_settings( FILE_UPLOAD_PERMISSIONS=0o655, FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765, ) def test_collect_static_files_permissions(self): call_command('collectstatic', **self.command_params) static_root = Path(settings.STATIC_ROOT) test_file = static_root / 'test.txt' file_mode = test_file.stat().st_mode & 0o777 self.assertEqual(file_mode, 0o655) tests = [ static_root / 'subdir', static_root / 'nested', static_root / 'nested' / 'css', ] for directory in tests: with self.subTest(directory=directory): dir_mode = directory.stat().st_mode & 0o777 self.assertEqual(dir_mode, 0o765) @override_settings( FILE_UPLOAD_PERMISSIONS=None, FILE_UPLOAD_DIRECTORY_PERMISSIONS=None, ) def test_collect_static_files_default_permissions(self): call_command('collectstatic', **self.command_params) static_root = Path(settings.STATIC_ROOT) test_file = static_root / 'test.txt' file_mode = test_file.stat().st_mode & 0o777 self.assertEqual(file_mode, 0o666 & ~self.umask) tests = [ static_root / 'subdir', static_root / 'nested', static_root / 'nested' / 'css', ] for directory in tests: with self.subTest(directory=directory): dir_mode = directory.stat().st_mode & 0o777 self.assertEqual(dir_mode, 0o777 & ~self.umask) @override_settings( FILE_UPLOAD_PERMISSIONS=0o655, FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765, STATICFILES_STORAGE='staticfiles_tests.test_storage.CustomStaticFilesStorage', ) def test_collect_static_files_subclass_of_static_storage(self): call_command('collectstatic', **self.command_params) static_root = Path(settings.STATIC_ROOT) test_file = static_root / 'test.txt' file_mode = test_file.stat().st_mode & 0o777 self.assertEqual(file_mode, 0o640) tests = [ static_root / 'subdir', static_root / 'nested', static_root / 'nested' / 'css', ] for directory in tests: with self.subTest(directory=directory): dir_mode = directory.stat().st_mode & 0o777 self.assertEqual(dir_mode, 0o740) @override_settings( STATICFILES_STORAGE='django.contrib.staticfiles.storage.ManifestStaticFilesStorage', ) class TestCollectionHashedFilesCache(CollectionTestCase): """ Files referenced from CSS use the correct final hashed name regardless of the order in which the files are post-processed. """ hashed_file_path = hashed_file_path def setUp(self): super().setUp() self._temp_dir = temp_dir = tempfile.mkdtemp() os.makedirs(os.path.join(temp_dir, 'test')) self.addCleanup(shutil.rmtree, temp_dir) def _get_filename_path(self, filename): return os.path.join(self._temp_dir, 'test', filename) def test_file_change_after_collectstatic(self): # Create initial static files. file_contents = ( ('foo.png', 'foo'), ('bar.css', 'url("foo.png")\nurl("xyz.png")'), ('xyz.png', 'xyz'), ) for filename, content in file_contents: with open(self._get_filename_path(filename), 'w') as f: f.write(content) with self.modify_settings(STATICFILES_DIRS={'append': self._temp_dir}): finders.get_finder.cache_clear() err = StringIO() # First collectstatic run. call_command('collectstatic', interactive=False, verbosity=0, stderr=err) relpath = self.hashed_file_path('test/bar.css') with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertIn(b'foo.acbd18db4cc2.png', content) self.assertIn(b'xyz.d16fb36f0911.png', content) # Change the contents of the png files. 
for filename in ('foo.png', 'xyz.png'): with open(self._get_filename_path(filename), 'w+b') as f: f.write(b"new content of file to change its hash") # The hashes of the png files in the CSS file are updated after # a second collectstatic. call_command('collectstatic', interactive=False, verbosity=0, stderr=err) relpath = self.hashed_file_path('test/bar.css') with storage.staticfiles_storage.open(relpath) as relfile: content = relfile.read() self.assertIn(b'foo.57a5cb9ba68d.png', content) self.assertIn(b'xyz.57a5cb9ba68d.png', content)
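# --- Illustrative sketch, not part of the original test module ---
# The hashed suffixes asserted above can be reproduced by hand:
# ManifestStaticFilesStorage derives a file's suffix from the first 12 hex
# characters of the MD5 digest of its contents.
import hashlib


def hashed_suffix(content):
    """Return the 12-character suffix Django inserts into a hashed file name."""
    return hashlib.md5(content).hexdigest()[:12]


assert hashed_suffix(b'foo') == 'acbd18db4cc2'   # -> foo.acbd18db4cc2.png
assert hashed_suffix(b'xyz') == 'd16fb36f0911'   # -> xyz.d16fb36f0911.png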
b30857dc968f9463b1ea12be87680054c9e6ec43068001e3c784840ba294931a
import unittest from datetime import date, datetime, time, timedelta from decimal import Decimal from operator import attrgetter, itemgetter from uuid import UUID from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( BinaryField, BooleanField, Case, Count, DecimalField, F, GenericIPAddressField, IntegerField, Max, Min, Q, Sum, TextField, Value, When, ) from django.test import SimpleTestCase, TestCase from .models import CaseTestModel, Client, FKCaseTestModel, O2OCaseTestModel try: from PIL import Image except ImportError: Image = None class CaseExpressionTests(TestCase): @classmethod def setUpTestData(cls): o = CaseTestModel.objects.create(integer=1, integer2=1, string='1') O2OCaseTestModel.objects.create(o2o=o, integer=1) FKCaseTestModel.objects.create(fk=o, integer=1) o = CaseTestModel.objects.create(integer=2, integer2=3, string='2') O2OCaseTestModel.objects.create(o2o=o, integer=2) FKCaseTestModel.objects.create(fk=o, integer=2) FKCaseTestModel.objects.create(fk=o, integer=3) o = CaseTestModel.objects.create(integer=3, integer2=4, string='3') O2OCaseTestModel.objects.create(o2o=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=4) o = CaseTestModel.objects.create(integer=2, integer2=2, string='2') O2OCaseTestModel.objects.create(o2o=o, integer=2) FKCaseTestModel.objects.create(fk=o, integer=2) FKCaseTestModel.objects.create(fk=o, integer=3) o = CaseTestModel.objects.create(integer=3, integer2=4, string='3') O2OCaseTestModel.objects.create(o2o=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=4) o = CaseTestModel.objects.create(integer=3, integer2=3, string='3') O2OCaseTestModel.objects.create(o2o=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=4) o = CaseTestModel.objects.create(integer=4, integer2=5, string='4') O2OCaseTestModel.objects.create(o2o=o, integer=1) FKCaseTestModel.objects.create(fk=o, integer=5) cls.group_by_fields = [ f.name for f in CaseTestModel._meta.get_fields() if not (f.is_relation and f.auto_created) and ( connection.features.allows_group_by_lob or not isinstance(f, (BinaryField, TextField)) ) ] def test_annotate(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(test=Case( When(integer=1, then=Value('one')), When(integer=2, then=Value('two')), default=Value('other'), )).order_by('pk'), [(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')], transform=attrgetter('integer', 'test') ) def test_annotate_without_default(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(test=Case( When(integer=1, then=1), When(integer=2, then=2), )).order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'test') ) def test_annotate_with_expression_as_value(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(f_test=Case( When(integer=1, then=F('integer') + 1), When(integer=2, then=F('integer') + 3), default='integer', )).order_by('pk'), [(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)], transform=attrgetter('integer', 'f_test') ) def test_annotate_with_expression_as_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(f_test=Case( When(integer2=F('integer'), then=Value('equal')), When(integer2=F('integer') + 1, then=Value('+1')), )).order_by('pk'), [(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), 
(3, '+1'), (3, 'equal'), (4, '+1')], transform=attrgetter('integer', 'f_test') ) def test_annotate_with_join_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(join_test=Case( When(integer=1, then=F('o2o_rel__integer') + 1), When(integer=2, then=F('o2o_rel__integer') + 3), default='o2o_rel__integer', )).order_by('pk'), [(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 1)], transform=attrgetter('integer', 'join_test') ) def test_annotate_with_in_clause(self): fk_rels = FKCaseTestModel.objects.filter(integer__in=[5]) self.assertQuerysetEqual( CaseTestModel.objects.only('pk', 'integer').annotate(in_test=Sum(Case( When(fk_rel__in=fk_rels, then=F('fk_rel__integer')), default=Value(0), ))).order_by('pk'), [(1, 0), (2, 0), (3, 0), (2, 0), (3, 0), (3, 0), (4, 5)], transform=attrgetter('integer', 'in_test') ) def test_annotate_with_join_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(join_test=Case( When(integer2=F('o2o_rel__integer'), then=Value('equal')), When(integer2=F('o2o_rel__integer') + 1, then=Value('+1')), default=Value('other'), )).order_by('pk'), [(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, 'other')], transform=attrgetter('integer', 'join_test') ) def test_annotate_with_join_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(join_test=Case( When(o2o_rel__integer=1, then=Value('one')), When(o2o_rel__integer=2, then=Value('two')), When(o2o_rel__integer=3, then=Value('three')), default=Value('other'), )).order_by('pk'), [(1, 'one'), (2, 'two'), (3, 'three'), (2, 'two'), (3, 'three'), (3, 'three'), (4, 'one')], transform=attrgetter('integer', 'join_test') ) def test_annotate_with_annotation_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_plus_1=F('integer') + 1, f_plus_3=F('integer') + 3, ).annotate( f_test=Case( When(integer=1, then='f_plus_1'), When(integer=2, then='f_plus_3'), default='integer', ), ).order_by('pk'), [(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)], transform=attrgetter('integer', 'f_test') ) def test_annotate_with_annotation_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_plus_1=F('integer') + 1, ).annotate( f_test=Case( When(integer2=F('integer'), then=Value('equal')), When(integer2=F('f_plus_1'), then=Value('+1')), ), ).order_by('pk'), [(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')], transform=attrgetter('integer', 'f_test') ) def test_annotate_with_annotation_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_minus_2=F('integer') - 2, ).annotate( test=Case( When(f_minus_2=-1, then=Value('negative one')), When(f_minus_2=0, then=Value('zero')), When(f_minus_2=1, then=Value('one')), default=Value('other'), ), ).order_by('pk'), [(1, 'negative one'), (2, 'zero'), (3, 'one'), (2, 'zero'), (3, 'one'), (3, 'one'), (4, 'other')], transform=attrgetter('integer', 'test') ) def test_annotate_with_aggregation_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.group_by_fields).annotate( min=Min('fk_rel__integer'), max=Max('fk_rel__integer'), ).annotate( test=Case( When(integer=2, then='min'), When(integer=3, then='max'), ), ).order_by('pk'), [(1, None, 1, 1), (2, 2, 2, 3), (3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4), (3, 4, 3, 4), (4, None, 5, 5)], transform=itemgetter('integer', 'test', 'min', 'max') ) def test_annotate_with_aggregation_in_condition(self): self.assertQuerysetEqual( 
CaseTestModel.objects.values(*self.group_by_fields).annotate( min=Min('fk_rel__integer'), max=Max('fk_rel__integer'), ).annotate( test=Case( When(integer2=F('min'), then=Value('min')), When(integer2=F('max'), then=Value('max')), ), ).order_by('pk'), [(1, 1, 'min'), (2, 3, 'max'), (3, 4, 'max'), (2, 2, 'min'), (3, 4, 'max'), (3, 3, 'min'), (4, 5, 'min')], transform=itemgetter('integer', 'integer2', 'test') ) def test_annotate_with_aggregation_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.group_by_fields).annotate( max=Max('fk_rel__integer'), ).annotate( test=Case( When(max=3, then=Value('max = 3')), When(max=4, then=Value('max = 4')), default=Value(''), ), ).order_by('pk'), [(1, 1, ''), (2, 3, 'max = 3'), (3, 4, 'max = 4'), (2, 3, 'max = 3'), (3, 4, 'max = 4'), (3, 4, 'max = 4'), (4, 5, '')], transform=itemgetter('integer', 'max', 'test') ) def test_annotate_exclude(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(test=Case( When(integer=1, then=Value('one')), When(integer=2, then=Value('two')), default=Value('other'), )).exclude(test='other').order_by('pk'), [(1, 'one'), (2, 'two'), (2, 'two')], transform=attrgetter('integer', 'test') ) def test_annotate_values_not_in_order_by(self): self.assertEqual( list(CaseTestModel.objects.annotate(test=Case( When(integer=1, then=Value('one')), When(integer=2, then=Value('two')), When(integer=3, then=Value('three')), default=Value('other'), )).order_by('test').values_list('integer', flat=True)), [1, 4, 3, 3, 3, 2, 2] ) def test_annotate_with_empty_when(self): objects = CaseTestModel.objects.annotate( selected=Case( When(pk__in=[], then=Value('selected')), default=Value('not selected'), ) ) self.assertEqual(len(objects), CaseTestModel.objects.count()) self.assertTrue(all(obj.selected == 'not selected' for obj in objects)) def test_combined_expression(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( test=Case( When(integer=1, then=2), When(integer=2, then=1), default=3, ) + 1, ).order_by('pk'), [(1, 3), (2, 2), (3, 4), (2, 2), (3, 4), (3, 4), (4, 4)], transform=attrgetter('integer', 'test') ) def test_in_subquery(self): self.assertQuerysetEqual( CaseTestModel.objects.filter( pk__in=CaseTestModel.objects.annotate( test=Case( When(integer=F('integer2'), then='pk'), When(integer=4, then='pk'), ), ).values('test')).order_by('pk'), [(1, 1), (2, 2), (3, 3), (4, 5)], transform=attrgetter('integer', 'integer2') ) def test_condition_with_lookups(self): qs = CaseTestModel.objects.annotate( test=Case( When(Q(integer2=1), string='2', then=Value(False)), When(Q(integer2=1), string='1', then=Value(True)), default=Value(False), output_field=BooleanField(), ), ) self.assertIs(qs.get(integer=1).test, True) def test_case_reuse(self): SOME_CASE = Case( When(pk=0, then=Value('0')), default=Value('1'), ) self.assertQuerysetEqual( CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk'), CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk').values_list('pk', 'somecase'), lambda x: (x.pk, x.somecase) ) def test_aggregate(self): self.assertEqual( CaseTestModel.objects.aggregate( one=Sum(Case( When(integer=1, then=1), )), two=Sum(Case( When(integer=2, then=1), )), three=Sum(Case( When(integer=3, then=1), )), four=Sum(Case( When(integer=4, then=1), )), ), {'one': 1, 'two': 2, 'three': 3, 'four': 1} ) def test_aggregate_with_expression_as_value(self): self.assertEqual( CaseTestModel.objects.aggregate( one=Sum(Case(When(integer=1, then='integer'))), two=Sum(Case(When(integer=2, then=F('integer') 
- 1))), three=Sum(Case(When(integer=3, then=F('integer') + 1))), ), {'one': 1, 'two': 2, 'three': 12} ) def test_aggregate_with_expression_as_condition(self): self.assertEqual( CaseTestModel.objects.aggregate( equal=Sum(Case( When(integer2=F('integer'), then=1), )), plus_one=Sum(Case( When(integer2=F('integer') + 1, then=1), )), ), {'equal': 3, 'plus_one': 4} ) def test_filter(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(integer=2, then=3), When(integer=3, then=4), default=1, )).order_by('pk'), [(1, 1), (2, 3), (3, 4), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_without_default(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(integer=2, then=3), When(integer=3, then=4), )).order_by('pk'), [(2, 3), (3, 4), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_expression_as_value(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(integer=2, then=F('integer') + 1), When(integer=3, then=F('integer')), default='integer', )).order_by('pk'), [(1, 1), (2, 3), (3, 3)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_expression_as_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(string=Case( When(integer2=F('integer'), then=Value('2')), When(integer2=F('integer') + 1, then=Value('3')), )).order_by('pk'), [(3, 4, '3'), (2, 2, '2'), (3, 4, '3')], transform=attrgetter('integer', 'integer2', 'string') ) def test_filter_with_join_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(integer=2, then=F('o2o_rel__integer') + 1), When(integer=3, then=F('o2o_rel__integer')), default='o2o_rel__integer', )).order_by('pk'), [(1, 1), (2, 3), (3, 3)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_join_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer=Case( When(integer2=F('o2o_rel__integer') + 1, then=2), When(integer2=F('o2o_rel__integer'), then=3), )).order_by('pk'), [(2, 3), (3, 3)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_join_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(o2o_rel__integer=1, then=1), When(o2o_rel__integer=2, then=3), When(o2o_rel__integer=3, then=4), )).order_by('pk'), [(1, 1), (2, 3), (3, 4), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_annotation_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f=F('integer'), f_plus_1=F('integer') + 1, ).filter( integer2=Case( When(integer=2, then='f_plus_1'), When(integer=3, then='f'), ), ).order_by('pk'), [(2, 3), (3, 3)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_annotation_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_plus_1=F('integer') + 1, ).filter( integer=Case( When(integer2=F('integer'), then=2), When(integer2=F('f_plus_1'), then=3), ), ).order_by('pk'), [(3, 4), (2, 2), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_annotation_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_plus_1=F('integer') + 1, ).filter( integer2=Case( When(f_plus_1=3, then=3), When(f_plus_1=4, then=4), default=1, ), ).order_by('pk'), [(1, 1), (2, 3), (3, 4), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_aggregation_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.group_by_fields).annotate( 
min=Min('fk_rel__integer'), max=Max('fk_rel__integer'), ).filter( integer2=Case( When(integer=2, then='min'), When(integer=3, then='max'), ), ).order_by('pk'), [(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)], transform=itemgetter('integer', 'integer2', 'min', 'max') ) def test_filter_with_aggregation_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.group_by_fields).annotate( min=Min('fk_rel__integer'), max=Max('fk_rel__integer'), ).filter( integer=Case( When(integer2=F('min'), then=2), When(integer2=F('max'), then=3), ), ).order_by('pk'), [(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)], transform=itemgetter('integer', 'integer2', 'min', 'max') ) def test_filter_with_aggregation_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.group_by_fields).annotate( max=Max('fk_rel__integer'), ).filter( integer=Case( When(max=3, then=2), When(max=4, then=3), ), ).order_by('pk'), [(2, 3, 3), (3, 4, 4), (2, 2, 3), (3, 4, 4), (3, 3, 4)], transform=itemgetter('integer', 'integer2', 'max') ) def test_update(self): CaseTestModel.objects.update( string=Case( When(integer=1, then=Value('one')), When(integer=2, then=Value('two')), default=Value('other'), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')], transform=attrgetter('integer', 'string') ) def test_update_without_default(self): CaseTestModel.objects.update( integer2=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'integer2') ) def test_update_with_expression_as_value(self): CaseTestModel.objects.update( integer=Case( When(integer=1, then=F('integer') + 1), When(integer=2, then=F('integer') + 3), default='integer', ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [('1', 2), ('2', 5), ('3', 3), ('2', 5), ('3', 3), ('3', 3), ('4', 4)], transform=attrgetter('string', 'integer') ) def test_update_with_expression_as_condition(self): CaseTestModel.objects.update( string=Case( When(integer2=F('integer'), then=Value('equal')), When(integer2=F('integer') + 1, then=Value('+1')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')], transform=attrgetter('integer', 'string') ) def test_update_with_join_in_condition_raise_field_error(self): with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'): CaseTestModel.objects.update( integer=Case( When(integer2=F('o2o_rel__integer') + 1, then=2), When(integer2=F('o2o_rel__integer'), then=3), ), ) def test_update_with_join_in_predicate_raise_field_error(self): with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'): CaseTestModel.objects.update( string=Case( When(o2o_rel__integer=1, then=Value('one')), When(o2o_rel__integer=2, then=Value('two')), When(o2o_rel__integer=3, then=Value('three')), default=Value('other'), ), ) def test_update_big_integer(self): CaseTestModel.objects.update( big_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'big_integer') ) def 
test_update_binary(self): CaseTestModel.objects.update( binary=Case( When(integer=1, then=b'one'), When(integer=2, then=b'two'), default=b'', ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, b'one'), (2, b'two'), (3, b''), (2, b'two'), (3, b''), (3, b''), (4, b'')], transform=lambda o: (o.integer, bytes(o.binary)) ) def test_update_boolean(self): CaseTestModel.objects.update( boolean=Case( When(integer=1, then=True), When(integer=2, then=True), default=False, ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, True), (2, True), (3, False), (2, True), (3, False), (3, False), (4, False)], transform=attrgetter('integer', 'boolean') ) def test_update_date(self): CaseTestModel.objects.update( date=Case( When(integer=1, then=date(2015, 1, 1)), When(integer=2, then=date(2015, 1, 2)), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, date(2015, 1, 1)), (2, date(2015, 1, 2)), (3, None), (2, date(2015, 1, 2)), (3, None), (3, None), (4, None) ], transform=attrgetter('integer', 'date') ) def test_update_date_time(self): CaseTestModel.objects.update( date_time=Case( When(integer=1, then=datetime(2015, 1, 1)), When(integer=2, then=datetime(2015, 1, 2)), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, datetime(2015, 1, 1)), (2, datetime(2015, 1, 2)), (3, None), (2, datetime(2015, 1, 2)), (3, None), (3, None), (4, None) ], transform=attrgetter('integer', 'date_time') ) def test_update_decimal(self): CaseTestModel.objects.update( decimal=Case( When(integer=1, then=Decimal('1.1')), When(integer=2, then=Value(Decimal('2.2'), output_field=DecimalField())), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, Decimal('1.1')), (2, Decimal('2.2')), (3, None), (2, Decimal('2.2')), (3, None), (3, None), (4, None) ], transform=attrgetter('integer', 'decimal') ) def test_update_duration(self): CaseTestModel.objects.update( duration=Case( When(integer=1, then=timedelta(1)), When(integer=2, then=timedelta(2)), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, timedelta(1)), (2, timedelta(2)), (3, None), (2, timedelta(2)), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'duration') ) def test_update_email(self): CaseTestModel.objects.update( email=Case( When(integer=1, then=Value('[email protected]')), When(integer=2, then=Value('[email protected]')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '[email protected]'), (2, '[email protected]'), (3, ''), (2, '[email protected]'), (3, ''), (3, ''), (4, '')], transform=attrgetter('integer', 'email') ) def test_update_file(self): CaseTestModel.objects.update( file=Case( When(integer=1, then=Value('~/1')), When(integer=2, then=Value('~/2')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')], transform=lambda o: (o.integer, str(o.file)) ) def test_update_file_path(self): CaseTestModel.objects.update( file_path=Case( When(integer=1, then=Value('~/1')), When(integer=2, then=Value('~/2')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')], transform=attrgetter('integer', 'file_path') ) def test_update_float(self): CaseTestModel.objects.update( float=Case( When(integer=1, then=1.1), When(integer=2, then=2.2), ), ) 
self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1.1), (2, 2.2), (3, None), (2, 2.2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'float') ) @unittest.skipUnless(Image, "Pillow not installed") def test_update_image(self): CaseTestModel.objects.update( image=Case( When(integer=1, then=Value('~/1')), When(integer=2, then=Value('~/2')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')], transform=lambda o: (o.integer, str(o.image)) ) def test_update_generic_ip_address(self): CaseTestModel.objects.update( generic_ip_address=Case( When(integer=1, then=Value('1.1.1.1')), When(integer=2, then=Value('2.2.2.2')), output_field=GenericIPAddressField(), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '1.1.1.1'), (2, '2.2.2.2'), (3, None), (2, '2.2.2.2'), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'generic_ip_address') ) def test_update_null_boolean(self): CaseTestModel.objects.update( null_boolean=Case( When(integer=1, then=True), When(integer=2, then=False), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'null_boolean') ) def test_update_null_boolean_old(self): CaseTestModel.objects.update( null_boolean_old=Case( When(integer=1, then=True), When(integer=2, then=False), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'null_boolean_old') ) def test_update_positive_big_integer(self): CaseTestModel.objects.update( positive_big_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'positive_big_integer') ) def test_update_positive_integer(self): CaseTestModel.objects.update( positive_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'positive_integer') ) def test_update_positive_small_integer(self): CaseTestModel.objects.update( positive_small_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'positive_small_integer') ) def test_update_slug(self): CaseTestModel.objects.update( slug=Case( When(integer=1, then=Value('1')), When(integer=2, then=Value('2')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')], transform=attrgetter('integer', 'slug') ) def test_update_small_integer(self): CaseTestModel.objects.update( small_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'small_integer') ) def test_update_string(self): CaseTestModel.objects.filter(string__in=['1', '2']).update( 
string=Case( When(integer=1, then=Value('1')), When(integer=2, then=Value('2')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.filter(string__in=['1', '2']).order_by('pk'), [(1, '1'), (2, '2'), (2, '2')], transform=attrgetter('integer', 'string') ) def test_update_text(self): CaseTestModel.objects.update( text=Case( When(integer=1, then=Value('1')), When(integer=2, then=Value('2')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')], transform=attrgetter('integer', 'text') ) def test_update_time(self): CaseTestModel.objects.update( time=Case( When(integer=1, then=time(1)), When(integer=2, then=time(2)), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, time(1)), (2, time(2)), (3, None), (2, time(2)), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'time') ) def test_update_url(self): CaseTestModel.objects.update( url=Case( When(integer=1, then=Value('http://1.example.com/')), When(integer=2, then=Value('http://2.example.com/')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, 'http://1.example.com/'), (2, 'http://2.example.com/'), (3, ''), (2, 'http://2.example.com/'), (3, ''), (3, ''), (4, '') ], transform=attrgetter('integer', 'url') ) def test_update_uuid(self): CaseTestModel.objects.update( uuid=Case( When(integer=1, then=UUID('11111111111111111111111111111111')), When(integer=2, then=UUID('22222222222222222222222222222222')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, UUID('11111111111111111111111111111111')), (2, UUID('22222222222222222222222222222222')), (3, None), (2, UUID('22222222222222222222222222222222')), (3, None), (3, None), (4, None), ], transform=attrgetter('integer', 'uuid') ) def test_update_fk(self): obj1, obj2 = CaseTestModel.objects.all()[:2] CaseTestModel.objects.update( fk=Case( When(integer=1, then=obj1.pk), When(integer=2, then=obj2.pk), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, obj1.pk), (2, obj2.pk), (3, None), (2, obj2.pk), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'fk_id') ) def test_lookup_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( test=Case( When(integer__lt=2, then=Value('less than 2')), When(integer__gt=2, then=Value('greater than 2')), default=Value('equal to 2'), ), ).order_by('pk'), [ (1, 'less than 2'), (2, 'equal to 2'), (3, 'greater than 2'), (2, 'equal to 2'), (3, 'greater than 2'), (3, 'greater than 2'), (4, 'greater than 2') ], transform=attrgetter('integer', 'test') ) def test_lookup_different_fields(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( test=Case( When(integer=2, integer2=3, then=Value('when')), default=Value('default'), ), ).order_by('pk'), [ (1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'default'), (3, 4, 'default'), (3, 3, 'default'), (4, 5, 'default') ], transform=attrgetter('integer', 'integer2', 'test') ) def test_combined_q_object(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( test=Case( When(Q(integer=2) | Q(integer2=3), then=Value('when')), default=Value('default'), ), ).order_by('pk'), [ (1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'when'), (3, 4, 'default'), (3, 3, 'when'), (4, 5, 'default') ], transform=attrgetter('integer', 'integer2', 'test') ) def test_order_by_conditional_implicit(self): 
self.assertQuerysetEqual( CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case( When(integer=1, then=2), When(integer=2, then=1), default=3, )).order_by('test', 'pk'), [(2, 1), (2, 1), (1, 2)], transform=attrgetter('integer', 'test') ) def test_order_by_conditional_explicit(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case( When(integer=1, then=2), When(integer=2, then=1), default=3, )).order_by(F('test').asc(), 'pk'), [(2, 1), (2, 1), (1, 2)], transform=attrgetter('integer', 'test') ) def test_join_promotion(self): o = CaseTestModel.objects.create(integer=1, integer2=1, string='1') # Testing that: # 1. There isn't any object on the remote side of the fk_rel # relation. If the query used inner joins, then the join to fk_rel # would remove o from the results. So, in effect we are testing that # we are promoting the fk_rel join to a left outer join here. # 2. The default value of 3 is generated for the case expression. self.assertQuerysetEqual( CaseTestModel.objects.filter(pk=o.pk).annotate( foo=Case( When(fk_rel__pk=1, then=2), default=3, ), ), [(o, 3)], lambda x: (x, x.foo) ) # Now 2 should be generated, as the fk_rel is null. self.assertQuerysetEqual( CaseTestModel.objects.filter(pk=o.pk).annotate( foo=Case( When(fk_rel__isnull=True, then=2), default=3, ), ), [(o, 2)], lambda x: (x, x.foo) ) def test_join_promotion_multiple_annotations(self): o = CaseTestModel.objects.create(integer=1, integer2=1, string='1') # Testing that: # 1. There isn't any object on the remote side of the fk_rel # relation. If the query used inner joins, then the join to fk_rel # would remove o from the results. So, in effect we are testing that # we are promoting the fk_rel join to a left outer join here. # 2. The default value of 3 is generated for the case expression. self.assertQuerysetEqual( CaseTestModel.objects.filter(pk=o.pk).annotate( foo=Case( When(fk_rel__pk=1, then=2), default=3, ), bar=Case( When(fk_rel__pk=1, then=4), default=5, ), ), [(o, 3, 5)], lambda x: (x, x.foo, x.bar) ) # Now 2 should be generated, as the fk_rel is null. self.assertQuerysetEqual( CaseTestModel.objects.filter(pk=o.pk).annotate( foo=Case( When(fk_rel__isnull=True, then=2), default=3, ), bar=Case( When(fk_rel__isnull=True, then=4), default=5, ), ), [(o, 2, 4)], lambda x: (x, x.foo, x.bar) ) def test_m2m_exclude(self): CaseTestModel.objects.create(integer=10, integer2=1, string='1') qs = CaseTestModel.objects.values_list('id', 'integer').annotate( cnt=Sum( Case(When(~Q(fk_rel__integer=1), then=1), default=2), ), ).order_by('integer') # The first o has 2 as its fk_rel__integer=1, thus it hits the # default=2 case. The other ones have 2 as the result as they have 2 # fk_rel objects, except for integer=4 and integer=10 (created above). # The integer=4 case has one integer, thus the result is 1, and # integer=10 doesn't have any and this too generates 1 (instead of 0) # as ~Q() also matches nulls. self.assertQuerysetEqual( qs, [(1, 2), (2, 2), (2, 2), (3, 2), (3, 2), (3, 2), (4, 1), (10, 1)], lambda x: x[1:] ) def test_m2m_reuse(self): CaseTestModel.objects.create(integer=10, integer2=1, string='1') # Need to use values before annotate so that Oracle will not group # by fields it isn't capable of grouping by. 
qs = CaseTestModel.objects.values_list('id', 'integer').annotate( cnt=Sum( Case(When(~Q(fk_rel__integer=1), then=1), default=2), ), ).annotate( cnt2=Sum( Case(When(~Q(fk_rel__integer=1), then=1), default=2), ), ).order_by('integer') self.assertEqual(str(qs.query).count(' JOIN '), 1) self.assertQuerysetEqual( qs, [(1, 2, 2), (2, 2, 2), (2, 2, 2), (3, 2, 2), (3, 2, 2), (3, 2, 2), (4, 1, 1), (10, 1, 1)], lambda x: x[1:] ) def test_aggregation_empty_cases(self): tests = [ # Empty cases and default. (Case(output_field=IntegerField()), None), # Empty cases and a constant default. (Case(default=Value('empty')), 'empty'), # Empty cases and column in the default. (Case(default=F('url')), ''), ] for case, value in tests: with self.subTest(case=case): self.assertQuerysetEqual( CaseTestModel.objects.values('string').annotate( case=case, integer_sum=Sum('integer'), ).order_by('string'), [ ('1', value, 1), ('2', value, 4), ('3', value, 9), ('4', value, 4), ], transform=itemgetter('string', 'case', 'integer_sum'), ) class CaseDocumentationExamples(TestCase): @classmethod def setUpTestData(cls): Client.objects.create( name='Jane Doe', account_type=Client.REGULAR, registered_on=date.today() - timedelta(days=36), ) Client.objects.create( name='James Smith', account_type=Client.GOLD, registered_on=date.today() - timedelta(days=5), ) Client.objects.create( name='Jack Black', account_type=Client.PLATINUM, registered_on=date.today() - timedelta(days=10 * 365), ) def test_simple_example(self): self.assertQuerysetEqual( Client.objects.annotate( discount=Case( When(account_type=Client.GOLD, then=Value('5%')), When(account_type=Client.PLATINUM, then=Value('10%')), default=Value('0%'), ), ).order_by('pk'), [('Jane Doe', '0%'), ('James Smith', '5%'), ('Jack Black', '10%')], transform=attrgetter('name', 'discount') ) def test_lookup_example(self): a_month_ago = date.today() - timedelta(days=30) a_year_ago = date.today() - timedelta(days=365) self.assertQuerysetEqual( Client.objects.annotate( discount=Case( When(registered_on__lte=a_year_ago, then=Value('10%')), When(registered_on__lte=a_month_ago, then=Value('5%')), default=Value('0%'), ), ).order_by('pk'), [('Jane Doe', '5%'), ('James Smith', '0%'), ('Jack Black', '10%')], transform=attrgetter('name', 'discount') ) def test_conditional_update_example(self): a_month_ago = date.today() - timedelta(days=30) a_year_ago = date.today() - timedelta(days=365) Client.objects.update( account_type=Case( When(registered_on__lte=a_year_ago, then=Value(Client.PLATINUM)), When(registered_on__lte=a_month_ago, then=Value(Client.GOLD)), default=Value(Client.REGULAR), ), ) self.assertQuerysetEqual( Client.objects.all().order_by('pk'), [('Jane Doe', 'G'), ('James Smith', 'R'), ('Jack Black', 'P')], transform=attrgetter('name', 'account_type') ) def test_conditional_aggregation_example(self): Client.objects.create( name='Jean Grey', account_type=Client.REGULAR, registered_on=date.today(), ) Client.objects.create( name='James Bond', account_type=Client.PLATINUM, registered_on=date.today(), ) Client.objects.create( name='Jane Porter', account_type=Client.PLATINUM, registered_on=date.today(), ) self.assertEqual( Client.objects.aggregate( regular=Count('pk', filter=Q(account_type=Client.REGULAR)), gold=Count('pk', filter=Q(account_type=Client.GOLD)), platinum=Count('pk', filter=Q(account_type=Client.PLATINUM)), ), {'regular': 2, 'gold': 1, 'platinum': 3} ) # This was the example before the filter argument was added. 
self.assertEqual( Client.objects.aggregate( regular=Sum(Case( When(account_type=Client.REGULAR, then=1), )), gold=Sum(Case( When(account_type=Client.GOLD, then=1), )), platinum=Sum(Case( When(account_type=Client.PLATINUM, then=1), )), ), {'regular': 2, 'gold': 1, 'platinum': 3} ) def test_filter_example(self): a_month_ago = date.today() - timedelta(days=30) a_year_ago = date.today() - timedelta(days=365) self.assertQuerysetEqual( Client.objects.filter( registered_on__lte=Case( When(account_type=Client.GOLD, then=a_month_ago), When(account_type=Client.PLATINUM, then=a_year_ago), ), ), [('Jack Black', 'P')], transform=attrgetter('name', 'account_type') ) def test_hash(self): expression_1 = Case( When(account_type__in=[Client.REGULAR, Client.GOLD], then=1), default=2, output_field=IntegerField(), ) expression_2 = Case( When(account_type__in=(Client.REGULAR, Client.GOLD), then=1), default=2, output_field=IntegerField(), ) expression_3 = Case(When(account_type__in=[Client.REGULAR, Client.GOLD], then=1), default=2) expression_4 = Case(When(account_type__in=[Client.PLATINUM, Client.GOLD], then=2), default=1) self.assertEqual(hash(expression_1), hash(expression_2)) self.assertNotEqual(hash(expression_2), hash(expression_3)) self.assertNotEqual(hash(expression_1), hash(expression_4)) self.assertNotEqual(hash(expression_3), hash(expression_4)) class CaseWhenTests(SimpleTestCase): def test_only_when_arguments(self): msg = 'Positional arguments must all be When objects.' with self.assertRaisesMessage(TypeError, msg): Case(When(Q(pk__in=[])), object()) def test_invalid_when_constructor_args(self): msg = ( 'When() supports a Q object, a boolean expression, or lookups as ' 'a condition.' ) with self.assertRaisesMessage(TypeError, msg): When(condition=object()) with self.assertRaisesMessage(TypeError, msg): When(condition=Value(1)) with self.assertRaisesMessage(TypeError, msg): When(Value(1), string='1') with self.assertRaisesMessage(TypeError, msg): When() def test_empty_q_object(self): msg = "An empty Q() can't be used as a When() condition." with self.assertRaisesMessage(ValueError, msg): When(Q(), then=Value(True))
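# --- Illustrative sketch, not part of the original test module ---
# A Case()/When() annotation like the ones exercised above compiles to a SQL
# CASE expression. The exact SQL differs per backend, but it can be inspected
# via the queryset's query attribute (assumes the test models and a configured
# database, as in the tests above).
qs = CaseTestModel.objects.annotate(
    test=Case(
        When(integer=1, then=Value('one')),
        When(integer=2, then=Value('two')),
        default=Value('other'),
    ),
)
# Roughly: SELECT ..., CASE WHEN "integer" = 1 THEN 'one'
#                           WHEN "integer" = 2 THEN 'two'
#                           ELSE 'other' END AS "test" ...
print(qs.query)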
df910860ff9f15d727a29e377a54b18b04c02cbdc0c6a43bad948ee3bd4a1c50
import time from datetime import datetime, timedelta from http import cookies from django.http import HttpResponse from django.test import SimpleTestCase from django.test.utils import freeze_time from django.utils.http import http_date from django.utils.timezone import utc class SetCookieTests(SimpleTestCase): def test_near_expiration(self): """Cookie will expire when a near expiration time is provided.""" response = HttpResponse() # There's a timing weakness in this test; The expected result for # max-age requires that there be a very slight difference between the # evaluated expiration time and the time evaluated in set_cookie(). If # this difference doesn't exist, the cookie time will be 1 second # larger. The sleep guarantees that there will be a time difference. expires = datetime.utcnow() + timedelta(seconds=10) time.sleep(0.001) response.set_cookie('datetime', expires=expires) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['max-age'], 10) def test_aware_expiration(self): """set_cookie() accepts an aware datetime as expiration time.""" response = HttpResponse() expires = (datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc) time.sleep(0.001) response.set_cookie('datetime', expires=expires) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['max-age'], 10) def test_create_cookie_after_deleting_cookie(self): """Setting a cookie after deletion clears the expiry date.""" response = HttpResponse() response.set_cookie('c', 'old-value') self.assertEqual(response.cookies['c']['expires'], '') response.delete_cookie('c') self.assertEqual(response.cookies['c']['expires'], 'Thu, 01 Jan 1970 00:00:00 GMT') response.set_cookie('c', 'new-value') self.assertEqual(response.cookies['c']['expires'], '') def test_far_expiration(self): """Cookie will expire when a distant expiration time is provided.""" response = HttpResponse() response.set_cookie('datetime', expires=datetime(2038, 1, 1, 4, 5, 6)) datetime_cookie = response.cookies['datetime'] self.assertIn( datetime_cookie['expires'], # assertIn accounts for slight time dependency (#23450) ('Fri, 01 Jan 2038 04:05:06 GMT', 'Fri, 01 Jan 2038 04:05:07 GMT') ) def test_max_age_expiration(self): """Cookie will expire if max_age is provided.""" response = HttpResponse() set_cookie_time = time.time() with freeze_time(set_cookie_time): response.set_cookie('max_age', max_age=10) max_age_cookie = response.cookies['max_age'] self.assertEqual(max_age_cookie['max-age'], 10) self.assertEqual(max_age_cookie['expires'], http_date(set_cookie_time + 10)) def test_max_age_int(self): response = HttpResponse() response.set_cookie('max_age', max_age=10.6) self.assertEqual(response.cookies['max_age']['max-age'], 10) def test_httponly_cookie(self): response = HttpResponse() response.set_cookie('example', httponly=True) example_cookie = response.cookies['example'] self.assertIn('; %s' % cookies.Morsel._reserved['httponly'], str(example_cookie)) self.assertIs(example_cookie['httponly'], True) def test_unicode_cookie(self): """HttpResponse.set_cookie() works with Unicode data.""" response = HttpResponse() cookie_value = '清風' response.set_cookie('test', cookie_value) self.assertEqual(response.cookies['test'].value, cookie_value) def test_samesite(self): response = HttpResponse() response.set_cookie('example', samesite='None') self.assertEqual(response.cookies['example']['samesite'], 'None') response.set_cookie('example', samesite='Lax') self.assertEqual(response.cookies['example']['samesite'], 'Lax') 
response.set_cookie('example', samesite='strict') self.assertEqual(response.cookies['example']['samesite'], 'strict') def test_invalid_samesite(self): msg = 'samesite must be "lax", "none", or "strict".' with self.assertRaisesMessage(ValueError, msg): HttpResponse().set_cookie('example', samesite='invalid') class DeleteCookieTests(SimpleTestCase): def test_default(self): response = HttpResponse() response.delete_cookie('c') cookie = response.cookies['c'] self.assertEqual(cookie['expires'], 'Thu, 01 Jan 1970 00:00:00 GMT') self.assertEqual(cookie['max-age'], 0) self.assertEqual(cookie['path'], '/') self.assertEqual(cookie['secure'], '') self.assertEqual(cookie['domain'], '') self.assertEqual(cookie['samesite'], '') def test_delete_cookie_secure_prefix(self): """ delete_cookie() sets the secure flag if the cookie name starts with __Host- or __Secure- (without that, browsers ignore cookies with those prefixes). """ response = HttpResponse() for prefix in ('Secure', 'Host'): with self.subTest(prefix=prefix): cookie_name = '__%s-c' % prefix response.delete_cookie(cookie_name) self.assertIs(response.cookies[cookie_name]['secure'], True) def test_delete_cookie_secure_samesite_none(self): # delete_cookie() sets the secure flag if samesite='none'. response = HttpResponse() response.delete_cookie('c', samesite='none') self.assertIs(response.cookies['c']['secure'], True) def test_delete_cookie_samesite(self): response = HttpResponse() response.delete_cookie('c', samesite='lax') self.assertEqual(response.cookies['c']['samesite'], 'lax')
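# --- Illustrative sketch, not part of the original test module ---
# How a single set_cookie() call maps onto the morsel attributes asserted
# above; the printed header is indicative only (the expires value depends on
# the current time).
from django.http import HttpResponse

response = HttpResponse()
response.set_cookie('example', 'value', max_age=10, httponly=True, samesite='Lax')
morsel = response.cookies['example']
assert morsel['max-age'] == 10
assert morsel['httponly'] is True
assert morsel['samesite'] == 'Lax'
print(morsel.output())
# Set-Cookie: example=value; expires=...; HttpOnly; Max-Age=10; Path=/; SameSite=Lax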
28bef90b374d4973887c6f620dc6ae9f3b39901c2c1cce181ce8608f63c04242
import io import os import sys import tempfile from unittest import skipIf from django.core.files.base import ContentFile from django.http import FileResponse from django.test import SimpleTestCase class FileResponseTests(SimpleTestCase): def test_file_from_disk_response(self): response = FileResponse(open(__file__, 'rb')) self.assertEqual(response.headers['Content-Length'], str(os.path.getsize(__file__))) self.assertIn(response.headers['Content-Type'], ['text/x-python', 'text/plain']) self.assertEqual( response.headers['Content-Disposition'], 'inline; filename="test_fileresponse.py"', ) response.close() def test_file_from_buffer_response(self): response = FileResponse(io.BytesIO(b'binary content')) self.assertEqual(response.headers['Content-Length'], '14') self.assertEqual(response.headers['Content-Type'], 'application/octet-stream') self.assertFalse(response.has_header('Content-Disposition')) self.assertEqual(list(response), [b'binary content']) def test_file_from_buffer_unnamed_attachment(self): response = FileResponse(io.BytesIO(b'binary content'), as_attachment=True) self.assertEqual(response.headers['Content-Length'], '14') self.assertEqual(response.headers['Content-Type'], 'application/octet-stream') self.assertEqual(response.headers['Content-Disposition'], 'attachment') self.assertEqual(list(response), [b'binary content']) @skipIf(sys.platform == 'win32', "Named pipes are Unix-only.") def test_file_from_named_pipe_response(self): with tempfile.TemporaryDirectory() as temp_dir: pipe_file = os.path.join(temp_dir, 'named_pipe') os.mkfifo(pipe_file) pipe_for_read = os.open(pipe_file, os.O_RDONLY | os.O_NONBLOCK) with open(pipe_file, 'wb') as pipe_for_write: pipe_for_write.write(b'binary content') response = FileResponse(os.fdopen(pipe_for_read, mode='rb')) self.assertEqual(list(response), [b'binary content']) response.close() self.assertFalse(response.has_header('Content-Length')) def test_file_from_disk_as_attachment(self): response = FileResponse(open(__file__, 'rb'), as_attachment=True) self.assertEqual(response.headers['Content-Length'], str(os.path.getsize(__file__))) self.assertIn(response.headers['Content-Type'], ['text/x-python', 'text/plain']) self.assertEqual( response.headers['Content-Disposition'], 'attachment; filename="test_fileresponse.py"', ) response.close() def test_compressed_response(self): """ If compressed responses are served with the uncompressed Content-Type and a compression Content-Encoding, browsers might automatically uncompress the file, which is most probably not wanted. """ test_tuples = ( ('.tar.gz', 'application/gzip'), ('.tar.bz2', 'application/x-bzip'), ('.tar.xz', 'application/x-xz'), ) for extension, mimetype in test_tuples: with self.subTest(ext=extension): with tempfile.NamedTemporaryFile(suffix=extension) as tmp: response = FileResponse(tmp) self.assertEqual(response.headers['Content-Type'], mimetype) self.assertFalse(response.has_header('Content-Encoding')) def test_unicode_attachment(self): response = FileResponse( ContentFile(b'binary content', name="祝您平安.odt"), as_attachment=True, content_type='application/vnd.oasis.opendocument.text', ) self.assertEqual( response.headers['Content-Type'], 'application/vnd.oasis.opendocument.text', ) self.assertEqual( response.headers['Content-Disposition'], "attachment; filename*=utf-8''%E7%A5%9D%E6%82%A8%E5%B9%B3%E5%AE%89.odt" )
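# --- Illustrative sketch, not part of the original test module ---
# Minimal FileResponse usage matching the behaviour asserted above; the file
# name 'report.pdf' is a placeholder, not a fixture from this test suite.
from django.http import FileResponse

response = FileResponse(open('report.pdf', 'rb'), as_attachment=True)
# Content-Length and Content-Type are inferred from the wrapped file, and
# Content-Disposition becomes: attachment; filename="report.pdf"
response.close()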
f375dcdaf051fd0b93bd3d12713c5e54a4ddba00652eb6f338d9aaa72535e299
import io from django.conf import settings from django.core.cache import cache from django.http import HttpResponse from django.http.response import HttpResponseBase from django.test import SimpleTestCase UTF8 = 'utf-8' ISO88591 = 'iso-8859-1' class HttpResponseBaseTests(SimpleTestCase): def test_closed(self): r = HttpResponseBase() self.assertIs(r.closed, False) r.close() self.assertIs(r.closed, True) def test_write(self): r = HttpResponseBase() self.assertIs(r.writable(), False) with self.assertRaisesMessage(OSError, 'This HttpResponseBase instance is not writable'): r.write('asdf') with self.assertRaisesMessage(OSError, 'This HttpResponseBase instance is not writable'): r.writelines(['asdf\n', 'qwer\n']) def test_tell(self): r = HttpResponseBase() with self.assertRaisesMessage(OSError, 'This HttpResponseBase instance cannot tell its position'): r.tell() def test_setdefault(self): """ HttpResponseBase.setdefault() should not change an existing header and should be case insensitive. """ r = HttpResponseBase() r.headers['Header'] = 'Value' r.setdefault('header', 'changed') self.assertEqual(r.headers['header'], 'Value') r.setdefault('x-header', 'DefaultValue') self.assertEqual(r.headers['X-Header'], 'DefaultValue') class HttpResponseTests(SimpleTestCase): def test_status_code(self): resp = HttpResponse(status=503) self.assertEqual(resp.status_code, 503) self.assertEqual(resp.reason_phrase, "Service Unavailable") def test_change_status_code(self): resp = HttpResponse() resp.status_code = 503 self.assertEqual(resp.status_code, 503) self.assertEqual(resp.reason_phrase, "Service Unavailable") def test_valid_status_code_string(self): resp = HttpResponse(status='100') self.assertEqual(resp.status_code, 100) resp = HttpResponse(status='404') self.assertEqual(resp.status_code, 404) resp = HttpResponse(status='599') self.assertEqual(resp.status_code, 599) def test_invalid_status_code(self): must_be_integer = 'HTTP status code must be an integer.' must_be_integer_in_range = 'HTTP status code must be an integer from 100 to 599.' with self.assertRaisesMessage(TypeError, must_be_integer): HttpResponse(status=object()) with self.assertRaisesMessage(TypeError, must_be_integer): HttpResponse(status="J'attendrai") with self.assertRaisesMessage(ValueError, must_be_integer_in_range): HttpResponse(status=99) with self.assertRaisesMessage(ValueError, must_be_integer_in_range): HttpResponse(status=600) def test_reason_phrase(self): reason = "I'm an anarchist coffee pot on crack." 
resp = HttpResponse(status=419, reason=reason) self.assertEqual(resp.status_code, 419) self.assertEqual(resp.reason_phrase, reason) def test_charset_detection(self): """ HttpResponse should parse charset from content_type.""" response = HttpResponse('ok') self.assertEqual(response.charset, settings.DEFAULT_CHARSET) response = HttpResponse(charset=ISO88591) self.assertEqual(response.charset, ISO88591) self.assertEqual(response.headers['Content-Type'], 'text/html; charset=%s' % ISO88591) response = HttpResponse(content_type='text/plain; charset=%s' % UTF8, charset=ISO88591) self.assertEqual(response.charset, ISO88591) response = HttpResponse(content_type='text/plain; charset=%s' % ISO88591) self.assertEqual(response.charset, ISO88591) response = HttpResponse(content_type='text/plain; charset="%s"' % ISO88591) self.assertEqual(response.charset, ISO88591) response = HttpResponse(content_type='text/plain; charset=') self.assertEqual(response.charset, settings.DEFAULT_CHARSET) response = HttpResponse(content_type='text/plain') self.assertEqual(response.charset, settings.DEFAULT_CHARSET) def test_response_content_charset(self): """HttpResponse should encode based on charset.""" content = "Café :)" utf8_content = content.encode(UTF8) iso_content = content.encode(ISO88591) response = HttpResponse(utf8_content) self.assertContains(response, utf8_content) response = HttpResponse(iso_content, content_type='text/plain; charset=%s' % ISO88591) self.assertContains(response, iso_content) response = HttpResponse(iso_content) self.assertContains(response, iso_content) response = HttpResponse(iso_content, content_type='text/plain') self.assertContains(response, iso_content) def test_repr(self): response = HttpResponse(content="Café :)".encode(UTF8), status=201) expected = '<HttpResponse status_code=201, "text/html; charset=utf-8">' self.assertEqual(repr(response), expected) def test_repr_no_content_type(self): response = HttpResponse(status=204) del response.headers['Content-Type'] self.assertEqual(repr(response), '<HttpResponse status_code=204>') def test_wrap_textiowrapper(self): content = "Café :)" r = HttpResponse() with io.TextIOWrapper(r, UTF8) as buf: buf.write(content) self.assertEqual(r.content, content.encode(UTF8)) def test_generator_cache(self): generator = (str(i) for i in range(10)) response = HttpResponse(content=generator) self.assertEqual(response.content, b'0123456789') with self.assertRaises(StopIteration): next(generator) cache.set('my-response-key', response) response = cache.get('my-response-key') self.assertEqual(response.content, b'0123456789')
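# --- Illustrative sketch, not part of the original test module ---
# The charset behaviour asserted above, in short form (assumes a configured
# Django settings module, as the tests do):
from django.http import HttpResponse

r = HttpResponse('Café :)', content_type='text/plain; charset=iso-8859-1')
assert r.charset == 'iso-8859-1'                     # parsed from content_type
assert r.content == 'Café :)'.encode('iso-8859-1')   # body encoded with that charset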
64aafce922693ab4af29cddcfed44e2d0ee6a8fbe7958d115661cb829dc7959b
import datetime from unittest import mock from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.views import shortcut from django.contrib.sites.models import Site from django.contrib.sites.shortcuts import get_current_site from django.http import Http404, HttpRequest from django.test import TestCase, override_settings from .models import ( Article, Author, FooWithBrokenAbsoluteUrl, FooWithoutUrl, FooWithUrl, ModelWithM2MToSite, ModelWithNullFKToSite, SchemeIncludedURL, Site as MockSite, ) @override_settings(ROOT_URLCONF='contenttypes_tests.urls') class ContentTypesViewsTests(TestCase): @classmethod def setUpTestData(cls): # Don't use the manager to ensure the site exists with pk=1, regardless # of whether or not it already exists. cls.site1 = Site(pk=1, domain='testserver', name='testserver') cls.site1.save() cls.author1 = Author.objects.create(name='Boris') cls.article1 = Article.objects.create( title='Old Article', slug='old_article', author=cls.author1, date_created=datetime.datetime(2001, 1, 1, 21, 22, 23), ) cls.article2 = Article.objects.create( title='Current Article', slug='current_article', author=cls.author1, date_created=datetime.datetime(2007, 9, 17, 21, 22, 23), ) cls.article3 = Article.objects.create( title='Future Article', slug='future_article', author=cls.author1, date_created=datetime.datetime(3000, 1, 1, 21, 22, 23), ) cls.scheme1 = SchemeIncludedURL.objects.create(url='http://test_scheme_included_http/') cls.scheme2 = SchemeIncludedURL.objects.create(url='https://test_scheme_included_https/') cls.scheme3 = SchemeIncludedURL.objects.create(url='//test_default_scheme_kept/') def setUp(self): Site.objects.clear_cache() def test_shortcut_with_absolute_url(self): "Can view a shortcut for an Author object that has a get_absolute_url method" for obj in Author.objects.all(): with self.subTest(obj=obj): short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk) response = self.client.get(short_url) self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(), target_status_code=404) def test_shortcut_with_absolute_url_including_scheme(self): """ Can view a shortcut when object's get_absolute_url returns a full URL the tested URLs are: "http://...", "https://..." and "//..." """ for obj in SchemeIncludedURL.objects.all(): with self.subTest(obj=obj): short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(SchemeIncludedURL).id, obj.pk) response = self.client.get(short_url) self.assertRedirects(response, obj.get_absolute_url(), fetch_redirect_response=False) def test_shortcut_no_absolute_url(self): """ Shortcuts for an object that has no get_absolute_url() method raise 404. 
""" for obj in Article.objects.all(): with self.subTest(obj=obj): short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk) response = self.client.get(short_url) self.assertEqual(response.status_code, 404) def test_wrong_type_pk(self): short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects') response = self.client.get(short_url) self.assertEqual(response.status_code, 404) def test_shortcut_bad_pk(self): short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242') response = self.client.get(short_url) self.assertEqual(response.status_code, 404) def test_nonint_content_type(self): an_author = Author.objects.all()[0] short_url = '/shortcut/%s/%s/' % ('spam', an_author.pk) response = self.client.get(short_url) self.assertEqual(response.status_code, 404) def test_bad_content_type(self): an_author = Author.objects.all()[0] short_url = '/shortcut/%s/%s/' % (42424242, an_author.pk) response = self.client.get(short_url) self.assertEqual(response.status_code, 404) @override_settings(ROOT_URLCONF='contenttypes_tests.urls') class ContentTypesViewsSiteRelTests(TestCase): def setUp(self): Site.objects.clear_cache() @classmethod def setUpTestData(cls): cls.site_2 = Site.objects.create(domain='example2.com', name='example2.com') cls.site_3 = Site.objects.create(domain='example3.com', name='example3.com') @mock.patch('django.apps.apps.get_model') def test_shortcut_view_with_null_site_fk(self, get_model): """ The shortcut view works if a model's ForeignKey to site is None. """ get_model.side_effect = lambda *args, **kwargs: MockSite if args[0] == 'sites.Site' else ModelWithNullFKToSite obj = ModelWithNullFKToSite.objects.create(title='title') url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(ModelWithNullFKToSite).id, obj.pk) response = self.client.get(url) expected_url = 'http://example.com%s' % obj.get_absolute_url() self.assertRedirects(response, expected_url, fetch_redirect_response=False) @mock.patch('django.apps.apps.get_model') def test_shortcut_view_with_site_m2m(self, get_model): """ When the object has a ManyToManyField to Site, redirect to the current site if it's attached to the object or to the domain of the first site found in the m2m relationship. """ get_model.side_effect = lambda *args, **kwargs: MockSite if args[0] == 'sites.Site' else ModelWithM2MToSite # get_current_site() will lookup a Site object, so these must match the # domains in the MockSite model. MockSite.objects.bulk_create([ MockSite(pk=1, domain='example.com'), MockSite(pk=self.site_2.pk, domain=self.site_2.domain), MockSite(pk=self.site_3.pk, domain=self.site_3.domain), ]) ct = ContentType.objects.get_for_model(ModelWithM2MToSite) site_3_obj = ModelWithM2MToSite.objects.create(title='Not Linked to Current Site') site_3_obj.sites.add(MockSite.objects.get(pk=self.site_3.pk)) expected_url = 'http://%s%s' % (self.site_3.domain, site_3_obj.get_absolute_url()) with self.settings(SITE_ID=self.site_2.pk): # Redirects to the domain of the first Site found in the m2m # relationship (ordering is arbitrary). 
response = self.client.get('/shortcut/%s/%s/' % (ct.pk, site_3_obj.pk)) self.assertRedirects(response, expected_url, fetch_redirect_response=False) obj_with_sites = ModelWithM2MToSite.objects.create(title='Linked to Current Site') obj_with_sites.sites.set(MockSite.objects.all()) shortcut_url = '/shortcut/%s/%s/' % (ct.pk, obj_with_sites.pk) expected_url = 'http://%s%s' % (self.site_2.domain, obj_with_sites.get_absolute_url()) with self.settings(SITE_ID=self.site_2.pk): # Redirects to the domain of the Site matching the current site's # domain. response = self.client.get(shortcut_url) self.assertRedirects(response, expected_url, fetch_redirect_response=False) with self.settings(SITE_ID=None, ALLOWED_HOSTS=[self.site_2.domain]): # Redirects to the domain of the Site matching the request's host # header. response = self.client.get(shortcut_url, SERVER_NAME=self.site_2.domain) self.assertRedirects(response, expected_url, fetch_redirect_response=False) class ShortcutViewTests(TestCase): def setUp(self): self.request = HttpRequest() self.request.META = {'SERVER_NAME': 'Example.com', 'SERVER_PORT': '80'} @override_settings(ALLOWED_HOSTS=['example.com']) def test_not_dependent_on_sites_app(self): """ The view returns a complete URL regardless of whether the sites framework is installed. """ user_ct = ContentType.objects.get_for_model(FooWithUrl) obj = FooWithUrl.objects.create(name='john') with self.modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}): response = shortcut(self.request, user_ct.id, obj.id) self.assertEqual( 'http://%s/users/john/' % get_current_site(self.request).domain, response.headers.get('location') ) with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}): response = shortcut(self.request, user_ct.id, obj.id) self.assertEqual('http://Example.com/users/john/', response.headers.get('location')) def test_model_without_get_absolute_url(self): """The view returns 404 when Model.get_absolute_url() isn't defined.""" user_ct = ContentType.objects.get_for_model(FooWithoutUrl) obj = FooWithoutUrl.objects.create(name='john') with self.assertRaises(Http404): shortcut(self.request, user_ct.id, obj.id) def test_model_with_broken_get_absolute_url(self): """ The view doesn't catch an AttributeError raised by Model.get_absolute_url() (#8997). """ user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl) obj = FooWithBrokenAbsoluteUrl.objects.create(name='john') with self.assertRaises(AttributeError): shortcut(self.request, user_ct.id, obj.id)
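# --- Illustrative sketch (not part of the test file above) -----------------
# The tests above exercise django.contrib.contenttypes.views.shortcut, which
# resolves /shortcut/<content_type_id>/<object_id>/ to the target object's
# get_absolute_url(). A minimal sketch of that wiring follows; the "Tag"
# model, its app_label, and the '/tags/...' URL are hypothetical assumptions,
# while ContentType.objects.get_for_model() and the shortcut view itself are
# the real Django APIs under test.
from django.contrib.contenttypes.models import ContentType
from django.db import models


class Tag(models.Model):
    name = models.CharField(max_length=50)

    class Meta:
        app_label = 'contenttypes_tests'

    def get_absolute_url(self):
        # The shortcut view redirects here, prefixing the current site's
        # domain when the returned URL is relative (as the tests verify).
        return '/tags/%s/' % self.name


def shortcut_url_for(obj):
    # Builds the /shortcut/<ct_id>/<obj_id>/ path used throughout the tests.
    ct = ContentType.objects.get_for_model(obj)
    return '/shortcut/%s/%s/' % (ct.pk, obj.pk)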
7372aad6d1d3d4e913dd5ad50066d74ef05491a21e2f262a3d97be56df87007a
""" Regression tests for the Test Client, especially the customized assertions. """ import itertools import os from django.contrib.auth.models import User from django.contrib.auth.signals import user_logged_in, user_logged_out from django.http import HttpResponse from django.template import ( Context, RequestContext, TemplateSyntaxError, engines, ) from django.template.response import SimpleTemplateResponse from django.test import ( Client, SimpleTestCase, TestCase, modify_settings, override_settings, ) from django.test.client import RedirectCycleError, RequestFactory, encode_file from django.test.utils import ContextList from django.urls import NoReverseMatch, reverse from django.utils.translation import gettext_lazy from .models import CustomUser from .views import CustomTestException class TestDataMixin: @classmethod def setUpTestData(cls): cls.u1 = User.objects.create_user(username='testclient', password='password') cls.staff = User.objects.create_user(username='staff', password='password', is_staff=True) @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertContainsTests(SimpleTestCase): def test_contains(self): "Responses can be inspected for content, including counting repeated substrings" response = self.client.get('/no_template_view/') self.assertNotContains(response, 'never') self.assertContains(response, 'never', 0) self.assertContains(response, 'once') self.assertContains(response, 'once', 1) self.assertContains(response, 'twice') self.assertContains(response, 'twice', 2) try: self.assertContains(response, 'text', status_code=999) except AssertionError as e: self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertContains(response, 'text', status_code=999, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertNotContains(response, 'text', status_code=999) except AssertionError as e: self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e)) try: self.assertNotContains(response, 'once') except AssertionError as e: self.assertIn("Response should not contain 'once'", str(e)) try: self.assertNotContains(response, 'once', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response should not contain 'once'", str(e)) try: self.assertContains(response, 'never', 1) except AssertionError as e: self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e)) try: self.assertContains(response, 'never', 1, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e)) try: self.assertContains(response, 'once', 0) except AssertionError as e: self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e)) try: self.assertContains(response, 'once', 0, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e)) try: self.assertContains(response, 'once', 2) except AssertionError as e: self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e)) try: self.assertContains(response, 'once', 2, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", 
str(e)) try: self.assertContains(response, 'twice', 1) except AssertionError as e: self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e)) try: self.assertContains(response, 'twice', 1, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e)) try: self.assertContains(response, 'thrice') except AssertionError as e: self.assertIn("Couldn't find 'thrice' in response", str(e)) try: self.assertContains(response, 'thrice', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Couldn't find 'thrice' in response", str(e)) try: self.assertContains(response, 'thrice', 3) except AssertionError as e: self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e)) try: self.assertContains(response, 'thrice', 3, msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e)) def test_unicode_contains(self): "Unicode characters can be found in template context" # Regression test for #10183 r = self.client.get('/check_unicode/') self.assertContains(r, 'さかき') self.assertContains(r, b'\xe5\xb3\xa0'.decode()) def test_unicode_not_contains(self): "Unicode characters can be searched for, and not found in template context" # Regression test for #10183 r = self.client.get('/check_unicode/') self.assertNotContains(r, 'はたけ') self.assertNotContains(r, b'\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode()) def test_binary_contains(self): r = self.client.get('/check_binary/') self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e') with self.assertRaises(AssertionError): self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e', count=2) def test_binary_not_contains(self): r = self.client.get('/check_binary/') self.assertNotContains(r, b'%ODF-1.4\r\n%\x93\x8c\x8b\x9e') with self.assertRaises(AssertionError): self.assertNotContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e') def test_nontext_contains(self): r = self.client.get('/no_template_view/') self.assertContains(r, gettext_lazy('once')) def test_nontext_not_contains(self): r = self.client.get('/no_template_view/') self.assertNotContains(r, gettext_lazy('never')) def test_assert_contains_renders_template_response(self): """ An unrendered SimpleTemplateResponse may be used in assertContains(). """ template = engines['django'].from_string('Hello') response = SimpleTemplateResponse(template) self.assertContains(response, 'Hello') def test_assert_contains_using_non_template_response(self): """ auto-rendering does not affect responses that aren't instances (or subclasses) of SimpleTemplateResponse. Refs #15826. """ response = HttpResponse('Hello') self.assertContains(response, 'Hello') def test_assert_not_contains_renders_template_response(self): """ An unrendered SimpleTemplateResponse may be used in assertNotContains(). """ template = engines['django'].from_string('Hello') response = SimpleTemplateResponse(template) self.assertNotContains(response, 'Bye') def test_assert_not_contains_using_non_template_response(self): """ auto-rendering does not affect responses that aren't instances (or subclasses) of SimpleTemplateResponse. 
""" response = HttpResponse('Hello') self.assertNotContains(response, 'Bye') @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertTemplateUsedTests(TestDataMixin, TestCase): def test_no_context(self): "Template usage assertions work then templates aren't in use" response = self.client.get('/no_template_view/') # The no template case doesn't mess with the template assertions self.assertTemplateNotUsed(response, 'GET Template') try: self.assertTemplateUsed(response, 'GET Template') except AssertionError as e: self.assertIn("No templates used to render the response", str(e)) try: self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: No templates used to render the response", str(e)) msg = 'No templates used to render the response' with self.assertRaisesMessage(AssertionError, msg): self.assertTemplateUsed(response, 'GET Template', count=2) def test_single_context(self): "Template assertions work when there is a single context" response = self.client.get('/post_view/', {}) msg = ( ": Template 'Empty GET Template' was used unexpectedly in " "rendering the response" ) with self.assertRaisesMessage(AssertionError, msg): self.assertTemplateNotUsed(response, 'Empty GET Template') with self.assertRaisesMessage(AssertionError, 'abc' + msg): self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc') msg = ( ": Template 'Empty POST Template' was not a template used to " "render the response. Actual template(s) used: Empty GET Template" ) with self.assertRaisesMessage(AssertionError, msg): self.assertTemplateUsed(response, 'Empty POST Template') with self.assertRaisesMessage(AssertionError, 'abc' + msg): self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc') msg = ( ": Template 'Empty GET Template' was expected to be rendered 2 " "time(s) but was actually rendered 1 time(s)." ) with self.assertRaisesMessage(AssertionError, msg): self.assertTemplateUsed(response, 'Empty GET Template', count=2) with self.assertRaisesMessage(AssertionError, 'abc' + msg): self.assertTemplateUsed(response, 'Empty GET Template', msg_prefix='abc', count=2) def test_multiple_context(self): "Template assertions work when there are multiple contexts" post_data = { 'text': 'Hello World', 'email': '[email protected]', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view_with_template/', post_data) self.assertContains(response, 'POST data OK') msg = "Template '%s' was used unexpectedly in rendering the response" with self.assertRaisesMessage(AssertionError, msg % 'form_view.html'): self.assertTemplateNotUsed(response, "form_view.html") with self.assertRaisesMessage(AssertionError, msg % 'base.html'): self.assertTemplateNotUsed(response, 'base.html') msg = ( "Template 'Valid POST Template' was not a template used to render " "the response. Actual template(s) used: form_view.html, base.html" ) with self.assertRaisesMessage(AssertionError, msg): self.assertTemplateUsed(response, "Valid POST Template") msg = ( "Template 'base.html' was expected to be rendered 2 time(s) but " "was actually rendered 1 time(s)." 
) with self.assertRaisesMessage(AssertionError, msg): self.assertTemplateUsed(response, 'base.html', count=2) def test_template_rendered_multiple_times(self): """Template assertions work when a template is rendered multiple times.""" response = self.client.get('/render_template_multiple_times/') self.assertTemplateUsed(response, 'base.html', count=2) @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertRedirectsTests(SimpleTestCase): def test_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/permanent_redirect_view/') try: self.assertRedirects(response, '/get_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e)) try: self.assertRedirects(response, '/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e)) def test_lost_query(self): "An assertion is raised if the redirect location doesn't preserve GET parameters" response = self.client.get('/redirect_view/', {'var': 'value'}) try: self.assertRedirects(response, '/get_view/') except AssertionError as e: self.assertIn("Response redirected to '/get_view/?var=value', expected '/get_view/'", str(e)) try: self.assertRedirects(response, '/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response redirected to '/get_view/?var=value', expected '/get_view/'", str(e)) def test_incorrect_target(self): "An assertion is raised if the response redirects to another target" response = self.client.get('/permanent_redirect_view/') try: # Should redirect to get_view self.assertRedirects(response, '/some_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e)) def test_target_page(self): "An assertion is raised if the response redirect target cannot be retrieved as expected" response = self.client.get('/double_redirect_view/') try: # The redirect target responds with a 301 code, not 200 self.assertRedirects(response, 'http://testserver/permanent_redirect_view/') except AssertionError as e: self.assertIn( "Couldn't retrieve redirection page '/permanent_redirect_view/': " "response code was 301 (expected 200)", str(e) ) try: # The redirect target responds with a 301 code, not 200 self.assertRedirects(response, 'http://testserver/permanent_redirect_view/', msg_prefix='abc') except AssertionError as e: self.assertIn( "abc: Couldn't retrieve redirection page '/permanent_redirect_view/': " "response code was 301 (expected 200)", str(e) ) def test_redirect_chain(self): "You can follow a redirect chain of multiple redirects" response = self.client.get('/redirects/further/more/', {}, follow=True) self.assertRedirects(response, '/no_template_view/', status_code=302, target_status_code=200) self.assertEqual(len(response.redirect_chain), 1) self.assertEqual(response.redirect_chain[0], ('/no_template_view/', 302)) def test_multiple_redirect_chain(self): "You can follow a redirect chain of multiple redirects" response = self.client.get('/redirects/', {}, follow=True) self.assertRedirects(response, '/no_template_view/', status_code=302, target_status_code=200) self.assertEqual(len(response.redirect_chain), 3) self.assertEqual(response.redirect_chain[0], ('/redirects/further/', 302)) self.assertEqual(response.redirect_chain[1], 
                         ('/redirects/further/more/', 302))
        self.assertEqual(response.redirect_chain[2],
                         ('/no_template_view/', 302))

    def test_redirect_chain_to_non_existent(self):
        "You can follow a chain to a nonexistent view."
        response = self.client.get('/redirect_to_non_existent_view2/', {}, follow=True)
        self.assertRedirects(response, '/non_existent_view/', status_code=302, target_status_code=404)

    def test_redirect_chain_to_self(self):
        "Redirections to self are caught and escaped"
        with self.assertRaises(RedirectCycleError) as context:
            self.client.get('/redirect_to_self/', {}, follow=True)
        response = context.exception.last_response
        # The chain of redirects stops once the cycle is detected.
        self.assertRedirects(response, '/redirect_to_self/', status_code=302, target_status_code=302)
        self.assertEqual(len(response.redirect_chain), 2)

    def test_redirect_to_self_with_changing_query(self):
        "Redirections don't loop forever even if query is changing"
        with self.assertRaises(RedirectCycleError):
            self.client.get('/redirect_to_self_with_changing_query_view/', {'counter': '0'}, follow=True)

    def test_circular_redirect(self):
        "Circular redirect chains are caught and escaped"
        with self.assertRaises(RedirectCycleError) as context:
            self.client.get('/circular_redirect_1/', {}, follow=True)
        response = context.exception.last_response
        # The chain of redirects will get back to the starting point, but stop there.
        self.assertRedirects(response, '/circular_redirect_2/', status_code=302, target_status_code=302)
        self.assertEqual(len(response.redirect_chain), 4)

    def test_redirect_chain_post(self):
        "A redirect chain will be followed from an initial POST request"
        response = self.client.post('/redirects/', {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response, '/no_template_view/', 302, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_head(self):
        "A redirect chain will be followed from an initial HEAD request"
        response = self.client.head('/redirects/', {'nothing': 'to_send'}, follow=True)
        self.assertRedirects(response, '/no_template_view/', 302, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_options(self):
        "A redirect chain will be followed from an initial OPTIONS request"
        response = self.client.options('/redirects/', follow=True)
        self.assertRedirects(response, '/no_template_view/', 302, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_put(self):
        "A redirect chain will be followed from an initial PUT request"
        response = self.client.put('/redirects/', follow=True)
        self.assertRedirects(response, '/no_template_view/', 302, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    def test_redirect_chain_delete(self):
        "A redirect chain will be followed from an initial DELETE request"
        response = self.client.delete('/redirects/', follow=True)
        self.assertRedirects(response, '/no_template_view/', 302, 200)
        self.assertEqual(len(response.redirect_chain), 3)

    @modify_settings(ALLOWED_HOSTS={'append': 'otherserver'})
    def test_redirect_to_different_host(self):
        "The test client will preserve scheme, host and port changes"
        response = self.client.get('/redirect_other_host/', follow=True)
        self.assertRedirects(
            response, 'https://otherserver:8443/no_template_view/',
            status_code=302, target_status_code=200
        )
        # We can't use is_secure() or get_host()
        # because response.request is a dictionary, not an HttpRequest
        self.assertEqual(response.request.get('wsgi.url_scheme'), 'https')
        self.assertEqual(response.request.get('SERVER_NAME'), 'otherserver')
self.assertEqual(response.request.get('SERVER_PORT'), '8443') # assertRedirects() can follow redirect to 'otherserver' too. response = self.client.get('/redirect_other_host/', follow=False) self.assertRedirects( response, 'https://otherserver:8443/no_template_view/', status_code=302, target_status_code=200 ) def test_redirect_chain_on_non_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/get_view/', follow=True) try: self.assertRedirects(response, '/get_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) try: self.assertRedirects(response, '/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) def test_redirect_on_non_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/get_view/') try: self.assertRedirects(response, '/get_view/') except AssertionError as e: self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) try: self.assertRedirects(response, '/get_view/', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e)) def test_redirect_scheme(self): "An assertion is raised if the response doesn't have the scheme specified in expected_url" # For all possible True/False combinations of follow and secure for follow, secure in itertools.product([True, False], repeat=2): # always redirects to https response = self.client.get('/https_redirect_view/', follow=follow, secure=secure) # the goal scheme is https self.assertRedirects(response, 'https://testserver/secure_view/', status_code=302) with self.assertRaises(AssertionError): self.assertRedirects(response, 'http://testserver/secure_view/', status_code=302) def test_redirect_fetch_redirect_response(self): """Preserve extra headers of requests made with django.test.Client.""" methods = ( 'get', 'post', 'head', 'options', 'put', 'patch', 'delete', 'trace', ) for method in methods: with self.subTest(method=method): req_method = getattr(self.client, method) response = req_method( '/redirect_based_on_extra_headers_1/', follow=False, HTTP_REDIRECT='val', ) self.assertRedirects( response, '/redirect_based_on_extra_headers_2/', fetch_redirect_response=True, status_code=302, target_status_code=302, ) @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertFormErrorTests(SimpleTestCase): def test_unknown_form(self): "An assertion is raised if the form name is unknown" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.') except AssertionError as e: self.assertIn("The form 'wrong_form' was not used to render the response", str(e)) try: self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e)) def 
test_unknown_field(self): "An assertion is raised if the field name is unknown" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'some_field', 'Some error.') except AssertionError as e: self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e)) try: self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e)) def test_noerror_field(self): "An assertion is raised if the field doesn't have any errors" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'value', 'Some error.') except AssertionError as e: self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e)) try: self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e)) def test_unknown_error(self): "An assertion is raised if the field doesn't contain the provided error" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'email', 'Some error.') except AssertionError as e: self.assertIn( "The field 'email' on form 'form' in context 0 does not " "contain the error 'Some error.' (actual errors: " "['Enter a valid email address.'])", str(e) ) try: self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn( "abc: The field 'email' on form 'form' in context 0 does " "not contain the error 'Some error.' (actual errors: " "['Enter a valid email address.'])", str(e) ) def test_unknown_nonfield_error(self): """ An assertion is raised if the form's non field errors doesn't contain the provided error. """ post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', None, 'Some error.') except AssertionError as e: self.assertIn( "The form 'form' in context 0 does not contain the non-field " "error 'Some error.' (actual errors: none)", str(e) ) try: self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc') except AssertionError as e: self.assertIn( "abc: The form 'form' in context 0 does not contain the " "non-field error 'Some error.' 
(actual errors: none)", str(e) ) @override_settings(ROOT_URLCONF='test_client_regress.urls') class AssertFormsetErrorTests(SimpleTestCase): msg_prefixes = [("", {}), ("abc: ", {"msg_prefix": "abc"})] def setUp(self): """Makes response object for testing field and non-field errors""" # For testing field and non-field errors self.response_form_errors = self.getResponse({ 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '2', 'form-0-text': 'Raise non-field error', 'form-0-email': 'not an email address', 'form-0-value': 37, 'form-0-single': 'b', 'form-0-multi': ('b', 'c', 'e'), 'form-1-text': 'Hello World', 'form-1-email': '[email protected]', 'form-1-value': 37, 'form-1-single': 'b', 'form-1-multi': ('b', 'c', 'e'), }) # For testing non-form errors self.response_nonform_errors = self.getResponse({ 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '2', 'form-0-text': 'Hello World', 'form-0-email': '[email protected]', 'form-0-value': 37, 'form-0-single': 'b', 'form-0-multi': ('b', 'c', 'e'), 'form-1-text': 'Hello World', 'form-1-email': '[email protected]', 'form-1-value': 37, 'form-1-single': 'b', 'form-1-multi': ('b', 'c', 'e'), }) def getResponse(self, post_data): response = self.client.post('/formset_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") return response def test_unknown_formset(self): "An assertion is raised if the formset name is unknown" for prefix, kwargs in self.msg_prefixes: msg = prefix + "The formset 'wrong_formset' was not used to render the response" with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError( self.response_form_errors, 'wrong_formset', 0, 'Some_field', 'Some error.', **kwargs ) def test_unknown_field(self): "An assertion is raised if the field name is unknown" for prefix, kwargs in self.msg_prefixes: msg = prefix + "The formset 'my_formset', form 0 in context 0 does not contain the field 'Some_field'" with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError( self.response_form_errors, 'my_formset', 0, 'Some_field', 'Some error.', **kwargs ) def test_no_error_field(self): "An assertion is raised if the field doesn't have any errors" for prefix, kwargs in self.msg_prefixes: msg = prefix + "The field 'value' on formset 'my_formset', form 1 in context 0 contains no errors" with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', 1, 'value', 'Some error.', **kwargs) def test_unknown_error(self): "An assertion is raised if the field doesn't contain the specified error" for prefix, kwargs in self.msg_prefixes: msg = prefix + ( "The field 'email' on formset 'my_formset', form 0 " "in context 0 does not contain the error 'Some error.' " "(actual errors: ['Enter a valid email address.'])" ) with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', 0, 'email', 'Some error.', **kwargs) def test_field_error(self): "No assertion is raised if the field contains the provided error" error_msg = ['Enter a valid email address.'] for prefix, kwargs in self.msg_prefixes: self.assertFormsetError(self.response_form_errors, 'my_formset', 0, 'email', error_msg, **kwargs) def test_no_nonfield_error(self): "An assertion is raised if the formsets non-field errors doesn't contain any errors." for prefix, kwargs in self.msg_prefixes: msg = prefix + "The formset 'my_formset', form 1 in context 0 does not contain any non-field errors." 
with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', 1, None, 'Some error.', **kwargs) def test_unknown_nonfield_error(self): "An assertion is raised if the formsets non-field errors doesn't contain the provided error." for prefix, kwargs in self.msg_prefixes: msg = prefix + ( "The formset 'my_formset', form 0 in context 0 does not " "contain the non-field error 'Some error.' (actual errors: " "['Non-field error.'])" ) with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', 0, None, 'Some error.', **kwargs) def test_nonfield_error(self): "No assertion is raised if the formsets non-field errors contains the provided error." for prefix, kwargs in self.msg_prefixes: self.assertFormsetError(self.response_form_errors, 'my_formset', 0, None, 'Non-field error.', **kwargs) def test_no_nonform_error(self): "An assertion is raised if the formsets non-form errors doesn't contain any errors." for prefix, kwargs in self.msg_prefixes: msg = prefix + "The formset 'my_formset' in context 0 does not contain any non-form errors." with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError(self.response_form_errors, 'my_formset', None, None, 'Some error.', **kwargs) def test_unknown_nonform_error(self): "An assertion is raised if the formsets non-form errors doesn't contain the provided error." for prefix, kwargs in self.msg_prefixes: msg = prefix + ( "The formset 'my_formset' in context 0 does not contain the " "non-form error 'Some error.' (actual errors: ['Forms in a set " "must have distinct email addresses.'])" ) with self.assertRaisesMessage(AssertionError, msg): self.assertFormsetError( self.response_nonform_errors, 'my_formset', None, None, 'Some error.', **kwargs ) def test_nonform_error(self): "No assertion is raised if the formsets non-form errors contains the provided error." msg = 'Forms in a set must have distinct email addresses.' for prefix, kwargs in self.msg_prefixes: self.assertFormsetError(self.response_nonform_errors, 'my_formset', None, None, msg, **kwargs) @override_settings(ROOT_URLCONF='test_client_regress.urls') class LoginTests(TestDataMixin, TestCase): def test_login_different_client(self): "Using a different test client doesn't violate authentication" # Create a second client, and log in. c = Client() login = c.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Get a redirection page with the second client. response = c.get("/login_protected_redirect_view/") # At this points, the self.client isn't logged in. # assertRedirects uses the original client, not the default client. self.assertRedirects(response, "/get_view/") @override_settings( SESSION_ENGINE='test_client_regress.session', ROOT_URLCONF='test_client_regress.urls', ) class SessionEngineTests(TestDataMixin, TestCase): def test_login(self): "A session engine that modifies the session key can be used to log in" login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Try to access a login protected page. 
response = self.client.get("/login_protected_view/") self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') @override_settings(ROOT_URLCONF='test_client_regress.urls',) class URLEscapingTests(SimpleTestCase): def test_simple_argument_get(self): "Get a view that has a simple string argument" response = self.client.get(reverse('arg_view', args=['Slartibartfast'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Howdy, Slartibartfast') def test_argument_with_space_get(self): "Get a view that has a string argument that requires escaping" response = self.client.get(reverse('arg_view', args=['Arthur Dent'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Hi, Arthur') def test_simple_argument_post(self): "Post for a view that has a simple string argument" response = self.client.post(reverse('arg_view', args=['Slartibartfast'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Howdy, Slartibartfast') def test_argument_with_space_post(self): "Post for a view that has a string argument that requires escaping" response = self.client.post(reverse('arg_view', args=['Arthur Dent'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'Hi, Arthur') @override_settings(ROOT_URLCONF='test_client_regress.urls') class ExceptionTests(TestDataMixin, TestCase): def test_exception_cleared(self): "#5836 - A stale user exception isn't re-raised by the test client." login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') with self.assertRaises(CustomTestException): self.client.get("/staff_only/") # At this point, an exception has been raised, and should be cleared. # This next operation should be successful; if it isn't we have a problem. login = self.client.login(username='staff', password='password') self.assertTrue(login, 'Could not log in') self.client.get("/staff_only/") @override_settings(ROOT_URLCONF='test_client_regress.urls') class TemplateExceptionTests(SimpleTestCase): @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(os.path.dirname(__file__), 'bad_templates')], }]) def test_bad_404_template(self): "Errors found when rendering 404 error templates are re-raised" with self.assertRaises(TemplateSyntaxError): self.client.get("/no_such_view/") # We need two different tests to check URLconf substitution - one to check # it was changed, and another one (without self.urls) to check it was reverted on # teardown. This pair of tests relies upon the alphabetical ordering of test execution. @override_settings(ROOT_URLCONF='test_client_regress.urls') class UrlconfSubstitutionTests(SimpleTestCase): def test_urlconf_was_changed(self): "TestCase can enforce a custom URLconf on a per-test basis" url = reverse('arg_view', args=['somename']) self.assertEqual(url, '/arg_view/somename/') # This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the # name is to ensure alphabetical ordering. class zzUrlconfSubstitutionTests(SimpleTestCase): def test_urlconf_was_reverted(self): """URLconf is reverted to original value after modification in a TestCase This will not find a match as the default ROOT_URLCONF is empty. 
""" with self.assertRaises(NoReverseMatch): reverse('arg_view', args=['somename']) @override_settings(ROOT_URLCONF='test_client_regress.urls') class ContextTests(TestDataMixin, TestCase): def test_single_context(self): "Context variables can be retrieved from a single context" response = self.client.get("/request_data/", data={'foo': 'whiz'}) self.assertIsInstance(response.context, RequestContext) self.assertIn('get-foo', response.context) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['data'], 'sausage') with self.assertRaisesMessage(KeyError, 'does-not-exist'): response.context['does-not-exist'] def test_inherited_context(self): "Context variables can be retrieved from a list of contexts" response = self.client.get("/request_data_extended/", data={'foo': 'whiz'}) self.assertEqual(response.context.__class__, ContextList) self.assertEqual(len(response.context), 2) self.assertIn('get-foo', response.context) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['data'], 'bacon') with self.assertRaisesMessage(KeyError, 'does-not-exist'): response.context['does-not-exist'] def test_contextlist_keys(self): c1 = Context() c1.update({'hello': 'world', 'goodbye': 'john'}) c1.update({'hello': 'dolly', 'dolly': 'parton'}) c2 = Context() c2.update({'goodbye': 'world', 'python': 'rocks'}) c2.update({'goodbye': 'dolly'}) k = ContextList([c1, c2]) # None, True and False are builtins of BaseContext, and present # in every Context without needing to be added. self.assertEqual({'None', 'True', 'False', 'hello', 'goodbye', 'python', 'dolly'}, k.keys()) def test_contextlist_get(self): c1 = Context({'hello': 'world', 'goodbye': 'john'}) c2 = Context({'goodbye': 'world', 'python': 'rocks'}) k = ContextList([c1, c2]) self.assertEqual(k.get('hello'), 'world') self.assertEqual(k.get('goodbye'), 'john') self.assertEqual(k.get('python'), 'rocks') self.assertEqual(k.get('nonexistent', 'default'), 'default') def test_15368(self): # Need to insert a context processor that assumes certain things about # the request instance. This triggers a bug caused by some ways of # copying RequestContext. with self.settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'test_client_regress.context_processors.special', ], }, }]): response = self.client.get("/request_context_view/") self.assertContains(response, 'Path: /request_context_view/') def test_nested_requests(self): """ response.context is not lost when view call another view. """ response = self.client.get("/nested_view/") self.assertIsInstance(response.context, RequestContext) self.assertEqual(response.context['nested'], 'yes') @override_settings(ROOT_URLCONF='test_client_regress.urls') class SessionTests(TestDataMixin, TestCase): def test_session(self): "The session isn't lost if a user logs in" # The session doesn't exist to start. response = self.client.get('/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'NO') # This request sets a session variable. 
response = self.client.get('/set_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'set_session') # The session has been modified response = self.client.get('/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'YES') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Session should still contain the modified value response = self.client.get('/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'YES') def test_session_initiated(self): session = self.client.session session['session_var'] = 'foo' session.save() response = self.client.get('/check_session/') self.assertEqual(response.content, b'foo') def test_logout(self): """Logout should work whether the user is logged in or not (#9978).""" self.client.logout() login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') self.client.logout() self.client.logout() def test_logout_with_user(self): """Logout should send user_logged_out signal if user was logged in.""" def listener(*args, **kwargs): listener.executed = True self.assertEqual(kwargs['sender'], User) listener.executed = False user_logged_out.connect(listener) self.client.login(username='testclient', password='password') self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) @override_settings(AUTH_USER_MODEL='test_client_regress.CustomUser') def test_logout_with_custom_user(self): """Logout should send user_logged_out signal if custom user was logged in.""" def listener(*args, **kwargs): self.assertEqual(kwargs['sender'], CustomUser) listener.executed = True listener.executed = False u = CustomUser.custom_objects.create(email='[email protected]') u.set_password('password') u.save() user_logged_out.connect(listener) self.client.login(username='[email protected]', password='password') self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) @override_settings(AUTHENTICATION_BACKENDS=( 'django.contrib.auth.backends.ModelBackend', 'test_client_regress.auth_backends.CustomUserBackend')) def test_logout_with_custom_auth_backend(self): "Request a logout after logging in with custom authentication backend" def listener(*args, **kwargs): self.assertEqual(kwargs['sender'], CustomUser) listener.executed = True listener.executed = False u = CustomUser.custom_objects.create(email='[email protected]') u.set_password('password') u.save() user_logged_out.connect(listener) self.client.login(username='[email protected]', password='password') self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) def test_logout_without_user(self): """Logout should send signal even if user not authenticated.""" def listener(user, *args, **kwargs): listener.user = user listener.executed = True listener.executed = False user_logged_out.connect(listener) self.client.login(username='incorrect', password='password') self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) self.assertIsNone(listener.user) def test_login_with_user(self): """Login should send user_logged_in signal on successful login.""" def listener(*args, **kwargs): listener.executed = True listener.executed = False user_logged_in.connect(listener) self.client.login(username='testclient', password='password') user_logged_out.disconnect(listener) 
self.assertTrue(listener.executed) def test_login_without_signal(self): """Login shouldn't send signal if user wasn't logged in""" def listener(*args, **kwargs): listener.executed = True listener.executed = False user_logged_in.connect(listener) self.client.login(username='incorrect', password='password') user_logged_in.disconnect(listener) self.assertFalse(listener.executed) @override_settings(ROOT_URLCONF='test_client_regress.urls') class RequestMethodTests(SimpleTestCase): def test_get(self): "Request a view via request method GET" response = self.client.get('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: GET') def test_post(self): "Request a view via request method POST" response = self.client.post('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: POST') def test_head(self): "Request a view via request method HEAD" response = self.client.head('/request_methods/') self.assertEqual(response.status_code, 200) # A HEAD request doesn't return any content. self.assertNotEqual(response.content, b'request method: HEAD') self.assertEqual(response.content, b'') def test_options(self): "Request a view via request method OPTIONS" response = self.client.options('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: OPTIONS') def test_put(self): "Request a view via request method PUT" response = self.client.put('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PUT') def test_delete(self): "Request a view via request method DELETE" response = self.client.delete('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: DELETE') def test_patch(self): "Request a view via request method PATCH" response = self.client.patch('/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PATCH') @override_settings(ROOT_URLCONF='test_client_regress.urls') class RequestMethodStringDataTests(SimpleTestCase): def test_post(self): "Request a view with string data via request method POST" # Regression test for #11371 data = '{"test": "json"}' response = self.client.post('/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: POST') def test_put(self): "Request a view with string data via request method PUT" # Regression test for #11371 data = '{"test": "json"}' response = self.client.put('/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PUT') def test_patch(self): "Request a view with string data via request method PATCH" # Regression test for #17797 data = '{"test": "json"}' response = self.client.patch('/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'request method: PATCH') def test_empty_string_data(self): "Request a view with empty string data via request method GET/POST/HEAD" # Regression test for #21740 response = self.client.get('/body/', data='', content_type='application/json') self.assertEqual(response.content, b'') response = self.client.post('/body/', data='', content_type='application/json') 
self.assertEqual(response.content, b'') response = self.client.head('/body/', data='', content_type='application/json') self.assertEqual(response.content, b'') def test_json_bytes(self): response = self.client.post('/body/', data=b"{'value': 37}", content_type='application/json') self.assertEqual(response.content, b"{'value': 37}") def test_json(self): response = self.client.get('/json_response/') self.assertEqual(response.json(), {'key': 'value'}) def test_json_charset(self): response = self.client.get('/json_response_latin1/') self.assertEqual(response.charset, 'latin1') self.assertEqual(response.json(), {'a': 'Å'}) def test_json_structured_suffixes(self): valid_types = ( 'application/vnd.api+json', 'application/vnd.api.foo+json', 'application/json; charset=utf-8', 'application/activity+json', 'application/activity+json; charset=utf-8', ) for content_type in valid_types: response = self.client.get('/json_response/', {'content_type': content_type}) self.assertEqual(response.headers['Content-Type'], content_type) self.assertEqual(response.json(), {'key': 'value'}) def test_json_multiple_access(self): response = self.client.get('/json_response/') self.assertIs(response.json(), response.json()) def test_json_wrong_header(self): response = self.client.get('/body/') msg = 'Content-Type header is "text/html; charset=utf-8", not "application/json"' with self.assertRaisesMessage(ValueError, msg): self.assertEqual(response.json(), {'key': 'value'}) @override_settings(ROOT_URLCONF='test_client_regress.urls',) class QueryStringTests(SimpleTestCase): def test_get_like_requests(self): for method_name in ('get', 'head'): # A GET-like request can pass a query string as data (#10571) method = getattr(self.client, method_name) response = method("/request_data/", data={'foo': 'whiz'}) self.assertEqual(response.context['get-foo'], 'whiz') # A GET-like request can pass a query string as part of the URL response = method("/request_data/?foo=whiz") self.assertEqual(response.context['get-foo'], 'whiz') # Data provided in the URL to a GET-like request is overridden by actual form data response = method("/request_data/?foo=whiz", data={'foo': 'bang'}) self.assertEqual(response.context['get-foo'], 'bang') response = method("/request_data/?foo=whiz", data={'bar': 'bang'}) self.assertIsNone(response.context['get-foo']) self.assertEqual(response.context['get-bar'], 'bang') def test_post_like_requests(self): # A POST-like request can pass a query string as data response = self.client.post("/request_data/", data={'foo': 'whiz'}) self.assertIsNone(response.context['get-foo']) self.assertEqual(response.context['post-foo'], 'whiz') # A POST-like request can pass a query string as part of the URL response = self.client.post("/request_data/?foo=whiz") self.assertEqual(response.context['get-foo'], 'whiz') self.assertIsNone(response.context['post-foo']) # POST data provided in the URL augments actual form data response = self.client.post("/request_data/?foo=whiz", data={'foo': 'bang'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['post-foo'], 'bang') response = self.client.post("/request_data/?foo=whiz", data={'bar': 'bang'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertIsNone(response.context['get-bar']) self.assertIsNone(response.context['post-foo']) self.assertEqual(response.context['post-bar'], 'bang') @override_settings(ROOT_URLCONF='test_client_regress.urls') class PayloadEncodingTests(SimpleTestCase): """Regression tests for #10571.""" def 
test_simple_payload(self): """A simple ASCII-only text can be POSTed.""" text = 'English: mountain pass' response = self.client.post('/parse_encoded_text/', text, content_type='text/plain') self.assertEqual(response.content, text.encode()) def test_utf8_payload(self): """Non-ASCII data encoded as UTF-8 can be POSTed.""" text = 'dog: собака' response = self.client.post('/parse_encoded_text/', text, content_type='text/plain; charset=utf-8') self.assertEqual(response.content, text.encode()) def test_utf16_payload(self): """Non-ASCII data encoded as UTF-16 can be POSTed.""" text = 'dog: собака' response = self.client.post('/parse_encoded_text/', text, content_type='text/plain; charset=utf-16') self.assertEqual(response.content, text.encode('utf-16')) def test_non_utf_payload(self): """Non-ASCII data as a non-UTF based encoding can be POSTed.""" text = 'dog: собака' response = self.client.post('/parse_encoded_text/', text, content_type='text/plain; charset=koi8-r') self.assertEqual(response.content, text.encode('koi8-r')) class DummyFile: def __init__(self, filename): self.name = filename def read(self): return b'TEST_FILE_CONTENT' class UploadedFileEncodingTest(SimpleTestCase): def test_file_encoding(self): encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin')) self.assertEqual(b'--TEST_BOUNDARY', encoded_file[0]) self.assertEqual(b'Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1]) self.assertEqual(b'TEST_FILE_CONTENT', encoded_file[-1]) def test_guesses_content_type_on_file_encoding(self): self.assertEqual(b'Content-Type: application/octet-stream', encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2]) self.assertEqual(b'Content-Type: text/plain', encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2]) self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], ( b'Content-Type: application/x-compress', b'Content-Type: application/x-zip', b'Content-Type: application/x-zip-compressed', b'Content-Type: application/zip',)) self.assertEqual(b'Content-Type: application/octet-stream', encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2]) @override_settings(ROOT_URLCONF='test_client_regress.urls',) class RequestHeadersTest(SimpleTestCase): def test_client_headers(self): "A test client can receive custom headers" response = self.client.get("/check_headers/", HTTP_X_ARG_CHECK='Testing 123') self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123") self.assertEqual(response.status_code, 200) def test_client_headers_redirect(self): "Test client headers are preserved through redirects" response = self.client.get("/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123') self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123") self.assertRedirects(response, '/check_headers/', status_code=302, target_status_code=200) @override_settings(ROOT_URLCONF='test_client_regress.urls') class ReadLimitedStreamTest(SimpleTestCase): """ HttpRequest.body, HttpRequest.read(), and HttpRequest.read(BUFFER) have proper LimitedStream behavior. 
Refs #14753, #15785 """ def test_body_from_empty_request(self): """HttpRequest.body on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/body/").content, b'') def test_read_from_empty_request(self): """HttpRequest.read() on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/read_all/").content, b'') def test_read_numbytes_from_empty_request(self): """HttpRequest.read(LARGE_BUFFER) on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/read_buffer/").content, b'') def test_read_from_nonempty_request(self): """HttpRequest.read() on a test client PUT request with some payload should return that payload.""" payload = b'foobar' self.assertEqual(self.client.put("/read_all/", data=payload, content_type='text/plain').content, payload) def test_read_numbytes_from_nonempty_request(self): """HttpRequest.read(LARGE_BUFFER) on a test client PUT request with some payload should return that payload.""" payload = b'foobar' self.assertEqual(self.client.put("/read_buffer/", data=payload, content_type='text/plain').content, payload) @override_settings(ROOT_URLCONF='test_client_regress.urls') class RequestFactoryStateTest(SimpleTestCase): """Regression tests for #15929.""" # These tests are checking that certain middleware don't change certain # global state. Alternatively, from the point of view of a test, they are # ensuring test isolation behavior. So, unusually, it doesn't make sense to # run the tests individually, and if any are failing it is confusing to run # them with any other set of tests. def common_test_that_should_always_pass(self): request = RequestFactory().get('/') request.session = {} self.assertFalse(hasattr(request, 'user')) def test_request(self): self.common_test_that_should_always_pass() def test_request_after_client(self): # apart from the next line the three tests are identical self.client.get('/') self.common_test_that_should_always_pass() def test_request_after_client_2(self): # This test is executed after the previous one self.common_test_that_should_always_pass() @override_settings(ROOT_URLCONF='test_client_regress.urls') class RequestFactoryEnvironmentTests(SimpleTestCase): """ Regression tests for #8551 and #17067: ensure that environment variables are set correctly in RequestFactory. """ def test_should_set_correct_env_variables(self): request = RequestFactory().get('/path/') self.assertEqual(request.META.get('REMOTE_ADDR'), '127.0.0.1') self.assertEqual(request.META.get('SERVER_NAME'), 'testserver') self.assertEqual(request.META.get('SERVER_PORT'), '80') self.assertEqual(request.META.get('SERVER_PROTOCOL'), 'HTTP/1.1') self.assertEqual(request.META.get('SCRIPT_NAME') + request.META.get('PATH_INFO'), '/path/') def test_cookies(self): factory = RequestFactory() factory.cookies.load('A="B"; C="D"; Path=/; Version=1') request = factory.get('/') self.assertEqual(request.META['HTTP_COOKIE'], 'A="B"; C="D"')
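# --- Illustrative sketch (not part of the regression tests above) ----------
# The RequestFactory tests above rely on the pattern of building a bare
# request and calling a view function directly, bypassing URL resolution and
# middleware. A minimal sketch of that usage follows; the echo_method view is
# a hypothetical stand-in, while RequestFactory and its get() signature are
# real Django test APIs.
from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase


def echo_method(request):
    return HttpResponse('request method: %s' % request.method)


class RequestFactoryUsageSketch(SimpleTestCase):
    def test_call_view_directly(self):
        factory = RequestFactory()
        # Extra keyword arguments become WSGI environ entries (headers).
        request = factory.get('/request_methods/', HTTP_X_ARG_CHECK='Testing 123')
        self.assertEqual(request.META['HTTP_X_ARG_CHECK'], 'Testing 123')
        response = echo_method(request)
        self.assertEqual(response.content, b'request method: GET')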
4386f5c910fc91383718e41ced969536740485c4f065118bd591025fa9c021b0
import os from functools import partial from django.conf.urls.i18n import i18n_patterns from django.urls import include, path, re_path from django.utils.translation import gettext_lazy as _ from django.views import defaults, i18n, static from . import views base_dir = os.path.dirname(os.path.abspath(__file__)) media_dir = os.path.join(base_dir, 'media') locale_dir = os.path.join(base_dir, 'locale') urlpatterns = [ path('', views.index_page), # Default views path('nonexistent_url/', partial(defaults.page_not_found, exception=None)), path('server_error/', defaults.server_error), # a view that raises an exception for the debug view path('raises/', views.raises), path('raises400/', views.raises400), path('raises400_bad_request/', views.raises400_bad_request), path('raises403/', views.raises403), path('raises404/', views.raises404), path('raises500/', views.raises500), path('custom_reporter_class_view/', views.custom_reporter_class_view), path('technical404/', views.technical404, name='my404'), path('classbased404/', views.Http404View.as_view()), # i18n views path('i18n/', include('django.conf.urls.i18n')), path('jsi18n/', i18n.JavaScriptCatalog.as_view(packages=['view_tests'])), path('jsi18n/app1/', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app1'])), path('jsi18n/app2/', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app2'])), path('jsi18n/app5/', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app5'])), path('jsi18n_english_translation/', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app0'])), path('jsi18n_multi_packages1/', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app1', 'view_tests.app2'])), path('jsi18n_multi_packages2/', i18n.JavaScriptCatalog.as_view(packages=['view_tests.app3', 'view_tests.app4'])), path('jsi18n_admin/', i18n.JavaScriptCatalog.as_view(packages=['django.contrib.admin', 'view_tests'])), path('jsi18n_template/', views.jsi18n), path('jsi18n_multi_catalogs/', views.jsi18n_multi_catalogs), path('jsoni18n/', i18n.JSONCatalog.as_view(packages=['view_tests'])), # Static views re_path(r'^site_media/(?P<path>.*)$', static.serve, {'document_root': media_dir, 'show_indexes': True}), ] urlpatterns += i18n_patterns( re_path(_(r'^translated/$'), views.index_page, name='i18n_prefixed'), ) urlpatterns += [ path('template_exception/', views.template_exception, name='template_exception'), path( 'raises_template_does_not_exist/<path:path>', views.raises_template_does_not_exist, name='raises_template_does_not_exist' ), path('render_no_template/', views.render_no_template, name='render_no_template'), re_path(r'^test-setlang/(?P<parameter>[^/]+)/$', views.with_parameter, name='with_parameter'), # Patterns to test the technical 404. re_path(r'^regex-post/(?P<pk>[0-9]+)/$', views.index_page, name='regex-post'), path('path-post/<int:pk>/', views.index_page, name='path-post'), ]
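# --- Illustrative sketch (not part of the URLconf above) -------------------
# The URLconf above hard-codes re_path(... static.serve ...) so the test suite
# can always serve media_dir. Outside of tests, the usual pattern is
# django.conf.urls.static.static(), which is a no-op unless DEBUG is True.
# MEDIA_URL and MEDIA_ROOT below are the standard Django settings; everything
# else mirrors the file above.
from django.conf import settings
from django.conf.urls.static import static

if settings.DEBUG:
    # static() returns an empty list when DEBUG is False (or the prefix is
    # non-local), so these entries only exist during development.
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)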
dcb11674d4909980266c8aca0b5145ec3094c3a349281677d03a6306e64d976f
import datetime import decimal import logging import sys from pathlib import Path from django.core.exceptions import ( BadRequest, PermissionDenied, SuspiciousOperation, ) from django.http import Http404, HttpResponse, JsonResponse from django.shortcuts import render from django.template import TemplateDoesNotExist from django.urls import get_resolver from django.views import View from django.views.debug import ( ExceptionReporter, SafeExceptionReporterFilter, technical_500_response, ) from django.views.decorators.debug import ( sensitive_post_parameters, sensitive_variables, ) TEMPLATES_PATH = Path(__file__).resolve().parent / 'templates' def index_page(request): """Dummy index page""" return HttpResponse('<html><body>Dummy page</body></html>') def with_parameter(request, parameter): return HttpResponse('ok') def raises(request): # Make sure that a callable that raises an exception in the stack frame's # local vars won't hijack the technical 500 response (#15025). def callable(): raise Exception try: raise Exception except Exception: return technical_500_response(request, *sys.exc_info()) def raises500(request): # We need to inspect the HTML generated by the fancy 500 debug view but # the test client ignores it, so we send it explicitly. try: raise Exception except Exception: return technical_500_response(request, *sys.exc_info()) def raises400(request): raise SuspiciousOperation def raises400_bad_request(request): raise BadRequest('Malformed request syntax') def raises403(request): raise PermissionDenied("Insufficient Permissions") def raises404(request): resolver = get_resolver(None) resolver.resolve('/not-in-urls') def technical404(request): raise Http404("Testing technical 404.") class Http404View(View): def get(self, request): raise Http404("Testing class-based technical 404.") def template_exception(request): return render(request, 'debug/template_exception.html') def jsi18n(request): return render(request, 'jsi18n.html') def jsi18n_multi_catalogs(request): return render(request, 'jsi18n-multi-catalogs.html') def raises_template_does_not_exist(request, path='i_dont_exist.html'): # We need to inspect the HTML generated by the fancy 500 debug view but # the test client ignores it, so we send it explicitly. try: return render(request, path) except TemplateDoesNotExist: return technical_500_response(request, *sys.exc_info()) def render_no_template(request): # If we do not specify a template, we need to make sure the debug # view doesn't blow up. return render(request, [], {}) def send_log(request, exc_info): logger = logging.getLogger('django') # The default logging config has a logging filter to ensure admin emails are # only sent with DEBUG=False, but since someone might choose to remove that # filter, we still want to be able to test the behavior of error emails # with DEBUG=True. So we need to remove the filter temporarily. admin_email_handler = [ h for h in logger.handlers if h.__class__.__name__ == "AdminEmailHandler" ][0] orig_filters = admin_email_handler.filters admin_email_handler.filters = [] admin_email_handler.include_html = True logger.error( 'Internal Server Error: %s', request.path, exc_info=exc_info, extra={'status_code': 500, 'request': request}, ) admin_email_handler.filters = orig_filters def non_sensitive_view(request): # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. 
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA try: raise Exception except Exception: exc_info = sys.exc_info() send_log(request, exc_info) return technical_500_response(request, *exc_info) @sensitive_variables('sauce') @sensitive_post_parameters('bacon-key', 'sausage-key') def sensitive_view(request): # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA try: raise Exception except Exception: exc_info = sys.exc_info() send_log(request, exc_info) return technical_500_response(request, *exc_info) @sensitive_variables() @sensitive_post_parameters() def paranoid_view(request): # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA try: raise Exception except Exception: exc_info = sys.exc_info() send_log(request, exc_info) return technical_500_response(request, *exc_info) def sensitive_args_function_caller(request): try: sensitive_args_function(''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])) except Exception: exc_info = sys.exc_info() send_log(request, exc_info) return technical_500_response(request, *exc_info) @sensitive_variables('sauce') def sensitive_args_function(sauce): # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA raise Exception def sensitive_kwargs_function_caller(request): try: sensitive_kwargs_function(''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])) except Exception: exc_info = sys.exc_info() send_log(request, exc_info) return technical_500_response(request, *exc_info) @sensitive_variables('sauce') def sensitive_kwargs_function(sauce=None): # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA raise Exception class UnsafeExceptionReporterFilter(SafeExceptionReporterFilter): """ Ignores all the filtering done by its parent class. """ def get_post_parameters(self, request): return request.POST def get_traceback_frame_variables(self, request, tb_frame): return tb_frame.f_locals.items() @sensitive_variables() @sensitive_post_parameters() def custom_exception_reporter_filter_view(request): # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. 
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA request.exception_reporter_filter = UnsafeExceptionReporterFilter() try: raise Exception except Exception: exc_info = sys.exc_info() send_log(request, exc_info) return technical_500_response(request, *exc_info) class CustomExceptionReporter(ExceptionReporter): custom_traceback_text = 'custom traceback text' def get_traceback_html(self): return self.custom_traceback_text class TemplateOverrideExceptionReporter(ExceptionReporter): html_template_path = TEMPLATES_PATH / 'my_technical_500.html' text_template_path = TEMPLATES_PATH / 'my_technical_500.txt' def custom_reporter_class_view(request): request.exception_reporter_class = CustomExceptionReporter try: raise Exception except Exception: exc_info = sys.exc_info() return technical_500_response(request, *exc_info) class Klass: @sensitive_variables('sauce') def method(self, request): # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's # source is displayed in the exception report. cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA try: raise Exception except Exception: exc_info = sys.exc_info() send_log(request, exc_info) return technical_500_response(request, *exc_info) def sensitive_method_view(request): return Klass().method(request) @sensitive_variables('sauce') @sensitive_post_parameters('bacon-key', 'sausage-key') def multivalue_dict_key_error(request): cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA try: request.POST['bar'] except Exception: exc_info = sys.exc_info() send_log(request, exc_info) return technical_500_response(request, *exc_info) def json_response_view(request): return JsonResponse({ 'a': [1, 2, 3], 'foo': {'bar': 'baz'}, # Make sure datetime and Decimal objects would be serialized properly 'timestamp': datetime.datetime(2013, 5, 19, 20), 'value': decimal.Decimal('3.14'), })
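# Illustrative sketch (not part of the original views): the same decorators used
# above can be applied to ordinary application views so that the named local
# variables and POST parameters are replaced with a cleansed placeholder in the
# technical 500 report when the default SafeExceptionReporterFilter is active.
# The view name and values below are hypothetical.
@sensitive_variables('api_token')
@sensitive_post_parameters('password')
def hypothetical_sensitive_payment_view(request):
    api_token = 'not-a-real-token'  # NOQA -- hidden from the error report
    raise Exception('Simulated failure to trigger the debug view.')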
97be09ec278e9093b1842482cf6665ab85c9134e0f6a613e022fa645417c3177
# Unit tests for cache framework # Uses whatever cache backend is set in the test settings file. import copy import io import os import pickle import re import shutil import sys import tempfile import threading import time import unittest from pathlib import Path from unittest import mock, skipIf from django.conf import settings from django.core import management, signals from django.core.cache import ( DEFAULT_CACHE_ALIAS, CacheKeyWarning, InvalidCacheKey, cache, caches, ) from django.core.cache.backends.base import InvalidCacheBackendError from django.core.cache.utils import make_template_fragment_key from django.db import close_old_connections, connection, connections from django.http import ( HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse, ) from django.middleware.cache import ( CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware, ) from django.middleware.csrf import CsrfViewMiddleware from django.template import engines from django.template.context_processors import csrf from django.template.response import TemplateResponse from django.test import ( RequestFactory, SimpleTestCase, TestCase, TransactionTestCase, override_settings, ) from django.test.signals import setting_changed from django.utils import timezone, translation from django.utils.cache import ( get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers, ) from django.views.decorators.cache import cache_control, cache_page from .models import Poll, expensive_calculation # functions/classes for complex data type tests def f(): return 42 class C: def m(n): return 24 class Unpicklable: def __getstate__(self): raise pickle.PickleError() def empty_response(request): return HttpResponse() KEY_ERRORS_WITH_MEMCACHED_MSG = ( 'Cache key contains characters that will cause errors if used with ' 'memcached: %r' ) @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', } }) class DummyCacheTests(SimpleTestCase): # The Dummy cache backend doesn't really behave like a test backend, # so it has its own test case. 
def test_simple(self): "Dummy cache backend ignores cache set calls" cache.set("key", "value") self.assertIsNone(cache.get("key")) def test_add(self): "Add doesn't do anything in dummy cache backend" self.assertIs(cache.add("addkey1", "value"), True) self.assertIs(cache.add("addkey1", "newvalue"), True) self.assertIsNone(cache.get("addkey1")) def test_non_existent(self): "Nonexistent keys aren't found in the dummy cache backend" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): "get_many returns nothing for the dummy cache backend" cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'c', 'd']), {}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {}) def test_get_many_invalid_key(self): msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces' with self.assertWarnsMessage(CacheKeyWarning, msg): cache.get_many(['key with spaces']) def test_delete(self): "Cache deletion is transparently ignored on the dummy cache backend" cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertIsNone(cache.get("key1")) self.assertIs(cache.delete("key1"), False) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_has_key(self): "The has_key method doesn't ever return True for the dummy cache backend" cache.set("hello1", "goodbye1") self.assertIs(cache.has_key("hello1"), False) self.assertIs(cache.has_key("goodbye1"), False) def test_in(self): "The in operator doesn't ever return True for the dummy cache backend" cache.set("hello2", "goodbye2") self.assertNotIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): "Dummy cache values can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.incr('answer') with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): "Dummy cache values can't be decremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.decr('answer') with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_touch(self): """Dummy cache can't do touch().""" self.assertIs(cache.touch('whatever'), False) def test_data_types(self): "All data types are ignored equally by the dummy cache" stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertIsNone(cache.get("stuff")) def test_expiration(self): "Expiration has no effect on the dummy cache" cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) self.assertIs(cache.add("expire2", "newvalue"), True) self.assertIsNone(cache.get("expire2")) self.assertIs(cache.has_key("expire3"), False) def test_unicode(self): "Unicode values are ignored by the dummy cache" stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertIsNone(cache.get(key)) def test_set_many(self): "set_many does nothing for the dummy cache backend" self.assertEqual(cache.set_many({'a': 1, 'b': 2}), []) self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), []) def test_set_many_invalid_key(self): msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces' with 
self.assertWarnsMessage(CacheKeyWarning, msg): cache.set_many({'key with spaces': 'foo'}) def test_delete_many(self): "delete_many does nothing for the dummy cache backend" cache.delete_many(['a', 'b']) def test_delete_many_invalid_key(self): msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces' with self.assertWarnsMessage(CacheKeyWarning, msg): cache.delete_many({'key with spaces': 'foo'}) def test_clear(self): "clear does nothing for the dummy cache backend" cache.clear() def test_incr_version(self): "Dummy cache versions can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.incr_version('answer') with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): "Dummy cache versions can't be decremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.decr_version('answer') with self.assertRaises(ValueError): cache.decr_version('does_not_exist') def test_get_or_set(self): self.assertEqual(cache.get_or_set('mykey', 'default'), 'default') self.assertIsNone(cache.get_or_set('mykey', None)) def test_get_or_set_callable(self): def my_callable(): return 'default' self.assertEqual(cache.get_or_set('mykey', my_callable), 'default') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default') def custom_key_func(key, key_prefix, version): "A customized cache key function" return 'CUSTOM-' + '-'.join([key_prefix, str(version), key]) _caches_setting_base = { 'default': {}, 'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())}, 'v2': {'VERSION': 2}, 'custom_key': {'KEY_FUNCTION': custom_key_func}, 'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'}, 'cull': {'OPTIONS': {'MAX_ENTRIES': 30}}, 'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}}, } def caches_setting_for_tests(base=None, exclude=None, **params): # `base` is used to pull in the memcached config from the original settings, # `exclude` is a set of cache names denoting which `_caches_setting_base` keys # should be omitted. # `params` are test specific overrides and `_caches_settings_base` is the # base config for the tests. 
# This results in the following search order: # params -> _caches_setting_base -> base base = base or {} exclude = exclude or set() setting = {k: base.copy() for k in _caches_setting_base if k not in exclude} for key, cache_params in setting.items(): cache_params.update(_caches_setting_base[key]) cache_params.update(params) return setting class BaseCacheTests: # A common set of tests to apply to all cache backends factory = RequestFactory() def tearDown(self): cache.clear() def test_simple(self): # Simple cache set/get works cache.set("key", "value") self.assertEqual(cache.get("key"), "value") def test_default_used_when_none_is_set(self): """If None is cached, get() returns it instead of the default.""" cache.set('key_default_none', None) self.assertIsNone(cache.get('key_default_none', default='default')) def test_add(self): # A key can be added to a cache self.assertIs(cache.add("addkey1", "value"), True) self.assertIs(cache.add("addkey1", "newvalue"), False) self.assertEqual(cache.get("addkey1"), "value") def test_prefix(self): # Test for same cache key conflicts between shared backend cache.set('somekey', 'value') # should not be set in the prefixed cache self.assertIs(caches['prefix'].has_key('somekey'), False) caches['prefix'].set('somekey', 'value2') self.assertEqual(cache.get('somekey'), 'value') self.assertEqual(caches['prefix'].get('somekey'), 'value2') def test_non_existent(self): """Nonexistent cache keys return as None/default.""" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): # Multiple cache keys can be returned using get_many cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'}) self.assertEqual(cache.get_many(iter(['a', 'b', 'e'])), {'a': 'a', 'b': 'b'}) def test_delete(self): # Cache keys can be deleted cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertEqual(cache.get("key1"), "spam") self.assertIs(cache.delete("key1"), True) self.assertIsNone(cache.get("key1")) self.assertEqual(cache.get("key2"), "eggs") def test_delete_nonexistent(self): self.assertIs(cache.delete('nonexistent_key'), False) def test_has_key(self): # The cache can be inspected for cache keys cache.set("hello1", "goodbye1") self.assertIs(cache.has_key("hello1"), True) self.assertIs(cache.has_key("goodbye1"), False) cache.set("no_expiry", "here", None) self.assertIs(cache.has_key("no_expiry"), True) def test_in(self): # The in operator can be used to inspect cache contents cache.set("hello2", "goodbye2") self.assertIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): # Cache values can be incremented cache.set('answer', 41) self.assertEqual(cache.incr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.incr('answer', 10), 52) self.assertEqual(cache.get('answer'), 52) self.assertEqual(cache.incr('answer', -10), 42) with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): # Cache values can be decremented cache.set('answer', 43) self.assertEqual(cache.decr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.decr('answer', 10), 32) self.assertEqual(cache.get('answer'), 32) self.assertEqual(cache.decr('answer', -10), 42) with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_close(self): self.assertTrue(hasattr(cache, 'close')) 
cache.close() def test_data_types(self): # Many different data types can be cached stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertEqual(cache.get("stuff"), stuff) def test_cache_read_for_model_instance(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() my_poll = Poll.objects.create(question="Well?") self.assertEqual(Poll.objects.count(), 1) pub_date = my_poll.pub_date cache.set('question', my_poll) cached_poll = cache.get('question') self.assertEqual(cached_poll.pub_date, pub_date) # We only want the default expensive calculation run once self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_write_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache write expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) self.assertEqual(expensive_calculation.num_runs, 1) cache.set('deferred_queryset', defer_qs) # cache set should not re-evaluate default functions self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_read_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) cache.set('deferred_queryset', defer_qs) self.assertEqual(expensive_calculation.num_runs, 1) runs_before_cache_read = expensive_calculation.num_runs cache.get('deferred_queryset') # We only want the default expensive calculation run on creation and set self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read) def test_expiration(self): # Cache values can be set to expire cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) self.assertIs(cache.add("expire2", "newvalue"), True) self.assertEqual(cache.get("expire2"), "newvalue") self.assertIs(cache.has_key("expire3"), False) def test_touch(self): # cache.touch() updates the timeout. cache.set('expire1', 'very quickly', timeout=1) self.assertIs(cache.touch('expire1', timeout=4), True) time.sleep(2) self.assertIs(cache.has_key('expire1'), True) time.sleep(3) self.assertIs(cache.has_key('expire1'), False) # cache.touch() works without the timeout argument. 
cache.set('expire1', 'very quickly', timeout=1) self.assertIs(cache.touch('expire1'), True) time.sleep(2) self.assertIs(cache.has_key('expire1'), True) self.assertIs(cache.touch('nonexistent'), False) def test_unicode(self): # Unicode values can be cached stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } # Test `set` for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertEqual(cache.get(key), value) # Test `add` for (key, value) in stuff.items(): with self.subTest(key=key): self.assertIs(cache.delete(key), True) self.assertIs(cache.add(key, value), True) self.assertEqual(cache.get(key), value) # Test `set_many` for (key, value) in stuff.items(): self.assertIs(cache.delete(key), True) cache.set_many(stuff) for (key, value) in stuff.items(): with self.subTest(key=key): self.assertEqual(cache.get(key), value) def test_binary_string(self): # Binary strings should be cacheable from zlib import compress, decompress value = 'value_to_be_compressed' compressed_value = compress(value.encode()) # Test set cache.set('binary1', compressed_value) compressed_result = cache.get('binary1') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test add self.assertIs(cache.add('binary1-add', compressed_value), True) compressed_result = cache.get('binary1-add') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test set_many cache.set_many({'binary1-set_many': compressed_value}) compressed_result = cache.get('binary1-set_many') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) def test_set_many(self): # Multiple keys can be set using set_many cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertEqual(cache.get("key1"), "spam") self.assertEqual(cache.get("key2"), "eggs") def test_set_many_returns_empty_list_on_success(self): """set_many() returns an empty list when all keys are inserted.""" failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertEqual(failing_keys, []) def test_set_many_expiration(self): # set_many takes a second ``timeout`` parameter cache.set_many({"key1": "spam", "key2": "eggs"}, 1) time.sleep(2) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_delete_many(self): # Multiple keys can be deleted using delete_many cache.set_many({'key1': 'spam', 'key2': 'eggs', 'key3': 'ham'}) cache.delete_many(["key1", "key2"]) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) self.assertEqual(cache.get("key3"), "ham") def test_clear(self): # The cache can be emptied using clear cache.set_many({'key1': 'spam', 'key2': 'eggs'}) cache.clear() self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_long_timeout(self): """ Follow memcached's convention where a timeout greater than 30 days is treated as an absolute expiration timestamp instead of a relative offset (#12399). 
""" cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second self.assertEqual(cache.get('key1'), 'eggs') self.assertIs(cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1), True) self.assertEqual(cache.get('key2'), 'ham') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') def test_forever_timeout(self): """ Passing in None into timeout results in a value that is cached forever """ cache.set('key1', 'eggs', None) self.assertEqual(cache.get('key1'), 'eggs') self.assertIs(cache.add('key2', 'ham', None), True) self.assertEqual(cache.get('key2'), 'ham') self.assertIs(cache.add('key1', 'new eggs', None), False) self.assertEqual(cache.get('key1'), 'eggs') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') cache.set('key5', 'belgian fries', timeout=1) self.assertIs(cache.touch('key5', timeout=None), True) time.sleep(2) self.assertEqual(cache.get('key5'), 'belgian fries') def test_zero_timeout(self): """ Passing in zero into timeout results in a value that is not cached """ cache.set('key1', 'eggs', 0) self.assertIsNone(cache.get('key1')) self.assertIs(cache.add('key2', 'ham', 0), True) self.assertIsNone(cache.get('key2')) cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0) self.assertIsNone(cache.get('key3')) self.assertIsNone(cache.get('key4')) cache.set('key5', 'belgian fries', timeout=5) self.assertIs(cache.touch('key5', timeout=0), True) self.assertIsNone(cache.get('key5')) def test_float_timeout(self): # Make sure a timeout given as a float doesn't crash anything. cache.set("key1", "spam", 100.2) self.assertEqual(cache.get("key1"), "spam") def _perform_cull_test(self, cull_cache_name, initial_count, final_count): try: cull_cache = caches[cull_cache_name] except InvalidCacheBackendError: self.skipTest("Culling isn't implemented.") # Create initial cache key entries. This will overflow the cache, # causing a cull. for i in range(1, initial_count): cull_cache.set('cull%d' % i, 'value', 1000) count = 0 # Count how many keys are left in the cache. for i in range(1, initial_count): if cull_cache.has_key('cull%d' % i): count += 1 self.assertEqual(count, final_count) def test_cull(self): self._perform_cull_test('cull', 50, 29) def test_zero_cull(self): self._perform_cull_test('zero_cull', 50, 19) def test_cull_delete_when_store_empty(self): try: cull_cache = caches['cull'] except InvalidCacheBackendError: self.skipTest("Culling isn't implemented.") old_max_entries = cull_cache._max_entries # Force _cull to delete on first cached record. cull_cache._max_entries = -1 try: cull_cache.set('force_cull_delete', 'value', 1000) self.assertIs(cull_cache.has_key('force_cull_delete'), True) finally: cull_cache._max_entries = old_max_entries def _perform_invalid_key_test(self, key, expected_warning): """ All the builtin backends should warn (except memcached that should error) on keys that would be refused by memcached. This encourages portable caching code without making it too difficult to use production backends with more liberal key rules. Refs #6447. 
""" # mimic custom ``make_key`` method being defined since the default will # never show the below warnings def func(key, *args): return key old_func = cache.key_func cache.key_func = func tests = [ ('add', [key, 1]), ('get', [key]), ('set', [key, 1]), ('incr', [key]), ('decr', [key]), ('touch', [key]), ('delete', [key]), ('get_many', [[key, 'b']]), ('set_many', [{key: 1, 'b': 2}]), ('delete_many', [{key: 1, 'b': 2}]), ] try: for operation, args in tests: with self.subTest(operation=operation): with self.assertWarns(CacheKeyWarning) as cm: getattr(cache, operation)(*args) self.assertEqual(str(cm.warning), expected_warning) finally: cache.key_func = old_func def test_invalid_key_characters(self): # memcached doesn't allow whitespace or control characters in keys. key = 'key with spaces and 清' self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key) def test_invalid_key_length(self): # memcached limits key length to 250. key = ('a' * 250) + '清' expected_warning = ( 'Cache key will cause errors if used with memcached: ' '%r (longer than %s)' % (key, 250) ) self._perform_invalid_key_test(key, expected_warning) def test_cache_versioning_get_set(self): # set, using default version = 1 cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertEqual(cache.get('answer1', version=1), 42) self.assertIsNone(cache.get('answer1', version=2)) self.assertIsNone(caches['v2'].get('answer1')) self.assertEqual(caches['v2'].get('answer1', version=1), 42) self.assertIsNone(caches['v2'].get('answer1', version=2)) # set, default version = 1, but manually override version = 2 cache.set('answer2', 42, version=2) self.assertIsNone(cache.get('answer2')) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) # v2 set, using default version = 2 caches['v2'].set('answer3', 42) self.assertIsNone(cache.get('answer3')) self.assertIsNone(cache.get('answer3', version=1)) self.assertEqual(cache.get('answer3', version=2), 42) self.assertEqual(caches['v2'].get('answer3'), 42) self.assertIsNone(caches['v2'].get('answer3', version=1)) self.assertEqual(caches['v2'].get('answer3', version=2), 42) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set('answer4', 42, version=1) self.assertEqual(cache.get('answer4'), 42) self.assertEqual(cache.get('answer4', version=1), 42) self.assertIsNone(cache.get('answer4', version=2)) self.assertIsNone(caches['v2'].get('answer4')) self.assertEqual(caches['v2'].get('answer4', version=1), 42) self.assertIsNone(caches['v2'].get('answer4', version=2)) def test_cache_versioning_add(self): # add, default version = 1, but manually override version = 2 self.assertIs(cache.add('answer1', 42, version=2), True) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) self.assertIs(cache.add('answer1', 37, version=2), False) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) self.assertIs(cache.add('answer1', 37, version=1), True) self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) # v2 add, using default version = 2 self.assertIs(caches['v2'].add('answer2', 42), True) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) 
self.assertIs(caches['v2'].add('answer2', 37), False) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) self.assertIs(caches['v2'].add('answer2', 37, version=1), True) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) # v2 add, default version = 2, but manually override version = 1 self.assertIs(caches['v2'].add('answer3', 42, version=1), True) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) self.assertIs(caches['v2'].add('answer3', 37, version=1), False) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) self.assertIs(caches['v2'].add('answer3', 37), True) self.assertEqual(cache.get('answer3', version=1), 42) self.assertEqual(cache.get('answer3', version=2), 37) def test_cache_versioning_has_key(self): cache.set('answer1', 42) # has_key self.assertIs(cache.has_key('answer1'), True) self.assertIs(cache.has_key('answer1', version=1), True) self.assertIs(cache.has_key('answer1', version=2), False) self.assertIs(caches['v2'].has_key('answer1'), False) self.assertIs(caches['v2'].has_key('answer1', version=1), True) self.assertIs(caches['v2'].has_key('answer1', version=2), False) def test_cache_versioning_delete(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) self.assertIs(cache.delete('answer1'), True) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) self.assertIs(cache.delete('answer2', version=2), True) self.assertEqual(cache.get('answer2', version=1), 37) self.assertIsNone(cache.get('answer2', version=2)) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) self.assertIs(caches['v2'].delete('answer3'), True) self.assertEqual(cache.get('answer3', version=1), 37) self.assertIsNone(cache.get('answer3', version=2)) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) self.assertIs(caches['v2'].delete('answer4', version=1), True) self.assertIsNone(cache.get('answer4', version=1)) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_incr_decr(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) self.assertEqual(cache.incr('answer1'), 38) self.assertEqual(cache.get('answer1', version=1), 38) self.assertEqual(cache.get('answer1', version=2), 42) self.assertEqual(cache.decr('answer1'), 37) self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) self.assertEqual(cache.incr('answer2', version=2), 43) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 43) self.assertEqual(cache.decr('answer2', version=2), 42) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) self.assertEqual(caches['v2'].incr('answer3'), 43) self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 43) self.assertEqual(caches['v2'].decr('answer3'), 42) self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 42) cache.set('answer4', 37, version=1) cache.set('answer4', 42, 
version=2) self.assertEqual(caches['v2'].incr('answer4', version=1), 38) self.assertEqual(cache.get('answer4', version=1), 38) self.assertEqual(cache.get('answer4', version=2), 42) self.assertEqual(caches['v2'].decr('answer4', version=1), 37) self.assertEqual(cache.get('answer4', version=1), 37) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_get_set_many(self): # set, using default version = 1 cache.set_many({'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {}) # set, default version = 1, but manually override version = 2 cache.set_many({'ford2': 37, 'arthur2': 42}, version=2) self.assertEqual(cache.get_many(['ford2', 'arthur2']), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) # v2 set, using default version = 2 caches['v2'].set_many({'ford3': 37, 'arthur3': 42}) self.assertEqual(cache.get_many(['ford3', 'arthur3']), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1) self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {}) def test_incr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertIsNone(cache.get('answer', version=3)) self.assertEqual(cache.incr_version('answer', version=2), 3) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertIsNone(cache.get('answer', version=2)) self.assertEqual(cache.get('answer', version=3), 42) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertIsNone(caches['v2'].get('answer2', 
version=3)) self.assertEqual(caches['v2'].incr_version('answer2'), 3) self.assertIsNone(caches['v2'].get('answer2')) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertIsNone(caches['v2'].get('answer2', version=2)) self.assertEqual(caches['v2'].get('answer2', version=3), 42) with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertEqual(cache.decr_version('answer', version=2), 1) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.get('answer', version=1), 42) self.assertIsNone(cache.get('answer', version=2)) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertEqual(caches['v2'].decr_version('answer2'), 1) self.assertIsNone(caches['v2'].get('answer2')) self.assertEqual(caches['v2'].get('answer2', version=1), 42) self.assertIsNone(caches['v2'].get('answer2', version=2)) with self.assertRaises(ValueError): cache.decr_version('does_not_exist', version=2) def test_custom_key_func(self): # Two caches with different key functions aren't visible to each other cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertIsNone(caches['custom_key'].get('answer1')) self.assertIsNone(caches['custom_key2'].get('answer1')) caches['custom_key'].set('answer2', 42) self.assertIsNone(cache.get('answer2')) self.assertEqual(caches['custom_key'].get('answer2'), 42) self.assertEqual(caches['custom_key2'].get('answer2'), 42) def test_cache_write_unpicklable_object(self): fetch_middleware = FetchFromCacheMiddleware(empty_response) fetch_middleware.cache = cache request = self.factory.get('/cache/test') request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) self.assertIsNone(get_cache_data) content = 'Testing cookie serialization.' def get_response(req): response = HttpResponse(content) response.set_cookie('foo', 'bar') return response update_middleware = UpdateCacheMiddleware(get_response) update_middleware.cache = cache response = update_middleware(request) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) UpdateCacheMiddleware(lambda req: get_cache_data)(request) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) def test_add_fail_on_pickleerror(self): # Shouldn't fail silently if trying to cache an unpicklable type. 
with self.assertRaises(pickle.PickleError): cache.add('unpicklable', Unpicklable()) def test_set_fail_on_pickleerror(self): with self.assertRaises(pickle.PickleError): cache.set('unpicklable', Unpicklable()) def test_get_or_set(self): self.assertIsNone(cache.get('projector')) self.assertEqual(cache.get_or_set('projector', 42), 42) self.assertEqual(cache.get('projector'), 42) self.assertIsNone(cache.get_or_set('null', None)) def test_get_or_set_callable(self): def my_callable(): return 'value' self.assertEqual(cache.get_or_set('mykey', my_callable), 'value') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value') def test_get_or_set_callable_returning_none(self): self.assertIsNone(cache.get_or_set('mykey', lambda: None)) # Previous get_or_set() doesn't store None in the cache. self.assertEqual(cache.get('mykey', 'default'), 'default') def test_get_or_set_version(self): msg = "get_or_set() missing 1 required positional argument: 'default'" self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979) with self.assertRaisesMessage(TypeError, msg): cache.get_or_set('brian') with self.assertRaisesMessage(TypeError, msg): cache.get_or_set('brian', version=1) self.assertIsNone(cache.get('brian', version=1)) self.assertEqual(cache.get_or_set('brian', 42, version=1), 42) self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979) self.assertIsNone(cache.get('brian', version=3)) def test_get_or_set_racing(self): with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add: # Simulate cache.add() failing to add a value. In that case, the # default value should be returned. cache_add.return_value = False self.assertEqual(cache.get_or_set('key', 'default'), 'default') @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.db.DatabaseCache', # Spaces are used in the table name to ensure quoting/escaping is working LOCATION='test cache table' )) class DBCacheTests(BaseCacheTests, TransactionTestCase): available_apps = ['cache'] def setUp(self): # The super calls needs to happen first for the settings override. super().setUp() self.create_table() def tearDown(self): # The super call needs to happen first because it uses the database. super().tearDown() self.drop_table() def create_table(self): management.call_command('createcachetable', verbosity=0) def drop_table(self): with connection.cursor() as cursor: table_name = connection.ops.quote_name('test cache table') cursor.execute('DROP TABLE %s' % table_name) def test_get_many_num_queries(self): cache.set_many({'a': 1, 'b': 2}) cache.set('expired', 'expired', 0.01) with self.assertNumQueries(1): self.assertEqual(cache.get_many(['a', 'b']), {'a': 1, 'b': 2}) time.sleep(0.02) with self.assertNumQueries(2): self.assertEqual(cache.get_many(['a', 'b', 'expired']), {'a': 1, 'b': 2}) def test_delete_many_num_queries(self): cache.set_many({'a': 1, 'b': 2, 'c': 3}) with self.assertNumQueries(1): cache.delete_many(['a', 'b', 'c']) def test_zero_cull(self): self._perform_cull_test('zero_cull', 50, 18) def test_second_call_doesnt_crash(self): out = io.StringIO() management.call_command('createcachetable', stdout=out) self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES)) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.db.DatabaseCache', # Use another table name to avoid the 'table already exists' message. 
LOCATION='createcachetable_dry_run_mode' )) def test_createcachetable_dry_run_mode(self): out = io.StringIO() management.call_command('createcachetable', dry_run=True, stdout=out) output = out.getvalue() self.assertTrue(output.startswith("CREATE TABLE")) def test_createcachetable_with_table_argument(self): """ Delete and recreate cache table with legacy behavior (explicitly specifying the table name). """ self.drop_table() out = io.StringIO() management.call_command( 'createcachetable', 'test cache table', verbosity=2, stdout=out, ) self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n") @override_settings(USE_TZ=True) class DBCacheWithTimeZoneTests(DBCacheTests): pass class DBCacheRouter: """A router that puts the cache table on the 'other' database.""" def db_for_read(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def db_for_write(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def allow_migrate(self, db, app_label, **hints): if app_label == 'django_cache': return db == 'other' return None @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', 'LOCATION': 'my_cache_table', }, }, ) class CreateCacheTableForDBCacheTests(TestCase): databases = {'default', 'other'} @override_settings(DATABASE_ROUTERS=[DBCacheRouter()]) def test_createcachetable_observes_database_router(self): # cache table should not be created on 'default' with self.assertNumQueries(0, using='default'): management.call_command('createcachetable', database='default', verbosity=0) # cache table should be created on 'other' # Queries: # 1: check table doesn't already exist # 2: create savepoint (if transactional DDL is supported) # 3: create the table # 4: create the index # 5: release savepoint (if transactional DDL is supported) num = 5 if connections['other'].features.can_rollback_ddl else 3 with self.assertNumQueries(num, using='other'): management.call_command('createcachetable', database='other', verbosity=0) class PicklingSideEffect: def __init__(self, cache): self.cache = cache self.locked = False def __getstate__(self): self.locked = self.cache._lock.locked() return {} limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', OPTIONS={'MAX_ENTRIES': 9}, )) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', )) class LocMemCacheTests(BaseCacheTests, TestCase): def setUp(self): super().setUp() # LocMem requires a hack to make the other caches # share a data store with the 'normal' cache. 
caches['prefix']._cache = cache._cache caches['prefix']._expire_info = cache._expire_info caches['v2']._cache = cache._cache caches['v2']._expire_info = cache._expire_info caches['custom_key']._cache = cache._cache caches['custom_key']._expire_info = cache._expire_info caches['custom_key2']._cache = cache._cache caches['custom_key2']._expire_info = cache._expire_info @override_settings(CACHES={ 'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other' }, }) def test_multiple_caches(self): "Multiple locmem caches are isolated" cache.set('value', 42) self.assertEqual(caches['default'].get('value'), 42) self.assertIsNone(caches['other'].get('value')) def test_locking_on_pickle(self): """#20613/#18541 -- Ensures pickling is done outside of the lock.""" bad_obj = PicklingSideEffect(cache) cache.set('set', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") self.assertIs(cache.add('add', bad_obj), True) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") def test_incr_decr_timeout(self): """incr/decr does not modify expiry time (matches memcached behavior)""" key = 'value' _key = cache.make_key(key) cache.set(key, 1, timeout=cache.default_timeout * 10) expire = cache._expire_info[_key] self.assertEqual(cache.incr(key), 2) self.assertEqual(expire, cache._expire_info[_key]) self.assertEqual(cache.decr(key), 1) self.assertEqual(expire, cache._expire_info[_key]) @limit_locmem_entries def test_lru_get(self): """get() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key) cache.set(9, 9, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) @limit_locmem_entries def test_lru_set(self): """set() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(3, 9): cache.set(key, key, timeout=None) cache.set(9, 9, timeout=None) for key in range(3, 10): self.assertEqual(cache.get(key), key) for key in range(3): self.assertIsNone(cache.get(key)) @limit_locmem_entries def test_lru_incr(self): """incr() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): self.assertEqual(cache.incr(key), key + 1) cache.set(9, 9, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key + 1) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) # memcached backend isn't guaranteed to be available. # To check the memcached backend, the test settings file will # need to contain at least one cache backend setting that points at # your memcache server. configured_caches = {} for _cache_params in settings.CACHES.values(): configured_caches[_cache_params['BACKEND']] = _cache_params MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache') PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache') PyMemcacheCache_params = configured_caches.get('django.core.cache.backends.memcached.PyMemcacheCache') # The memcached backends don't support cull-related options like `MAX_ENTRIES`. 
memcached_excluded_caches = {'cull', 'zero_cull'} class BaseMemcachedTests(BaseCacheTests): # By default it's assumed that the client doesn't clean up connections # properly, in which case the backend must do so after each request. should_disconnect_on_close = True def test_location_multiple_servers(self): locations = [ ['server1.tld', 'server2:11211'], 'server1.tld;server2:11211', 'server1.tld,server2:11211', ] for location in locations: with self.subTest(location=location): params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location} with self.settings(CACHES={'default': params}): self.assertEqual(cache._servers, ['server1.tld', 'server2:11211']) def _perform_invalid_key_test(self, key, expected_warning): """ While other backends merely warn, memcached should raise for an invalid key. """ msg = expected_warning.replace(key, cache.make_key(key)) tests = [ ('add', [key, 1]), ('get', [key]), ('set', [key, 1]), ('incr', [key]), ('decr', [key]), ('touch', [key]), ('delete', [key]), ('get_many', [[key, 'b']]), ('set_many', [{key: 1, 'b': 2}]), ('delete_many', [{key: 1, 'b': 2}]), ] for operation, args in tests: with self.subTest(operation=operation): with self.assertRaises(InvalidCacheKey) as cm: getattr(cache, operation)(*args) self.assertEqual(str(cm.exception), msg) def test_default_never_expiring_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, TIMEOUT=None)): cache.set('infinite_foo', 'bar') self.assertEqual(cache.get('infinite_foo'), 'bar') def test_default_far_future_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, # 60*60*24*365, 1 year TIMEOUT=31536000)): cache.set('future_foo', 'bar') self.assertEqual(cache.get('future_foo'), 'bar') def test_memcached_deletes_key_on_failed_set(self): # By default memcached allows objects up to 1MB. For the cache_db session # backend to always use the current session, memcached needs to delete # the old key if it fails to set. # pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can # tell from a quick check of its source code. This is falling back to # the default value exposed by python-memcached on my system. max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576) cache.set('small_value', 'a') self.assertEqual(cache.get('small_value'), 'a') large_value = 'a' * (max_value_length + 1) try: cache.set('small_value', large_value) except Exception: # Some clients (e.g. pylibmc) raise when the value is too large, # while others (e.g. python-memcached) intentionally return True # indicating success. This test is primarily checking that the key # was deleted, so the return/exception behavior for the set() # itself is not important. pass # small_value should be deleted, or set if configured to accept larger values value = cache.get('small_value') self.assertTrue(value is None or value == large_value) def test_close(self): # For clients that don't manage their connections properly, the # connection is closed when the request is complete. 
signals.request_finished.disconnect(close_old_connections) try: with mock.patch.object(cache._class, 'disconnect_all', autospec=True) as mock_disconnect: signals.request_finished.send(self.__class__) self.assertIs(mock_disconnect.called, self.should_disconnect_on_close) finally: signals.request_finished.connect(close_old_connections) def test_set_many_returns_failing_keys(self): def fail_set_multi(mapping, *args, **kwargs): return mapping.keys() with mock.patch.object(cache._class, 'set_multi', side_effect=fail_set_multi): failing_keys = cache.set_many({'key': 'value'}) self.assertEqual(failing_keys, ['key']) @unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, )) class MemcachedCacheTests(BaseMemcachedTests, TestCase): base_params = MemcachedCache_params def test_memcached_uses_highest_pickle_version(self): # Regression test for #19810 for cache_key in settings.CACHES: with self.subTest(cache_key=cache_key): self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL) @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, OPTIONS={'server_max_value_length': 9999}, )) def test_memcached_options(self): self.assertEqual(cache._cache.server_max_value_length, 9999) def test_default_used_when_none_is_set(self): """ python-memcached doesn't support default in get() so this test overrides the one in BaseCacheTests. """ cache.set('key_default_none', None) self.assertEqual(cache.get('key_default_none', default='default'), 'default') @unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, )) class PyLibMCCacheTests(BaseMemcachedTests, TestCase): base_params = PyLibMCCache_params # libmemcached manages its own connections. 
should_disconnect_on_close = False @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, OPTIONS={ 'binary': True, 'behaviors': {'tcp_nodelay': True}, }, )) def test_pylibmc_options(self): self.assertTrue(cache._cache.binary) self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True)) def test_pylibmc_client_servers(self): backend = self.base_params['BACKEND'] tests = [ ('unix:/run/memcached/socket', '/run/memcached/socket'), ('/run/memcached/socket', '/run/memcached/socket'), ('localhost', 'localhost'), ('localhost:11211', 'localhost:11211'), ('[::1]', '[::1]'), ('[::1]:11211', '[::1]:11211'), ('127.0.0.1', '127.0.0.1'), ('127.0.0.1:11211', '127.0.0.1:11211'), ] for location, expected in tests: settings = {'default': {'BACKEND': backend, 'LOCATION': location}} with self.subTest(location), self.settings(CACHES=settings): self.assertEqual(cache.client_servers, [expected]) @unittest.skipUnless(PyMemcacheCache_params, 'PyMemcacheCache backend not configured') @override_settings(CACHES=caches_setting_for_tests( base=PyMemcacheCache_params, exclude=memcached_excluded_caches, )) class PyMemcacheCacheTests(BaseMemcachedTests, TestCase): base_params = PyMemcacheCache_params def test_pymemcache_highest_pickle_version(self): self.assertEqual( cache._cache.default_kwargs['serde']._serialize_func.keywords['pickle_version'], pickle.HIGHEST_PROTOCOL, ) for cache_key in settings.CACHES: for client_key, client in caches[cache_key]._cache.clients.items(): with self.subTest(cache_key=cache_key, server=client_key): self.assertEqual( client.serde._serialize_func.keywords['pickle_version'], pickle.HIGHEST_PROTOCOL, ) @override_settings(CACHES=caches_setting_for_tests( base=PyMemcacheCache_params, exclude=memcached_excluded_caches, OPTIONS={'no_delay': True}, )) def test_pymemcache_options(self): self.assertIs(cache._cache.default_kwargs['no_delay'], True) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.filebased.FileBasedCache', )) class FileBasedCacheTests(BaseCacheTests, TestCase): """ Specific test cases for the file-based cache. """ def setUp(self): super().setUp() self.dirname = self.mkdtemp() # Caches location cannot be modified through override_settings / modify_settings, # hence settings are manipulated directly here and the setting_changed signal # is triggered manually. for cache_params in settings.CACHES.values(): cache_params['LOCATION'] = self.dirname setting_changed.send(self.__class__, setting='CACHES', enter=False) def tearDown(self): super().tearDown() # Call parent first, as cache.clear() may recreate cache base directory shutil.rmtree(self.dirname) def mkdtemp(self): return tempfile.mkdtemp() def test_ignores_non_cache_files(self): fname = os.path.join(self.dirname, 'not-a-cache-file') with open(fname, 'w'): os.utime(fname, None) cache.clear() self.assertTrue(os.path.exists(fname), 'Expected cache.clear to ignore non cache files') os.remove(fname) def test_clear_does_not_remove_cache_dir(self): cache.clear() self.assertTrue(os.path.exists(self.dirname), 'Expected cache.clear to keep the cache dir') def test_creates_cache_dir_if_nonexistent(self): os.rmdir(self.dirname) cache.set('foo', 'bar') self.assertTrue(os.path.exists(self.dirname)) def test_get_ignores_enoent(self): cache.set('foo', 'bar') os.unlink(cache._key_to_file('foo')) # Returns the default instead of erroring. 
        self.assertEqual(cache.get('foo', 'baz'), 'baz')

    @skipIf(
        sys.platform == 'win32',
        'Windows only partially supports umasks and chmod.',
    )
    def test_cache_dir_permissions(self):
        os.rmdir(self.dirname)
        dir_path = Path(self.dirname) / 'nested' / 'filebasedcache'
        for cache_params in settings.CACHES.values():
            cache_params['LOCATION'] = dir_path
        setting_changed.send(self.__class__, setting='CACHES', enter=False)
        cache.set('foo', 'bar')
        self.assertIs(dir_path.exists(), True)
        tests = [
            dir_path,
            dir_path.parent,
            dir_path.parent.parent,
        ]
        for directory in tests:
            with self.subTest(directory=directory):
                dir_mode = directory.stat().st_mode & 0o777
                self.assertEqual(dir_mode, 0o700)

    def test_get_does_not_ignore_non_filenotfound_exceptions(self):
        with mock.patch('builtins.open', side_effect=OSError):
            with self.assertRaises(OSError):
                cache.get('foo')

    def test_empty_cache_file_considered_expired(self):
        cache_file = cache._key_to_file('foo')
        with open(cache_file, 'wb') as fh:
            fh.write(b'')
        with open(cache_file, 'rb') as fh:
            self.assertIs(cache._is_expired(fh), True)


class FileBasedCachePathLibTests(FileBasedCacheTests):
    def mkdtemp(self):
        tmp_dir = super().mkdtemp()
        return Path(tmp_dir)


@override_settings(CACHES={
    'default': {
        'BACKEND': 'cache.liberal_backend.CacheClass',
    },
})
class CustomCacheKeyValidationTests(SimpleTestCase):
    """
    Tests for the ability to mixin a custom ``validate_key`` method to a custom
    cache backend that otherwise inherits from a builtin backend, and override
    the default key validation. Refs #6447.
    """
    def test_custom_key_validation(self):
        # This key is both longer than 250 characters and has spaces.
        key = 'some key with spaces' * 15
        val = 'a value'
        cache.set(key, val)
        self.assertEqual(cache.get(key), val)


@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'cache.closeable_cache.CacheClass',
        }
    }
)
class CacheClosingTests(SimpleTestCase):

    def test_close(self):
        self.assertFalse(cache.closed)
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)


DEFAULT_MEMORY_CACHES_SETTINGS = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
    }
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None


class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
    """
    Cache settings with TIMEOUT=None create caches that set non-expiring keys.
    """
    def setUp(self):
        # The 5 minute (300 seconds) default expiration time for keys is
        # defined in the implementation of the initializer method of the
        # BaseCache type.
        self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout

    def tearDown(self):
        del self.DEFAULT_TIMEOUT

    def test_default_expiration_time_for_keys_is_5_minutes(self):
        """The default expiration time of a cache key is 5 minutes.

        This value is defined in
        django.core.cache.backends.base.BaseCache.__init__().
        """
        self.assertEqual(300, self.DEFAULT_TIMEOUT)

    def test_caches_with_unset_timeout_has_correct_default_timeout(self):
        """Caches that have the TIMEOUT parameter undefined in the default
        settings will use the default 5 minute timeout.
        """
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
        """Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.

        This means "no timeout".
""" cache = caches[DEFAULT_CACHE_ALIAS] self.assertIsNone(cache.default_timeout) self.assertIsNone(cache.get_backend_timeout()) @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS) def test_caches_with_unset_timeout_set_expiring_key(self): """Memory caches that have the TIMEOUT parameter unset will set cache keys having the default 5 minute timeout. """ key = "my-key" value = "my-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNotNone(cache._expire_info[cache_key]) @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS) def test_caches_set_with_timeout_as_none_set_non_expiring_key(self): """Memory caches that have the TIMEOUT parameter set to `None` will set a non expiring key by default. """ key = "another-key" value = "another-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNone(cache._expire_info[cache_key]) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHE_MIDDLEWARE_SECONDS=1, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, USE_I18N=False, ALLOWED_HOSTS=['.example.com'], ) class CacheUtils(SimpleTestCase): """TestCase for django.utils.cache functions.""" host = 'www.example.com' path = '/cache/test/' factory = RequestFactory(HTTP_HOST=host) def tearDown(self): cache.clear() def _get_request_cache(self, method='GET', query_string=None, update_cache=None): request = self._get_request(self.host, self.path, method, query_string=query_string) request._cache_update_cache = True if not update_cache else update_cache return request def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. (None, ('Accept-Encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'), ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('*', ('Accept-Language', 'Cookie'), '*'), ('Accept-Language, Cookie', ('*',), '*'), ) for initial_vary, newheaders, resulting_vary in headers: with self.subTest(initial_vary=initial_vary, newheaders=newheaders): response = HttpResponse() if initial_vary is not None: response.headers['Vary'] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response.headers['Vary'], resulting_vary) def test_get_cache_key(self): request = self.factory.get(self.path) response = HttpResponse() # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) # A specified key_prefix is taken into account. key_prefix = 'localprefix' learn_cache_key(request, response, key_prefix=key_prefix) self.assertEqual( get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.' 
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) def test_get_cache_key_with_query(self): request = self.factory.get(self.path, {'test': 1}) response = HttpResponse() # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) # The querystring is taken into account. self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' 'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e' ) def test_cache_key_varies_by_url(self): """ get_cache_key keys differ by fully-qualified URL instead of path """ request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com') learn_cache_key(request1, HttpResponse()) request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com') learn_cache_key(request2, HttpResponse()) self.assertNotEqual(get_cache_key(request1), get_cache_key(request2)) def test_learn_cache_key(self): request = self.factory.head(self.path) response = HttpResponse() response.headers['Vary'] = 'Pony' # Make sure that the Vary header is added to the key hash learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) def test_patch_cache_control(self): tests = ( # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts (None, {'private': True}, {'private'}), ('', {'private': True}, {'private'}), # no-cache. ('', {'no_cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}), ('', {'no-cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}), ('no-cache=Set-Cookie', {'no_cache': True}, {'no-cache'}), ('no-cache=Set-Cookie,no-cache=Link', {'no_cache': True}, {'no-cache'}), ('no-cache=Set-Cookie', {'no_cache': 'Link'}, {'no-cache=Set-Cookie', 'no-cache=Link'}), ( 'no-cache=Set-Cookie,no-cache=Link', {'no_cache': 'Custom'}, {'no-cache=Set-Cookie', 'no-cache=Link', 'no-cache=Custom'}, ), # Test whether private/public attributes are mutually exclusive ('private', {'private': True}, {'private'}), ('private', {'public': True}, {'public'}), ('public', {'public': True}, {'public'}), ('public', {'private': True}, {'private'}), ('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}), ('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ) cc_delim_re = re.compile(r'\s*,\s*') for initial_cc, newheaders, expected_cc in tests: with self.subTest(initial_cc=initial_cc, newheaders=newheaders): response = HttpResponse() if initial_cc is not None: response.headers['Cache-Control'] = initial_cc patch_cache_control(response, **newheaders) parts = set(cc_delim_re.split(response.headers['Cache-Control'])) self.assertEqual(parts, expected_cc) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix', }, }, ) class PrefixedCacheUtils(CacheUtils): pass @override_settings( CACHE_MIDDLEWARE_SECONDS=60, CACHE_MIDDLEWARE_KEY_PREFIX='test', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, ) class CacheHEADTest(SimpleTestCase): path = '/cache/test/' factory = RequestFactory() def tearDown(self): cache.clear() def _set_cache(self, request, msg): return UpdateCacheMiddleware(lambda req: 
HttpResponse(msg))(request) def test_head_caches_correctly(self): test_content = 'test content' request = self.factory.head(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) def test_head_with_cached_get(self): test_content = 'test content' request = self.factory.get(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, LANGUAGES=[ ('en', 'English'), ('es', 'Spanish'), ], ) class CacheI18nTest(SimpleTestCase): path = '/cache/test/' factory = RequestFactory() def tearDown(self): cache.clear() @override_settings(USE_I18N=True, USE_TZ=False) def test_cache_key_i18n_translation(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") key2 = get_cache_key(request) self.assertEqual(key, key2) def check_accept_language_vary(self, accept_language, vary, reference_key): request = self.factory.get(self.path) request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response.headers['Vary'] = vary key = learn_cache_key(request, response) key2 = get_cache_key(request) self.assertEqual(key, reference_key) self.assertEqual(key2, reference_key) @override_settings(USE_I18N=True, USE_TZ=False) def test_cache_key_i18n_translation_accept_language(self): lang = translation.get_language() self.assertEqual(lang, 'en') request = self.factory.get(self.path) request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response.headers['Vary'] = 'accept-encoding' key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") self.check_accept_language_vary( 'en-us', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'en-US', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'en-US,en;q=0.8', 'accept-encoding, accept-language, cookie', key ) self.check_accept_language_vary( 'en-US,en;q=0.8,ko;q=0.6', 'accept-language, cookie, accept-encoding', key ) self.check_accept_language_vary( 'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ', 'accept-encoding, cookie, accept-language', key ) self.check_accept_language_vary( 'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4', 'accept-language, accept-encoding, cookie', key ) self.check_accept_language_vary( 'ko;q=1.0,en;q=0.5', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'ko, en', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'ko-KR, en-US', 'accept-encoding, accept-language, cookie', key ) @override_settings(USE_I18N=False, USE_TZ=True) def test_cache_key_i18n_timezone(self): 
request = self.factory.get(self.path) tz = timezone.get_current_timezone_name() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active") key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False) def test_cache_key_no_i18n(self): request = self.factory.get(self.path) lang = translation.get_language() tz = timezone.get_current_timezone_name() response = HttpResponse() key = learn_cache_key(request, response) self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active") self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active") @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, USE_I18N=True, ) def test_middleware(self): def set_cache(request, lang, msg): def get_response(req): return HttpResponse(msg) translation.activate(lang) return UpdateCacheMiddleware(get_response)(request) # cache with non empty request.GET request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) # first access, cache must return None self.assertIsNone(get_cache_data) content = 'Check for cache with QUERY_STRING' def get_response(req): return HttpResponse(content) UpdateCacheMiddleware(get_response)(request) get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) # cache must return content self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) # different QUERY_STRING, cache must be empty request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) self.assertIsNone(get_cache_data) # i18n tests en_message = "Hello world!" es_message = "Hola mundo!" 
request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'en', en_message) get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) # The cache can be recovered self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, en_message.encode()) # change the session language and set content request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'es', es_message) # change again the language translation.activate('en') # retrieve the content from cache get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) self.assertEqual(get_cache_data.content, en_message.encode()) # change again the language translation.activate('es') get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) self.assertEqual(get_cache_data.content, es_message.encode()) # reset the language translation.deactivate() @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, ) def test_middleware_doesnt_cache_streaming_response(self): request = self.factory.get(self.path) get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) self.assertIsNone(get_cache_data) def get_stream_response(req): return StreamingHttpResponse(['Check for cache with streaming content.']) UpdateCacheMiddleware(get_stream_response)(request) get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request) self.assertIsNone(get_cache_data) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix' }, }, ) class PrefixedCacheI18nTest(CacheI18nTest): pass def hello_world_view(request, value): return HttpResponse('Hello World %s' % value) def csrf_view(request): return HttpResponse(csrf(request)['csrf_token']) @override_settings( CACHE_MIDDLEWARE_ALIAS='other', CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix', CACHE_MIDDLEWARE_SECONDS=30, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other', 'TIMEOUT': '1', }, }, ) class CacheMiddlewareTest(SimpleTestCase): factory = RequestFactory() def setUp(self): self.default_cache = caches['default'] self.other_cache = caches['other'] def tearDown(self): self.default_cache.clear() self.other_cache.clear() super().tearDown() def test_constructor(self): """ Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as Middleware vs. usage of CacheMiddleware as view decorator and setting attributes appropriately. """ # If only one argument is passed in construction, it's being used as # middleware. middleware = CacheMiddleware(empty_response) # Now test object attributes against values defined in setUp above self.assertEqual(middleware.cache_timeout, 30) self.assertEqual(middleware.key_prefix, 'middlewareprefix') self.assertEqual(middleware.cache_alias, 'other') self.assertEqual(middleware.cache, self.other_cache) # If more arguments are being passed in construction, it's being used # as a decorator. First, test with "defaults": as_view_decorator = CacheMiddleware(empty_response, cache_alias=None, key_prefix=None) self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 
30 self.assertEqual(as_view_decorator.key_prefix, '') # Value of DEFAULT_CACHE_ALIAS from django.core.cache self.assertEqual(as_view_decorator.cache_alias, 'default') self.assertEqual(as_view_decorator.cache, self.default_cache) # Next, test with custom values: as_view_decorator_with_custom = CacheMiddleware( hello_world_view, cache_timeout=60, cache_alias='other', key_prefix='foo' ) self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60) self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo') self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other') self.assertEqual(as_view_decorator_with_custom.cache, self.other_cache) def test_update_cache_middleware_constructor(self): middleware = UpdateCacheMiddleware(empty_response) self.assertEqual(middleware.cache_timeout, 30) self.assertIsNone(middleware.page_timeout) self.assertEqual(middleware.key_prefix, 'middlewareprefix') self.assertEqual(middleware.cache_alias, 'other') self.assertEqual(middleware.cache, self.other_cache) def test_fetch_cache_middleware_constructor(self): middleware = FetchFromCacheMiddleware(empty_response) self.assertEqual(middleware.key_prefix, 'middlewareprefix') self.assertEqual(middleware.cache_alias, 'other') self.assertEqual(middleware.cache, self.other_cache) def test_middleware(self): middleware = CacheMiddleware(hello_world_view) prefix_middleware = CacheMiddleware(hello_world_view, key_prefix='prefix1') timeout_middleware = CacheMiddleware(hello_world_view, cache_timeout=1) request = self.factory.get('/view/') # Put the request through the request middleware result = middleware.process_request(request) self.assertIsNone(result) response = hello_world_view(request, '1') # Now put the response through the response middleware response = middleware.process_response(request, response) # Repeating the request should result in a cache hit result = middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') # The same request through a different middleware won't hit result = prefix_middleware.process_request(request) self.assertIsNone(result) # The same request with a timeout _will_ hit result = timeout_middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') def test_view_decorator(self): # decorate the same view with different cache decorators default_view = cache_page(3)(hello_world_view) default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view) explicit_default_view = cache_page(3, cache='default')(hello_world_view) explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view) other_view = cache_page(1, cache='other')(hello_world_view) other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view) request = self.factory.get('/view/') # Request the view once response = default_view(request, '1') self.assertEqual(response.content, b'Hello World 1') # Request again -- hit the cache response = default_view(request, '2') self.assertEqual(response.content, b'Hello World 1') # Requesting the same view with the explicit cache should yield the same result response = explicit_default_view(request, '3') self.assertEqual(response.content, b'Hello World 1') # Requesting with a prefix will hit a different cache key response = explicit_default_with_prefix_view(request, '4') self.assertEqual(response.content, b'Hello World 4') # Hitting the same view again gives a cache hit response = 
explicit_default_with_prefix_view(request, '5') self.assertEqual(response.content, b'Hello World 4') # And going back to the implicit cache will hit the same cache response = default_with_prefix_view(request, '6') self.assertEqual(response.content, b'Hello World 4') # Requesting from an alternate cache won't hit cache response = other_view(request, '7') self.assertEqual(response.content, b'Hello World 7') # But a repeated hit will hit cache response = other_view(request, '8') self.assertEqual(response.content, b'Hello World 7') # And prefixing the alternate cache yields yet another cache entry response = other_with_prefix_view(request, '9') self.assertEqual(response.content, b'Hello World 9') # But if we wait a couple of seconds... time.sleep(2) # ... the default cache will still hit caches['default'] response = default_view(request, '11') self.assertEqual(response.content, b'Hello World 1') # ... the default cache with a prefix will still hit response = default_with_prefix_view(request, '12') self.assertEqual(response.content, b'Hello World 4') # ... the explicit default cache will still hit response = explicit_default_view(request, '13') self.assertEqual(response.content, b'Hello World 1') # ... the explicit default cache with a prefix will still hit response = explicit_default_with_prefix_view(request, '14') self.assertEqual(response.content, b'Hello World 4') # .. but a rapidly expiring cache won't hit response = other_view(request, '15') self.assertEqual(response.content, b'Hello World 15') # .. even if it has a prefix response = other_with_prefix_view(request, '16') self.assertEqual(response.content, b'Hello World 16') def test_cache_page_timeout(self): # Page timeout takes precedence over the "max-age" section of the # "Cache-Control". tests = [ (1, 3), # max_age < page_timeout. (3, 1), # max_age > page_timeout. ] for max_age, page_timeout in tests: with self.subTest(max_age=max_age, page_timeout=page_timeout): view = cache_page(timeout=page_timeout)( cache_control(max_age=max_age)(hello_world_view) ) request = self.factory.get('/view/') response = view(request, '1') self.assertEqual(response.content, b'Hello World 1') time.sleep(1) response = view(request, '2') self.assertEqual( response.content, b'Hello World 1' if page_timeout > max_age else b'Hello World 2', ) cache.clear() def test_cached_control_private_not_cached(self): """Responses with 'Cache-Control: private' are not cached.""" view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view)) request = self.factory.get('/view/') response = view_with_private_cache(request, '1') self.assertEqual(response.content, b'Hello World 1') response = view_with_private_cache(request, '2') self.assertEqual(response.content, b'Hello World 2') def test_sensitive_cookie_not_cached(self): """ Django must prevent caching of responses that set a user-specific (and maybe security sensitive) cookie in response to a cookie-less request. """ request = self.factory.get('/view/') csrf_middleware = CsrfViewMiddleware(csrf_view) csrf_middleware.process_view(request, csrf_view, (), {}) cache_middleware = CacheMiddleware(csrf_middleware) self.assertIsNone(cache_middleware.process_request(request)) cache_middleware(request) # Inserting a CSRF cookie in a cookie-less request prevented caching. 
self.assertIsNone(cache_middleware.process_request(request)) def test_304_response_has_http_caching_headers_but_not_cached(self): original_view = mock.Mock(return_value=HttpResponseNotModified()) view = cache_page(2)(original_view) request = self.factory.get('/view/') # The view shouldn't be cached on the second call. view(request).close() response = view(request) response.close() self.assertEqual(original_view.call_count, 2) self.assertIsInstance(response, HttpResponseNotModified) self.assertIn('Cache-Control', response) self.assertIn('Expires', response) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHE_MIDDLEWARE_SECONDS=1, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, USE_I18N=False, ) class TestWithTemplateResponse(SimpleTestCase): """ Tests various headers w/ TemplateResponse. Most are probably redundant since they manipulate the same object anyway but the ETag header is 'special' because it relies on the content being complete (which is not necessarily always the case with a TemplateResponse) """ path = '/cache/test/' factory = RequestFactory() def tearDown(self): cache.clear() def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. (None, ('Accept-Encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'), ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ) for initial_vary, newheaders, resulting_vary in headers: with self.subTest(initial_vary=initial_vary, newheaders=newheaders): template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) if initial_vary is not None: response.headers['Vary'] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response.headers['Vary'], resulting_vary) def test_get_cache_key(self): request = self.factory.get(self.path) template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) key_prefix = 'localprefix' # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e' ) # A specified key_prefix is taken into account. learn_cache_key(request, response, key_prefix=key_prefix) self.assertEqual( get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.' '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e' ) def test_get_cache_key_with_query(self): request = self.factory.get(self.path, {'test': 1}) template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) # The querystring is taken into account. 
self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e' ) class TestMakeTemplateFragmentKey(SimpleTestCase): def test_without_vary_on(self): key = make_template_fragment_key('a.fragment') self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e') def test_with_one_vary_on(self): key = make_template_fragment_key('foo', ['abc']) self.assertEqual(key, 'template.cache.foo.493e283d571a73056196f1a68efd0f66') def test_with_many_vary_on(self): key = make_template_fragment_key('bar', ['abc', 'def']) self.assertEqual(key, 'template.cache.bar.17c1a507a0cb58384f4c639067a93520') def test_proper_escaping(self): key = make_template_fragment_key('spam', ['abc:def%']) self.assertEqual(key, 'template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc') def test_with_ints_vary_on(self): key = make_template_fragment_key('foo', [1, 2, 3, 4, 5]) self.assertEqual(key, 'template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461') def test_with_unicode_vary_on(self): key = make_template_fragment_key('foo', ['42º', '😀']) self.assertEqual(key, 'template.cache.foo.7ced1c94e543668590ba39b3c08b0237') def test_long_vary_on(self): key = make_template_fragment_key('foo', ['x' * 10000]) self.assertEqual(key, 'template.cache.foo.3670b349b5124aa56bdb50678b02b23a') class CacheHandlerTest(SimpleTestCase): def test_same_instance(self): """ Attempting to retrieve the same alias should yield the same instance. """ cache1 = caches['default'] cache2 = caches['default'] self.assertIs(cache1, cache2) def test_per_thread(self): """ Requesting the same alias from separate threads should yield separate instances. """ c = [] def runner(): c.append(caches['default']) for x in range(2): t = threading.Thread(target=runner) t.start() t.join() self.assertIsNot(c[0], c[1])
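# Illustrative sketch (not part of the test suite): CustomCacheKeyValidationTests
# above points CACHES at 'cache.liberal_backend.CacheClass', which only needs to
# relax the default key validation. One plausible shape for such a backend is a
# mixin that overrides validate_key() on top of a builtin backend, as sketched
# below; the class names are hypothetical and this is not the actual contents of
# that helper module.
from django.core.cache.backends.locmem import LocMemCache


class LiberalKeyValidationMixinSketch:
    def validate_key(self, key):
        # Accept any key: no 250-character limit and no warning about spaces or
        # control characters.
        pass


class LiberalCacheClassSketch(LiberalKeyValidationMixinSketch, LocMemCache):
    pass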
from django.test import SimpleTestCase
from django.utils.hashable import make_hashable


class TestHashable(SimpleTestCase):
    def test_equal(self):
        tests = (
            ([], ()),
            (['a', 1], ('a', 1)),
            ({}, ()),
            ({'a'}, ('a',)),
            (frozenset({'a'}), {'a'}),
            ({'a': 1, 'b': 2}, (('a', 1), ('b', 2))),
            ({'b': 2, 'a': 1}, (('a', 1), ('b', 2))),
            (('a', ['b', 1]), ('a', ('b', 1))),
            (('a', {'b': 1}), ('a', (('b', 1),))),
        )
        for value, expected in tests:
            with self.subTest(value=value):
                self.assertEqual(make_hashable(value), expected)

    def test_count_equal(self):
        tests = (
            ({'a': 1, 'b': ['a', 1]}, (('a', 1), ('b', ('a', 1)))),
            ({'a': 1, 'b': ('a', [1, 2])}, (('a', 1), ('b', ('a', (1, 2))))),
        )
        for value, expected in tests:
            with self.subTest(value=value):
                self.assertCountEqual(make_hashable(value), expected)

    def test_unhashable(self):
        class Unhashable:
            __hash__ = None

        with self.assertRaisesMessage(TypeError, "unhashable type: 'Unhashable'"):
            make_hashable(Unhashable())
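# Illustrative sketch (not part of the test suite): a recursive conversion that
# satisfies the assertions above would leave hashable values untouched, turn
# dicts into sorted (key, value) tuples, and turn other unhashable iterables
# into tuples. This is only a sketch of the behaviour the tests pin down, not
# the library source.
def _make_hashable_sketch(value):
    if isinstance(value, dict):
        return tuple(
            (key, _make_hashable_sketch(nested)) for key, nested in sorted(value.items())
        )
    try:
        # Hashable values (including strings and frozensets) pass through as-is.
        hash(value)
    except TypeError:
        if hasattr(value, '__iter__'):
            # Unhashable iterables (lists, sets, tuples containing lists, ...)
            # become tuples of converted items.
            return tuple(_make_hashable_sketch(item) for item in value)
        # Unhashable and not iterable: re-raise, as test_unhashable() expects.
        raise
    return value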
import platform import unittest from datetime import datetime from unittest import mock from django.test import SimpleTestCase, ignore_warnings from django.utils.datastructures import MultiValueDict from django.utils.deprecation import RemovedInDjango40Warning from django.utils.http import ( base36_to_int, escape_leading_slashes, http_date, int_to_base36, is_safe_url, is_same_domain, parse_etags, parse_http_date, parse_qsl, quote_etag, url_has_allowed_host_and_scheme, urlencode, urlquote, urlquote_plus, urlsafe_base64_decode, urlsafe_base64_encode, urlunquote, urlunquote_plus, ) class URLEncodeTests(SimpleTestCase): cannot_encode_none_msg = ( "Cannot encode None for key 'a' in a query string. Did you mean to " "pass an empty string or omit the value?" ) def test_tuples(self): self.assertEqual(urlencode((('a', 1), ('b', 2), ('c', 3))), 'a=1&b=2&c=3') def test_dict(self): result = urlencode({'a': 1, 'b': 2, 'c': 3}) # Dictionaries are treated as unordered. self.assertIn(result, [ 'a=1&b=2&c=3', 'a=1&c=3&b=2', 'b=2&a=1&c=3', 'b=2&c=3&a=1', 'c=3&a=1&b=2', 'c=3&b=2&a=1', ]) def test_dict_containing_sequence_not_doseq(self): self.assertEqual(urlencode({'a': [1, 2]}, doseq=False), 'a=%5B1%2C+2%5D') def test_dict_containing_tuple_not_doseq(self): self.assertEqual(urlencode({'a': (1, 2)}, doseq=False), 'a=%281%2C+2%29') def test_custom_iterable_not_doseq(self): class IterableWithStr: def __str__(self): return 'custom' def __iter__(self): yield from range(0, 3) self.assertEqual(urlencode({'a': IterableWithStr()}, doseq=False), 'a=custom') def test_dict_containing_sequence_doseq(self): self.assertEqual(urlencode({'a': [1, 2]}, doseq=True), 'a=1&a=2') def test_dict_containing_empty_sequence_doseq(self): self.assertEqual(urlencode({'a': []}, doseq=True), '') def test_multivaluedict(self): result = urlencode(MultiValueDict({ 'name': ['Adrian', 'Simon'], 'position': ['Developer'], }), doseq=True) # MultiValueDicts are similarly unordered. 
self.assertIn(result, [ 'name=Adrian&name=Simon&position=Developer', 'position=Developer&name=Adrian&name=Simon', ]) def test_dict_with_bytes_values(self): self.assertEqual(urlencode({'a': b'abc'}, doseq=True), 'a=abc') def test_dict_with_sequence_of_bytes(self): self.assertEqual(urlencode({'a': [b'spam', b'eggs', b'bacon']}, doseq=True), 'a=spam&a=eggs&a=bacon') def test_dict_with_bytearray(self): self.assertEqual(urlencode({'a': bytearray(range(2))}, doseq=True), 'a=0&a=1') def test_generator(self): self.assertEqual(urlencode({'a': range(2)}, doseq=True), 'a=0&a=1') self.assertEqual(urlencode({'a': range(2)}, doseq=False), 'a=range%280%2C+2%29') def test_none(self): with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg): urlencode({'a': None}) def test_none_in_sequence(self): with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg): urlencode({'a': [None]}, doseq=True) def test_none_in_generator(self): def gen(): yield None with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg): urlencode({'a': gen()}, doseq=True) class Base36IntTests(SimpleTestCase): def test_roundtrip(self): for n in [0, 1, 1000, 1000000]: self.assertEqual(n, base36_to_int(int_to_base36(n))) def test_negative_input(self): with self.assertRaisesMessage(ValueError, 'Negative base36 conversion input.'): int_to_base36(-1) def test_to_base36_errors(self): for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]: with self.assertRaises(TypeError): int_to_base36(n) def test_invalid_literal(self): for n in ['#', ' ']: with self.assertRaisesMessage(ValueError, "invalid literal for int() with base 36: '%s'" % n): base36_to_int(n) def test_input_too_large(self): with self.assertRaisesMessage(ValueError, 'Base36 input too large'): base36_to_int('1' * 14) def test_to_int_errors(self): for n in [123, {1: 2}, (1, 2, 3), 3.141]: with self.assertRaises(TypeError): base36_to_int(n) def test_values(self): for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]: self.assertEqual(int_to_base36(n), b36) self.assertEqual(base36_to_int(b36), n) class IsSafeURLTests(SimpleTestCase): def test_bad_urls(self): bad_urls = ( 'http://example.com', 'http:///example.com', 'https://example.com', 'ftp://example.com', r'\\example.com', r'\\\example.com', r'/\\/example.com', r'\\\example.com', r'\\example.com', r'\\//example.com', r'/\/example.com', r'\/example.com', r'/\example.com', 'http:///example.com', r'http:/\//example.com', r'http:\/example.com', r'http:/\example.com', 'javascript:alert("XSS")', '\njavascript:alert(x)', '\x08//example.com', r'http://otherserver\@example.com', r'http:\\testserver\@example.com', r'http://testserver\me:[email protected]', r'http://testserver\@example.com', r'http:\\testserver\confirm\[email protected]', 'http:999999999', 'ftp:9999999999', '\n', 'http://[2001:cdba:0000:0000:0000:0000:3257:9652/', 'http://2001:cdba:0000:0000:0000:0000:3257:9652]/', ) for bad_url in bad_urls: with self.subTest(url=bad_url): self.assertIs( url_has_allowed_host_and_scheme(bad_url, allowed_hosts={'testserver', 'testserver2'}), False, ) def test_good_urls(self): good_urls = ( '/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://example.com', 'view/?param=//example.com', 'https://testserver/', 'HTTPS://testserver/', '//testserver/', 'http://testserver/[email protected]', '/url%20with%20spaces/', 'path/http:2222222222', ) for good_url in good_urls: with self.subTest(url=good_url): self.assertIs( url_has_allowed_host_and_scheme(good_url, 
allowed_hosts={'otherserver', 'testserver'}), True, ) def test_basic_auth(self): # Valid basic auth credentials are allowed. self.assertIs( url_has_allowed_host_and_scheme(r'http://user:pass@testserver/', allowed_hosts={'user:pass@testserver'}), True, ) def test_no_allowed_hosts(self): # A path without host is allowed. self.assertIs(url_has_allowed_host_and_scheme('/confirm/[email protected]', allowed_hosts=None), True) # Basic auth without host is not allowed. self.assertIs(url_has_allowed_host_and_scheme(r'http://testserver\@example.com', allowed_hosts=None), False) def test_allowed_hosts_str(self): self.assertIs(url_has_allowed_host_and_scheme('http://good.com/good', allowed_hosts='good.com'), True) self.assertIs(url_has_allowed_host_and_scheme('http://good.co/evil', allowed_hosts='good.com'), False) def test_secure_param_https_urls(self): secure_urls = ( 'https://example.com/p', 'HTTPS://example.com/p', '/view/?param=http://example.com', ) for url in secure_urls: with self.subTest(url=url): self.assertIs( url_has_allowed_host_and_scheme(url, allowed_hosts={'example.com'}, require_https=True), True, ) def test_secure_param_non_https_urls(self): insecure_urls = ( 'http://example.com/p', 'ftp://example.com/p', '//example.com/p', ) for url in insecure_urls: with self.subTest(url=url): self.assertIs( url_has_allowed_host_and_scheme(url, allowed_hosts={'example.com'}, require_https=True), False, ) def test_is_safe_url_deprecated(self): msg = ( 'django.utils.http.is_safe_url() is deprecated in favor of ' 'url_has_allowed_host_and_scheme().' ) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): is_safe_url('https://example.com', allowed_hosts={'example.com'}) class URLSafeBase64Tests(unittest.TestCase): def test_roundtrip(self): bytestring = b'foo' encoded = urlsafe_base64_encode(bytestring) decoded = urlsafe_base64_decode(encoded) self.assertEqual(bytestring, decoded) @ignore_warnings(category=RemovedInDjango40Warning) class URLQuoteTests(unittest.TestCase): def test_quote(self): self.assertEqual(urlquote('Paris & Orl\xe9ans'), 'Paris%20%26%20Orl%C3%A9ans') self.assertEqual(urlquote('Paris & Orl\xe9ans', safe="&"), 'Paris%20&%20Orl%C3%A9ans') def test_unquote(self): self.assertEqual(urlunquote('Paris%20%26%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans') self.assertEqual(urlunquote('Paris%20&%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans') def test_quote_plus(self): self.assertEqual(urlquote_plus('Paris & Orl\xe9ans'), 'Paris+%26+Orl%C3%A9ans') self.assertEqual(urlquote_plus('Paris & Orl\xe9ans', safe="&"), 'Paris+&+Orl%C3%A9ans') def test_unquote_plus(self): self.assertEqual(urlunquote_plus('Paris+%26+Orl%C3%A9ans'), 'Paris & Orl\xe9ans') self.assertEqual(urlunquote_plus('Paris+&+Orl%C3%A9ans'), 'Paris & Orl\xe9ans') class IsSameDomainTests(unittest.TestCase): def test_good(self): for pair in ( ('example.com', 'example.com'), ('example.com', '.example.com'), ('foo.example.com', '.example.com'), ('example.com:8888', 'example.com:8888'), ('example.com:8888', '.example.com:8888'), ('foo.example.com:8888', '.example.com:8888'), ): self.assertIs(is_same_domain(*pair), True) def test_bad(self): for pair in ( ('example2.com', 'example.com'), ('foo.example.com', 'example.com'), ('example.com:9999', 'example.com:8888'), ('foo.example.com:8888', ''), ): self.assertIs(is_same_domain(*pair), False) class ETagProcessingTests(unittest.TestCase): def test_parsing(self): self.assertEqual( parse_etags(r'"" , "etag", "e\\tag", W/"weak"'), ['""', '"etag"', r'"e\\tag"', 'W/"weak"'] ) self.assertEqual(parse_etags('*'), 
['*']) # Ignore RFC 2616 ETags that are invalid according to RFC 7232. self.assertEqual(parse_etags(r'"etag", "e\"t\"ag"'), ['"etag"']) def test_quoting(self): self.assertEqual(quote_etag('etag'), '"etag"') # unquoted self.assertEqual(quote_etag('"etag"'), '"etag"') # quoted self.assertEqual(quote_etag('W/"etag"'), 'W/"etag"') # quoted, weak class HttpDateProcessingTests(unittest.TestCase): def test_http_date(self): t = 1167616461.0 self.assertEqual(http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT') def test_parsing_rfc1123(self): parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37)) @unittest.skipIf(platform.architecture()[0] == '32bit', 'The Year 2038 problem.') @mock.patch('django.utils.http.datetime.datetime') def test_parsing_rfc850(self, mocked_datetime): mocked_datetime.side_effect = datetime mocked_datetime.utcnow = mock.Mock() utcnow_1 = datetime(2019, 11, 6, 8, 49, 37) utcnow_2 = datetime(2020, 11, 6, 8, 49, 37) utcnow_3 = datetime(2048, 11, 6, 8, 49, 37) tests = ( (utcnow_1, 'Tuesday, 31-Dec-69 08:49:37 GMT', datetime(2069, 12, 31, 8, 49, 37)), (utcnow_1, 'Tuesday, 10-Nov-70 08:49:37 GMT', datetime(1970, 11, 10, 8, 49, 37)), (utcnow_1, 'Sunday, 06-Nov-94 08:49:37 GMT', datetime(1994, 11, 6, 8, 49, 37)), (utcnow_2, 'Wednesday, 31-Dec-70 08:49:37 GMT', datetime(2070, 12, 31, 8, 49, 37)), (utcnow_2, 'Friday, 31-Dec-71 08:49:37 GMT', datetime(1971, 12, 31, 8, 49, 37)), (utcnow_3, 'Sunday, 31-Dec-00 08:49:37 GMT', datetime(2000, 12, 31, 8, 49, 37)), (utcnow_3, 'Friday, 31-Dec-99 08:49:37 GMT', datetime(1999, 12, 31, 8, 49, 37)), ) for utcnow, rfc850str, expected_date in tests: with self.subTest(rfc850str=rfc850str): mocked_datetime.utcnow.return_value = utcnow parsed = parse_http_date(rfc850str) self.assertEqual(datetime.utcfromtimestamp(parsed), expected_date) def test_parsing_asctime(self): parsed = parse_http_date('Sun Nov 6 08:49:37 1994') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37)) def test_parsing_year_less_than_70(self): parsed = parse_http_date('Sun Nov 6 08:49:37 0037') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(2037, 11, 6, 8, 49, 37)) class EscapeLeadingSlashesTests(unittest.TestCase): def test(self): tests = ( ('//example.com', '/%2Fexample.com'), ('//', '/%2F'), ) for url, expected in tests: with self.subTest(url=url): self.assertEqual(escape_leading_slashes(url), expected) # TODO: Remove when dropping support for PY37. Backport of unit tests for # urllib.parse.parse_qsl() from Python 3.8. Copyright (C) 2020 Python Software # Foundation (see LICENSE.python). 
class ParseQSLBackportTests(unittest.TestCase): def test_parse_qsl(self): tests = [ ('', []), ('&', []), ('&&', []), ('=', [('', '')]), ('=a', [('', 'a')]), ('a', [('a', '')]), ('a=', [('a', '')]), ('&a=b', [('a', 'b')]), ('a=a+b&b=b+c', [('a', 'a b'), ('b', 'b c')]), ('a=1&a=2', [('a', '1'), ('a', '2')]), (b'', []), (b'&', []), (b'&&', []), (b'=', [(b'', b'')]), (b'=a', [(b'', b'a')]), (b'a', [(b'a', b'')]), (b'a=', [(b'a', b'')]), (b'&a=b', [(b'a', b'b')]), (b'a=a+b&b=b+c', [(b'a', b'a b'), (b'b', b'b c')]), (b'a=1&a=2', [(b'a', b'1'), (b'a', b'2')]), (';', []), (';;', []), (';a=b', [('a', 'b')]), ('a=a+b;b=b+c', [('a', 'a b'), ('b', 'b c')]), ('a=1;a=2', [('a', '1'), ('a', '2')]), (b';', []), (b';;', []), (b';a=b', [(b'a', b'b')]), (b'a=a+b;b=b+c', [(b'a', b'a b'), (b'b', b'b c')]), (b'a=1;a=2', [(b'a', b'1'), (b'a', b'2')]), ] for original, expected in tests: with self.subTest(original): result = parse_qsl(original, keep_blank_values=True) self.assertEqual(result, expected, 'Error parsing %r' % original) expect_without_blanks = [v for v in expected if len(v[1])] result = parse_qsl(original, keep_blank_values=False) self.assertEqual(result, expect_without_blanks, 'Error parsing %r' % original) def test_parse_qsl_encoding(self): result = parse_qsl('key=\u0141%E9', encoding='latin-1') self.assertEqual(result, [('key', '\u0141\xE9')]) result = parse_qsl('key=\u0141%C3%A9', encoding='utf-8') self.assertEqual(result, [('key', '\u0141\xE9')]) result = parse_qsl('key=\u0141%C3%A9', encoding='ascii') self.assertEqual(result, [('key', '\u0141\ufffd\ufffd')]) result = parse_qsl('key=\u0141%E9-', encoding='ascii') self.assertEqual(result, [('key', '\u0141\ufffd-')]) result = parse_qsl('key=\u0141%E9-', encoding='ascii', errors='ignore') self.assertEqual(result, [('key', '\u0141-')]) def test_parse_qsl_max_num_fields(self): with self.assertRaises(ValueError): parse_qsl('&'.join(['a=a'] * 11), max_num_fields=10) with self.assertRaises(ValueError): parse_qsl(';'.join(['a=a'] * 11), max_num_fields=10) parse_qsl('&'.join(['a=a'] * 10), max_num_fields=10)
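# Illustrative usage sketch (not part of the test suite): the checks exercised
# by IsSafeURLTests above are typically applied to a user-supplied redirect
# target before honouring it. The helper below is hypothetical; it only relies
# on the url_has_allowed_host_and_scheme() signature used in those tests.
def _safe_redirect_target_sketch(next_url, request_host, request_is_secure):
    # Reject URLs that point at another host or downgrade an HTTPS request,
    # falling back to a known-safe local path.
    if next_url and url_has_allowed_host_and_scheme(
        next_url,
        allowed_hosts={request_host},
        require_https=request_is_secure,
    ):
        return next_url
    return '/'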
""" A test spanning all the capabilities of all the serializers. This class defines sample data and a dynamically generated test case that is capable of testing the capabilities of the serializers. This includes all valid data values, plus forward, backwards and self references. """ import datetime import decimal import uuid from django.core import serializers from django.db import connection, models from django.test import TestCase from .models import ( Anchor, AutoNowDateTimeData, BigIntegerData, BinaryData, BooleanData, BooleanPKData, CharData, CharPKData, DateData, DatePKData, DateTimeData, DateTimePKData, DecimalData, DecimalPKData, EmailData, EmailPKData, ExplicitInheritBaseModel, FileData, FilePathData, FilePathPKData, FKData, FKDataToField, FKDataToO2O, FKSelfData, FKToUUID, FloatData, FloatPKData, GenericData, GenericIPAddressData, GenericIPAddressPKData, InheritAbstractModel, InheritBaseModel, IntegerData, IntegerPKData, Intermediate, LengthModel, M2MData, M2MIntermediateData, M2MSelfData, ModifyingSaveData, NullBooleanData, O2OData, PositiveBigIntegerData, PositiveIntegerData, PositiveIntegerPKData, PositiveSmallIntegerData, PositiveSmallIntegerPKData, SlugData, SlugPKData, SmallData, SmallPKData, Tag, TextData, TimeData, UniqueAnchor, UUIDData, UUIDDefaultData, ) from .tests import register_tests # A set of functions that can be used to recreate # test data objects of various kinds. # The save method is a raw base model save, to make # sure that the data in the database matches the # exact test case. def data_create(pk, klass, data): instance = klass(id=pk) instance.data = data models.Model.save_base(instance, raw=True) return [instance] def generic_create(pk, klass, data): instance = klass(id=pk) instance.data = data[0] models.Model.save_base(instance, raw=True) for tag in data[1:]: instance.tags.create(data=tag) return [instance] def fk_create(pk, klass, data): instance = klass(id=pk) setattr(instance, 'data_id', data) models.Model.save_base(instance, raw=True) return [instance] def m2m_create(pk, klass, data): instance = klass(id=pk) models.Model.save_base(instance, raw=True) instance.data.set(data) return [instance] def im2m_create(pk, klass, data): instance = klass(id=pk) models.Model.save_base(instance, raw=True) return [instance] def im_create(pk, klass, data): instance = klass(id=pk) instance.right_id = data['right'] instance.left_id = data['left'] if 'extra' in data: instance.extra = data['extra'] models.Model.save_base(instance, raw=True) return [instance] def o2o_create(pk, klass, data): instance = klass() instance.data_id = data models.Model.save_base(instance, raw=True) return [instance] def pk_create(pk, klass, data): instance = klass() instance.data = data models.Model.save_base(instance, raw=True) return [instance] def inherited_create(pk, klass, data): instance = klass(id=pk, **data) # This isn't a raw save because: # 1) we're testing inheritance, not field behavior, so none # of the field values need to be protected. # 2) saving the child class and having the parent created # automatically is easier than manually creating both. 
models.Model.save(instance) created = [instance] for klass in instance._meta.parents: created.append(klass.objects.get(id=pk)) return created # A set of functions that can be used to compare # test data objects of various kinds def data_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) if klass == BinaryData and data is not None: testcase.assertEqual( bytes(data), bytes(instance.data), "Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % ( pk, repr(bytes(data)), type(data), repr(bytes(instance.data)), type(instance.data), ) ) else: testcase.assertEqual( data, instance.data, "Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % ( pk, data, type(data), instance, type(instance.data), ) ) def generic_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) testcase.assertEqual(data[0], instance.data) testcase.assertEqual(data[1:], [t.data for t in instance.tags.order_by('id')]) def fk_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) testcase.assertEqual(data, instance.data_id) def m2m_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) testcase.assertEqual(data, [obj.id for obj in instance.data.order_by('id')]) def im2m_compare(testcase, pk, klass, data): klass.objects.get(id=pk) # actually nothing else to check, the instance just should exist def im_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) testcase.assertEqual(data['left'], instance.left_id) testcase.assertEqual(data['right'], instance.right_id) if 'extra' in data: testcase.assertEqual(data['extra'], instance.extra) else: testcase.assertEqual("doesn't matter", instance.extra) def o2o_compare(testcase, pk, klass, data): instance = klass.objects.get(data=data) testcase.assertEqual(data, instance.data_id) def pk_compare(testcase, pk, klass, data): instance = klass.objects.get(data=data) testcase.assertEqual(data, instance.data) def inherited_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) for key, value in data.items(): testcase.assertEqual(value, getattr(instance, key)) # Define some data types. Each data type is # actually a pair of functions; one to create # and one to compare objects of that type data_obj = (data_create, data_compare) generic_obj = (generic_create, generic_compare) fk_obj = (fk_create, fk_compare) m2m_obj = (m2m_create, m2m_compare) im2m_obj = (im2m_create, im2m_compare) im_obj = (im_create, im_compare) o2o_obj = (o2o_create, o2o_compare) pk_obj = (pk_create, pk_compare) inherited_obj = (inherited_create, inherited_compare) uuid_obj = uuid.uuid4() test_data = [ # Format: (data type, PK value, Model Class, data) (data_obj, 1, BinaryData, memoryview(b"\x05\xFD\x00")), (data_obj, 2, BinaryData, None), (data_obj, 5, BooleanData, True), (data_obj, 6, BooleanData, False), (data_obj, 7, BooleanData, None), (data_obj, 10, CharData, "Test Char Data"), (data_obj, 11, CharData, ""), (data_obj, 12, CharData, "None"), (data_obj, 13, CharData, "null"), (data_obj, 14, CharData, "NULL"), (data_obj, 15, CharData, None), # (We use something that will fit into a latin1 database encoding here, # because that is still the default used on many system setups.) 
(data_obj, 16, CharData, '\xa5'), (data_obj, 20, DateData, datetime.date(2006, 6, 16)), (data_obj, 21, DateData, None), (data_obj, 30, DateTimeData, datetime.datetime(2006, 6, 16, 10, 42, 37)), (data_obj, 31, DateTimeData, None), (data_obj, 40, EmailData, "[email protected]"), (data_obj, 41, EmailData, None), (data_obj, 42, EmailData, ""), (data_obj, 50, FileData, 'file:///foo/bar/whiz.txt'), # (data_obj, 51, FileData, None), (data_obj, 52, FileData, ""), (data_obj, 60, FilePathData, "/foo/bar/whiz.txt"), (data_obj, 61, FilePathData, None), (data_obj, 62, FilePathData, ""), (data_obj, 70, DecimalData, decimal.Decimal('12.345')), (data_obj, 71, DecimalData, decimal.Decimal('-12.345')), (data_obj, 72, DecimalData, decimal.Decimal('0.0')), (data_obj, 73, DecimalData, None), (data_obj, 74, FloatData, 12.345), (data_obj, 75, FloatData, -12.345), (data_obj, 76, FloatData, 0.0), (data_obj, 77, FloatData, None), (data_obj, 80, IntegerData, 123456789), (data_obj, 81, IntegerData, -123456789), (data_obj, 82, IntegerData, 0), (data_obj, 83, IntegerData, None), # (XX, ImageData (data_obj, 95, GenericIPAddressData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"), (data_obj, 96, GenericIPAddressData, None), (data_obj, 100, NullBooleanData, True), (data_obj, 101, NullBooleanData, False), (data_obj, 102, NullBooleanData, None), (data_obj, 110, PositiveBigIntegerData, 9223372036854775807), (data_obj, 111, PositiveBigIntegerData, None), (data_obj, 120, PositiveIntegerData, 123456789), (data_obj, 121, PositiveIntegerData, None), (data_obj, 130, PositiveSmallIntegerData, 12), (data_obj, 131, PositiveSmallIntegerData, None), (data_obj, 140, SlugData, "this-is-a-slug"), (data_obj, 141, SlugData, None), (data_obj, 142, SlugData, ""), (data_obj, 150, SmallData, 12), (data_obj, 151, SmallData, -12), (data_obj, 152, SmallData, 0), (data_obj, 153, SmallData, None), (data_obj, 160, TextData, """This is a long piece of text. It contains line breaks. Several of them. 
The end."""), (data_obj, 161, TextData, ""), (data_obj, 162, TextData, None), (data_obj, 170, TimeData, datetime.time(10, 42, 37)), (data_obj, 171, TimeData, None), (generic_obj, 200, GenericData, ['Generic Object 1', 'tag1', 'tag2']), (generic_obj, 201, GenericData, ['Generic Object 2', 'tag2', 'tag3']), (data_obj, 300, Anchor, "Anchor 1"), (data_obj, 301, Anchor, "Anchor 2"), (data_obj, 302, UniqueAnchor, "UAnchor 1"), (fk_obj, 400, FKData, 300), # Post reference (fk_obj, 401, FKData, 500), # Pre reference (fk_obj, 402, FKData, None), # Empty reference (m2m_obj, 410, M2MData, []), # Empty set (m2m_obj, 411, M2MData, [300, 301]), # Post reference (m2m_obj, 412, M2MData, [500, 501]), # Pre reference (m2m_obj, 413, M2MData, [300, 301, 500, 501]), # Pre and Post reference (o2o_obj, None, O2OData, 300), # Post reference (o2o_obj, None, O2OData, 500), # Pre reference (fk_obj, 430, FKSelfData, 431), # Pre reference (fk_obj, 431, FKSelfData, 430), # Post reference (fk_obj, 432, FKSelfData, None), # Empty reference (m2m_obj, 440, M2MSelfData, []), (m2m_obj, 441, M2MSelfData, []), (m2m_obj, 442, M2MSelfData, [440, 441]), (m2m_obj, 443, M2MSelfData, [445, 446]), (m2m_obj, 444, M2MSelfData, [440, 441, 445, 446]), (m2m_obj, 445, M2MSelfData, []), (m2m_obj, 446, M2MSelfData, []), (fk_obj, 450, FKDataToField, "UAnchor 1"), (fk_obj, 451, FKDataToField, "UAnchor 2"), (fk_obj, 452, FKDataToField, None), (fk_obj, 460, FKDataToO2O, 300), (im2m_obj, 470, M2MIntermediateData, None), # testing post- and pre-references and extra fields (im_obj, 480, Intermediate, {'right': 300, 'left': 470}), (im_obj, 481, Intermediate, {'right': 300, 'left': 490}), (im_obj, 482, Intermediate, {'right': 500, 'left': 470}), (im_obj, 483, Intermediate, {'right': 500, 'left': 490}), (im_obj, 484, Intermediate, {'right': 300, 'left': 470, 'extra': "extra"}), (im_obj, 485, Intermediate, {'right': 300, 'left': 490, 'extra': "extra"}), (im_obj, 486, Intermediate, {'right': 500, 'left': 470, 'extra': "extra"}), (im_obj, 487, Intermediate, {'right': 500, 'left': 490, 'extra': "extra"}), (im2m_obj, 490, M2MIntermediateData, []), (data_obj, 500, Anchor, "Anchor 3"), (data_obj, 501, Anchor, "Anchor 4"), (data_obj, 502, UniqueAnchor, "UAnchor 2"), (pk_obj, 601, BooleanPKData, True), (pk_obj, 602, BooleanPKData, False), (pk_obj, 610, CharPKData, "Test Char PKData"), (pk_obj, 620, DatePKData, datetime.date(2006, 6, 16)), (pk_obj, 630, DateTimePKData, datetime.datetime(2006, 6, 16, 10, 42, 37)), (pk_obj, 640, EmailPKData, "[email protected]"), # (pk_obj, 650, FilePKData, 'file:///foo/bar/whiz.txt'), (pk_obj, 660, FilePathPKData, "/foo/bar/whiz.txt"), (pk_obj, 670, DecimalPKData, decimal.Decimal('12.345')), (pk_obj, 671, DecimalPKData, decimal.Decimal('-12.345')), (pk_obj, 672, DecimalPKData, decimal.Decimal('0.0')), (pk_obj, 673, FloatPKData, 12.345), (pk_obj, 674, FloatPKData, -12.345), (pk_obj, 675, FloatPKData, 0.0), (pk_obj, 680, IntegerPKData, 123456789), (pk_obj, 681, IntegerPKData, -123456789), (pk_obj, 682, IntegerPKData, 0), # (XX, ImagePKData (pk_obj, 695, GenericIPAddressPKData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"), (pk_obj, 720, PositiveIntegerPKData, 123456789), (pk_obj, 730, PositiveSmallIntegerPKData, 12), (pk_obj, 740, SlugPKData, "this-is-a-slug"), (pk_obj, 750, SmallPKData, 12), (pk_obj, 751, SmallPKData, -12), (pk_obj, 752, SmallPKData, 0), # (pk_obj, 760, TextPKData, """This is a long piece of text. # It contains line breaks. # Several of them. 
# The end."""), # (pk_obj, 770, TimePKData, datetime.time(10, 42, 37)), # (pk_obj, 790, XMLPKData, "<foo></foo>"), (pk_obj, 791, UUIDData, uuid_obj), (fk_obj, 792, FKToUUID, uuid_obj), (pk_obj, 793, UUIDDefaultData, uuid_obj), (data_obj, 800, AutoNowDateTimeData, datetime.datetime(2006, 6, 16, 10, 42, 37)), (data_obj, 810, ModifyingSaveData, 42), (inherited_obj, 900, InheritAbstractModel, {'child_data': 37, 'parent_data': 42}), (inherited_obj, 910, ExplicitInheritBaseModel, {'child_data': 37, 'parent_data': 42}), (inherited_obj, 920, InheritBaseModel, {'child_data': 37, 'parent_data': 42}), (data_obj, 1000, BigIntegerData, 9223372036854775807), (data_obj, 1001, BigIntegerData, -9223372036854775808), (data_obj, 1002, BigIntegerData, 0), (data_obj, 1003, BigIntegerData, None), (data_obj, 1004, LengthModel, 0), (data_obj, 1005, LengthModel, 1), ] # Because Oracle treats the empty string as NULL, Oracle is expected to fail # when field.empty_strings_allowed is True and the value is None; skip these # tests. if connection.features.interprets_empty_strings_as_nulls: test_data = [data for data in test_data if not (data[0] == data_obj and data[2]._meta.get_field('data').empty_strings_allowed and data[3] is None)] class SerializerDataTests(TestCase): pass def serializerTest(self, format): # FK to an object with PK of 0. This won't work on MySQL without the # NO_AUTO_VALUE_ON_ZERO SQL mode since it won't let you create an object # with an autoincrement primary key of 0. if connection.features.allows_auto_pk_0: test_data.extend([ (data_obj, 0, Anchor, 'Anchor 0'), (fk_obj, 465, FKData, 0), ]) # Create all the objects defined in the test data objects = [] instance_count = {} for (func, pk, klass, datum) in test_data: with connection.constraint_checks_disabled(): objects.extend(func[0](pk, klass, datum)) # Get a count of the number of objects created for each class for klass in instance_count: instance_count[klass] = klass.objects.count() # Add the generic tagged objects to the object list objects.extend(Tag.objects.all()) # Serialize the test database serialized_data = serializers.serialize(format, objects, indent=2) for obj in serializers.deserialize(format, serialized_data): obj.save() # Assert that the deserialized data is the same # as the original source for (func, pk, klass, datum) in test_data: func[1](self, pk, klass, datum) # Assert that the number of objects deserialized is the # same as the number that was serialized. for klass, count in instance_count.items(): self.assertEqual(count, klass.objects.count()) register_tests(SerializerDataTests, 'test_%s_serializer', serializerTest)
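# Note (assumption): register_tests(), defined alongside these serializer
# tests, is expected to attach one generated test_<format>_serializer method
# to SerializerDataTests for every registered serializer format (e.g. json,
# xml, yaml), so serializerTest() above runs once per format.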
from datetime import date from django import forms from django.contrib.admin.models import ADDITION, CHANGE, DELETION, LogEntry from django.contrib.admin.options import ( HORIZONTAL, VERTICAL, ModelAdmin, TabularInline, get_content_type_for_model, ) from django.contrib.admin.sites import AdminSite from django.contrib.admin.widgets import ( AdminDateWidget, AdminRadioSelect, AutocompleteSelect, AutocompleteSelectMultiple, ) from django.contrib.auth.models import User from django.db import models from django.forms.widgets import Select from django.test import SimpleTestCase, TestCase from django.test.utils import isolate_apps from .models import Band, Concert, Song class MockRequest: pass class MockSuperUser: def has_perm(self, perm, obj=None): return True request = MockRequest() request.user = MockSuperUser() class ModelAdminTests(TestCase): @classmethod def setUpTestData(cls): cls.band = Band.objects.create( name='The Doors', bio='', sign_date=date(1965, 1, 1), ) def setUp(self): self.site = AdminSite() def test_modeladmin_str(self): ma = ModelAdmin(Band, self.site) self.assertEqual(str(ma), 'modeladmin.ModelAdmin') # form/fields/fieldsets interaction ############################## def test_default_fields(self): ma = ModelAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date']) self.assertEqual(list(ma.get_fields(request)), ['name', 'bio', 'sign_date']) self.assertEqual(list(ma.get_fields(request, self.band)), ['name', 'bio', 'sign_date']) self.assertIsNone(ma.get_exclude(request, self.band)) def test_default_fieldsets(self): # fieldsets_add and fieldsets_change should return a special data structure that # is used in the templates. They should generate the "right thing" whether we # have specified a custom form, the fields argument, or nothing at all. # # Here's the default case. There are no custom form_add/form_change methods, # no fields argument, and no fieldsets argument. ma = ModelAdmin(Band, self.site) self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name', 'bio', 'sign_date']})]) self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name', 'bio', 'sign_date']})]) def test_get_fieldsets(self): # get_fieldsets() is called when figuring out form fields (#18681). class BandAdmin(ModelAdmin): def get_fieldsets(self, request, obj=None): return [(None, {'fields': ['name', 'bio']})] ma = BandAdmin(Band, self.site) form = ma.get_form(None) self.assertEqual(form._meta.fields, ['name', 'bio']) class InlineBandAdmin(TabularInline): model = Concert fk_name = 'main_band' can_delete = False def get_fieldsets(self, request, obj=None): return [(None, {'fields': ['day', 'transport']})] ma = InlineBandAdmin(Band, self.site) form = ma.get_formset(None).form self.assertEqual(form._meta.fields, ['day', 'transport']) def test_lookup_allowed_allows_nonexistent_lookup(self): """ A lookup_allowed allows a parameter whose field lookup doesn't exist. (#21129). 
""" class BandAdmin(ModelAdmin): fields = ['name'] ma = BandAdmin(Band, self.site) self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value')) @isolate_apps('modeladmin') def test_lookup_allowed_onetoone(self): class Department(models.Model): code = models.CharField(max_length=4, unique=True) class Employee(models.Model): department = models.ForeignKey(Department, models.CASCADE, to_field="code") class EmployeeProfile(models.Model): employee = models.OneToOneField(Employee, models.CASCADE) class EmployeeInfo(models.Model): employee = models.OneToOneField(Employee, models.CASCADE) description = models.CharField(max_length=100) class EmployeeProfileAdmin(ModelAdmin): list_filter = [ 'employee__employeeinfo__description', 'employee__department__code', ] ma = EmployeeProfileAdmin(EmployeeProfile, self.site) # Reverse OneToOneField self.assertIs(ma.lookup_allowed('employee__employeeinfo__description', 'test_value'), True) # OneToOneField and ForeignKey self.assertIs(ma.lookup_allowed('employee__department__code', 'test_value'), True) def test_field_arguments(self): # If fields is specified, fieldsets_add and fieldsets_change should # just stick the fields into a formsets structure and return it. class BandAdmin(ModelAdmin): fields = ['name'] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_fields(request)), ['name']) self.assertEqual(list(ma.get_fields(request, self.band)), ['name']) self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name']})]) self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name']})]) def test_field_arguments_restricted_on_form(self): # If fields or fieldsets is specified, it should exclude fields on the # Form class to the fields specified. This may cause errors to be # raised in the db layer if required model fields aren't in fields/ # fieldsets, but that's preferable to ghost errors where a field in the # Form class isn't being displayed because it's not in fields/fieldsets. # Using `fields`. class BandAdmin(ModelAdmin): fields = ['name'] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['name']) self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name']) # Using `fieldsets`. class BandAdmin(ModelAdmin): fieldsets = [(None, {'fields': ['name']})] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['name']) self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name']) # Using `exclude`. class BandAdmin(ModelAdmin): exclude = ['bio'] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date']) # You can also pass a tuple to `exclude`. class BandAdmin(ModelAdmin): exclude = ('bio',) ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date']) # Using `fields` and `exclude`. class BandAdmin(ModelAdmin): fields = ['name', 'bio'] exclude = ['bio'] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['name']) def test_custom_form_meta_exclude_with_readonly(self): """ The custom ModelForm's `Meta.exclude` is respected when used in conjunction with `ModelAdmin.readonly_fields` and when no `ModelAdmin.exclude` is defined (#14496). 
""" # With ModelAdmin class AdminBandForm(forms.ModelForm): class Meta: model = Band exclude = ['bio'] class BandAdmin(ModelAdmin): readonly_fields = ['name'] form = AdminBandForm ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['sign_date']) # With InlineModelAdmin class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ['day'] class ConcertInline(TabularInline): readonly_fields = ['transport'] form = AdminConcertForm fk_name = 'main_band' model = Concert class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ['main_band', 'opening_band', 'id', 'DELETE']) def test_custom_formfield_override_readonly(self): class AdminBandForm(forms.ModelForm): name = forms.CharField() class Meta: exclude = () model = Band class BandAdmin(ModelAdmin): form = AdminBandForm readonly_fields = ['name'] ma = BandAdmin(Band, self.site) # `name` shouldn't appear in base_fields because it's part of # readonly_fields. self.assertEqual( list(ma.get_form(request).base_fields), ['bio', 'sign_date'] ) # But it should appear in get_fields()/fieldsets() so it can be # displayed as read-only. self.assertEqual( list(ma.get_fields(request)), ['bio', 'sign_date', 'name'] ) self.assertEqual( list(ma.get_fieldsets(request)), [(None, {'fields': ['bio', 'sign_date', 'name']})] ) def test_custom_form_meta_exclude(self): """ The custom ModelForm's `Meta.exclude` is overridden if `ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined (#14496). """ # With ModelAdmin class AdminBandForm(forms.ModelForm): class Meta: model = Band exclude = ['bio'] class BandAdmin(ModelAdmin): exclude = ['name'] form = AdminBandForm ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['bio', 'sign_date']) # With InlineModelAdmin class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ['day'] class ConcertInline(TabularInline): exclude = ['transport'] form = AdminConcertForm fk_name = 'main_band' model = Concert class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ['main_band', 'opening_band', 'day', 'id', 'DELETE'] ) def test_overriding_get_exclude(self): class BandAdmin(ModelAdmin): def get_exclude(self, request, obj=None): return ['name'] self.assertEqual( list(BandAdmin(Band, self.site).get_form(request).base_fields), ['bio', 'sign_date'] ) def test_get_exclude_overrides_exclude(self): class BandAdmin(ModelAdmin): exclude = ['bio'] def get_exclude(self, request, obj=None): return ['name'] self.assertEqual( list(BandAdmin(Band, self.site).get_form(request).base_fields), ['bio', 'sign_date'] ) def test_get_exclude_takes_obj(self): class BandAdmin(ModelAdmin): def get_exclude(self, request, obj=None): if obj: return ['sign_date'] return ['name'] self.assertEqual( list(BandAdmin(Band, self.site).get_form(request, self.band).base_fields), ['name', 'bio'] ) def test_custom_form_validation(self): # If a form is specified, it should use it allowing custom validation # to work properly. This won't break any of the admin widgets or media. 
class AdminBandForm(forms.ModelForm): delete = forms.BooleanField() class BandAdmin(ModelAdmin): form = AdminBandForm ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date', 'delete']) self.assertEqual(type(ma.get_form(request).base_fields['sign_date'].widget), AdminDateWidget) def test_form_exclude_kwarg_override(self): """ The `exclude` kwarg passed to `ModelAdmin.get_form()` overrides all other declarations (#8999). """ class AdminBandForm(forms.ModelForm): class Meta: model = Band exclude = ['name'] class BandAdmin(ModelAdmin): exclude = ['sign_date'] form = AdminBandForm def get_form(self, request, obj=None, **kwargs): kwargs['exclude'] = ['bio'] return super().get_form(request, obj, **kwargs) ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date']) def test_formset_exclude_kwarg_override(self): """ The `exclude` kwarg passed to `InlineModelAdmin.get_formset()` overrides all other declarations (#8999). """ class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ['day'] class ConcertInline(TabularInline): exclude = ['transport'] form = AdminConcertForm fk_name = 'main_band' model = Concert def get_formset(self, request, obj=None, **kwargs): kwargs['exclude'] = ['opening_band'] return super().get_formset(request, obj, **kwargs) class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ['main_band', 'day', 'transport', 'id', 'DELETE'] ) def test_formset_overriding_get_exclude_with_form_fields(self): class AdminConcertForm(forms.ModelForm): class Meta: model = Concert fields = ['main_band', 'opening_band', 'day', 'transport'] class ConcertInline(TabularInline): form = AdminConcertForm fk_name = 'main_band' model = Concert def get_exclude(self, request, obj=None): return ['opening_band'] class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ['main_band', 'day', 'transport', 'id', 'DELETE'] ) def test_formset_overriding_get_exclude_with_form_exclude(self): class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ['day'] class ConcertInline(TabularInline): form = AdminConcertForm fk_name = 'main_band' model = Concert def get_exclude(self, request, obj=None): return ['opening_band'] class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ['main_band', 'day', 'transport', 'id', 'DELETE'] ) def test_raw_id_fields_widget_override(self): """ The autocomplete_fields, raw_id_fields, and radio_fields widgets may overridden by specifying a widget in get_formset(). """ class ConcertInline(TabularInline): model = Concert fk_name = 'main_band' raw_id_fields = ('opening_band',) def get_formset(self, request, obj=None, **kwargs): kwargs['widgets'] = {'opening_band': Select} return super().get_formset(request, obj, **kwargs) class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) band_widget = list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields['opening_band'].widget # Without the override this would be ForeignKeyRawIdWidget. 
self.assertIsInstance(band_widget, Select) def test_queryset_override(self): # If the queryset of a ModelChoiceField in a custom form is overridden, # RelatedFieldWidgetWrapper doesn't mess that up. band2 = Band.objects.create(name='The Beatles', bio='', sign_date=date(1962, 1, 1)) ma = ModelAdmin(Concert, self.site) form = ma.get_form(request)() self.assertHTMLEqual( str(form["main_band"]), '<div class="related-widget-wrapper">' '<select name="main_band" id="id_main_band" required>' '<option value="" selected>---------</option>' '<option value="%d">The Beatles</option>' '<option value="%d">The Doors</option>' '</select></div>' % (band2.id, self.band.id) ) class AdminConcertForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields["main_band"].queryset = Band.objects.filter(name='The Doors') class ConcertAdminWithForm(ModelAdmin): form = AdminConcertForm ma = ConcertAdminWithForm(Concert, self.site) form = ma.get_form(request)() self.assertHTMLEqual( str(form["main_band"]), '<div class="related-widget-wrapper">' '<select name="main_band" id="id_main_band" required>' '<option value="" selected>---------</option>' '<option value="%d">The Doors</option>' '</select></div>' % self.band.id ) def test_regression_for_ticket_15820(self): """ `obj` is passed from `InlineModelAdmin.get_fieldsets()` to `InlineModelAdmin.get_formset()`. """ class CustomConcertForm(forms.ModelForm): class Meta: model = Concert fields = ['day'] class ConcertInline(TabularInline): model = Concert fk_name = 'main_band' def get_formset(self, request, obj=None, **kwargs): if obj: kwargs['form'] = CustomConcertForm return super().get_formset(request, obj, **kwargs) class BandAdmin(ModelAdmin): inlines = [ConcertInline] Concert.objects.create(main_band=self.band, opening_band=self.band, day=1) ma = BandAdmin(Band, self.site) inline_instances = ma.get_inline_instances(request) fieldsets = list(inline_instances[0].get_fieldsets(request)) self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport']) fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model)) self.assertEqual(fieldsets[0][1]['fields'], ['day']) # radio_fields behavior ########################################### def test_default_foreign_key_widget(self): # First, without any radio_fields specified, the widgets for ForeignKey # and fields with choices specified ought to be a basic Select widget. # ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so # they need to be handled properly when type checking. For Select fields, all of # the choices lists have a first entry of dashes. 
cma = ModelAdmin(Concert, self.site) cmafa = cma.get_form(request) self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), Select) self.assertEqual( list(cmafa.base_fields['main_band'].widget.choices), [('', '---------'), (self.band.id, 'The Doors')]) self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), Select) self.assertEqual( list(cmafa.base_fields['opening_band'].widget.choices), [('', '---------'), (self.band.id, 'The Doors')] ) self.assertEqual(type(cmafa.base_fields['day'].widget), Select) self.assertEqual( list(cmafa.base_fields['day'].widget.choices), [('', '---------'), (1, 'Fri'), (2, 'Sat')] ) self.assertEqual(type(cmafa.base_fields['transport'].widget), Select) self.assertEqual( list(cmafa.base_fields['transport'].widget.choices), [('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]) def test_foreign_key_as_radio_field(self): # Now specify all the fields as radio_fields. Widgets should now be # RadioSelect, and the choices list should have a first entry of 'None' if # blank=True for the model field. Finally, the widget should have the # 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL. class ConcertAdmin(ModelAdmin): radio_fields = { 'main_band': HORIZONTAL, 'opening_band': VERTICAL, 'day': VERTICAL, 'transport': HORIZONTAL, } cma = ConcertAdmin(Concert, self.site) cmafa = cma.get_form(request) self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['main_band'].widget.attrs, {'class': 'radiolist inline'}) self.assertEqual( list(cmafa.base_fields['main_band'].widget.choices), [(self.band.id, 'The Doors')] ) self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs, {'class': 'radiolist'}) self.assertEqual( list(cmafa.base_fields['opening_band'].widget.choices), [('', 'None'), (self.band.id, 'The Doors')] ) self.assertEqual(type(cmafa.base_fields['day'].widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['day'].widget.attrs, {'class': 'radiolist'}) self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [(1, 'Fri'), (2, 'Sat')]) self.assertEqual(type(cmafa.base_fields['transport'].widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['transport'].widget.attrs, {'class': 'radiolist inline'}) self.assertEqual( list(cmafa.base_fields['transport'].widget.choices), [('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')] ) class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ('transport',) class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['main_band', 'opening_band', 'day']) class AdminConcertForm(forms.ModelForm): extra = forms.CharField() class Meta: model = Concert fields = ['extra', 'transport'] class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ['extra', 'transport']) class ConcertInline(TabularInline): form = AdminConcertForm model = Concert fk_name = 'main_band' can_delete = True class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ['extra', 'transport', 'id', 'DELETE', 'main_band'] ) def test_log_actions(self): ma = ModelAdmin(Band, self.site) mock_request = MockRequest() 
mock_request.user = User.objects.create(username='bill') content_type = get_content_type_for_model(self.band) tests = ( (ma.log_addition, ADDITION, {'added': {}}), (ma.log_change, CHANGE, {'changed': {'fields': ['name', 'bio']}}), (ma.log_deletion, DELETION, str(self.band)), ) for method, flag, message in tests: with self.subTest(name=method.__name__): created = method(mock_request, self.band, message) fetched = LogEntry.objects.filter(action_flag=flag).latest('id') self.assertEqual(created, fetched) self.assertEqual(fetched.action_flag, flag) self.assertEqual(fetched.content_type, content_type) self.assertEqual(fetched.object_id, str(self.band.pk)) self.assertEqual(fetched.user, mock_request.user) if flag == DELETION: self.assertEqual(fetched.change_message, '') self.assertEqual(fetched.object_repr, message) else: self.assertEqual(fetched.change_message, str(message)) self.assertEqual(fetched.object_repr, str(self.band)) def test_get_autocomplete_fields(self): class NameAdmin(ModelAdmin): search_fields = ['name'] class SongAdmin(ModelAdmin): autocomplete_fields = ['featuring'] fields = ['featuring', 'band'] class OtherSongAdmin(SongAdmin): def get_autocomplete_fields(self, request): return ['band'] self.site.register(Band, NameAdmin) try: # Uses autocomplete_fields if not overridden. model_admin = SongAdmin(Song, self.site) form = model_admin.get_form(request)() self.assertIsInstance(form.fields['featuring'].widget.widget, AutocompleteSelectMultiple) # Uses overridden get_autocomplete_fields model_admin = OtherSongAdmin(Song, self.site) form = model_admin.get_form(request)() self.assertIsInstance(form.fields['band'].widget.widget, AutocompleteSelect) finally: self.site.unregister(Band) def test_get_deleted_objects(self): mock_request = MockRequest() mock_request.user = User.objects.create_superuser(username='bob', email='[email protected]', password='test') self.site.register(Band, ModelAdmin) ma = self.site._registry[Band] deletable_objects, model_count, perms_needed, protected = ma.get_deleted_objects([self.band], request) self.assertEqual(deletable_objects, ['Band: The Doors']) self.assertEqual(model_count, {'bands': 1}) self.assertEqual(perms_needed, set()) self.assertEqual(protected, []) def test_get_deleted_objects_with_custom_has_delete_permission(self): """ ModelAdmin.get_deleted_objects() uses ModelAdmin.has_delete_permission() for permissions checking. 
""" mock_request = MockRequest() mock_request.user = User.objects.create_superuser(username='bob', email='[email protected]', password='test') class TestModelAdmin(ModelAdmin): def has_delete_permission(self, request, obj=None): return False self.site.register(Band, TestModelAdmin) ma = self.site._registry[Band] deletable_objects, model_count, perms_needed, protected = ma.get_deleted_objects([self.band], request) self.assertEqual(deletable_objects, ['Band: The Doors']) self.assertEqual(model_count, {'bands': 1}) self.assertEqual(perms_needed, {'band'}) self.assertEqual(protected, []) class ModelAdminPermissionTests(SimpleTestCase): class MockUser: def has_module_perms(self, app_label): return app_label == 'modeladmin' class MockViewUser(MockUser): def has_perm(self, perm, obj=None): return perm == 'modeladmin.view_band' class MockAddUser(MockUser): def has_perm(self, perm, obj=None): return perm == 'modeladmin.add_band' class MockChangeUser(MockUser): def has_perm(self, perm, obj=None): return perm == 'modeladmin.change_band' class MockDeleteUser(MockUser): def has_perm(self, perm, obj=None): return perm == 'modeladmin.delete_band' def test_has_view_permission(self): """ has_view_permission() returns True for users who can view objects and False for users who can't. """ ma = ModelAdmin(Band, AdminSite()) request = MockRequest() request.user = self.MockViewUser() self.assertIs(ma.has_view_permission(request), True) request.user = self.MockAddUser() self.assertIs(ma.has_view_permission(request), False) request.user = self.MockChangeUser() self.assertIs(ma.has_view_permission(request), True) request.user = self.MockDeleteUser() self.assertIs(ma.has_view_permission(request), False) def test_has_add_permission(self): """ has_add_permission returns True for users who can add objects and False for users who can't. """ ma = ModelAdmin(Band, AdminSite()) request = MockRequest() request.user = self.MockViewUser() self.assertFalse(ma.has_add_permission(request)) request.user = self.MockAddUser() self.assertTrue(ma.has_add_permission(request)) request.user = self.MockChangeUser() self.assertFalse(ma.has_add_permission(request)) request.user = self.MockDeleteUser() self.assertFalse(ma.has_add_permission(request)) def test_inline_has_add_permission_uses_obj(self): class ConcertInline(TabularInline): model = Concert def has_add_permission(self, request, obj): return bool(obj) class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, AdminSite()) request = MockRequest() request.user = self.MockAddUser() self.assertEqual(ma.get_inline_instances(request), []) band = Band(name='The Doors', bio='', sign_date=date(1965, 1, 1)) inline_instances = ma.get_inline_instances(request, band) self.assertEqual(len(inline_instances), 1) self.assertIsInstance(inline_instances[0], ConcertInline) def test_has_change_permission(self): """ has_change_permission returns True for users who can edit objects and False for users who can't. """ ma = ModelAdmin(Band, AdminSite()) request = MockRequest() request.user = self.MockViewUser() self.assertIs(ma.has_change_permission(request), False) request.user = self.MockAddUser() self.assertFalse(ma.has_change_permission(request)) request.user = self.MockChangeUser() self.assertTrue(ma.has_change_permission(request)) request.user = self.MockDeleteUser() self.assertFalse(ma.has_change_permission(request)) def test_has_delete_permission(self): """ has_delete_permission returns True for users who can delete objects and False for users who can't. 
""" ma = ModelAdmin(Band, AdminSite()) request = MockRequest() request.user = self.MockViewUser() self.assertIs(ma.has_delete_permission(request), False) request.user = self.MockAddUser() self.assertFalse(ma.has_delete_permission(request)) request.user = self.MockChangeUser() self.assertFalse(ma.has_delete_permission(request)) request.user = self.MockDeleteUser() self.assertTrue(ma.has_delete_permission(request)) def test_has_module_permission(self): """ as_module_permission returns True for users who have any permission for the module and False for users who don't. """ ma = ModelAdmin(Band, AdminSite()) request = MockRequest() request.user = self.MockViewUser() self.assertIs(ma.has_module_permission(request), True) request.user = self.MockAddUser() self.assertTrue(ma.has_module_permission(request)) request.user = self.MockChangeUser() self.assertTrue(ma.has_module_permission(request)) request.user = self.MockDeleteUser() self.assertTrue(ma.has_module_permission(request)) original_app_label = ma.opts.app_label ma.opts.app_label = 'anotherapp' try: request.user = self.MockViewUser() self.assertIs(ma.has_module_permission(request), False) request.user = self.MockAddUser() self.assertFalse(ma.has_module_permission(request)) request.user = self.MockChangeUser() self.assertFalse(ma.has_module_permission(request)) request.user = self.MockDeleteUser() self.assertFalse(ma.has_module_permission(request)) finally: ma.opts.app_label = original_app_label
""" Tests for django test runner """ import unittest from unittest import mock from admin_scripts.tests import AdminScriptTestCase from django import db from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management import call_command from django.core.management.base import SystemCheckError from django.test import TransactionTestCase, skipUnlessDBFeature, testcases from django.test.runner import DiscoverRunner from django.test.testcases import connections_support_transactions from django.test.utils import captured_stderr, dependency_ordered from .models import B, Person, Through class DependencyOrderingTests(unittest.TestCase): def test_simple_dependencies(self): raw = [ ('s1', ('s1_db', ['alpha'])), ('s2', ('s2_db', ['bravo'])), ('s3', ('s3_db', ['charlie'])), ] dependencies = { 'alpha': ['charlie'], 'bravo': ['charlie'], } ordered = dependency_ordered(raw, dependencies=dependencies) ordered_sigs = [sig for sig, value in ordered] self.assertIn('s1', ordered_sigs) self.assertIn('s2', ordered_sigs) self.assertIn('s3', ordered_sigs) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1')) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2')) def test_chained_dependencies(self): raw = [ ('s1', ('s1_db', ['alpha'])), ('s2', ('s2_db', ['bravo'])), ('s3', ('s3_db', ['charlie'])), ] dependencies = { 'alpha': ['bravo'], 'bravo': ['charlie'], } ordered = dependency_ordered(raw, dependencies=dependencies) ordered_sigs = [sig for sig, value in ordered] self.assertIn('s1', ordered_sigs) self.assertIn('s2', ordered_sigs) self.assertIn('s3', ordered_sigs) # Explicit dependencies self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1')) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2')) # Implied dependencies self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1')) def test_multiple_dependencies(self): raw = [ ('s1', ('s1_db', ['alpha'])), ('s2', ('s2_db', ['bravo'])), ('s3', ('s3_db', ['charlie'])), ('s4', ('s4_db', ['delta'])), ] dependencies = { 'alpha': ['bravo', 'delta'], 'bravo': ['charlie'], 'delta': ['charlie'], } ordered = dependency_ordered(raw, dependencies=dependencies) ordered_sigs = [sig for sig, aliases in ordered] self.assertIn('s1', ordered_sigs) self.assertIn('s2', ordered_sigs) self.assertIn('s3', ordered_sigs) self.assertIn('s4', ordered_sigs) # Explicit dependencies self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1')) self.assertLess(ordered_sigs.index('s4'), ordered_sigs.index('s1')) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2')) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s4')) # Implicit dependencies self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1')) def test_circular_dependencies(self): raw = [ ('s1', ('s1_db', ['alpha'])), ('s2', ('s2_db', ['bravo'])), ] dependencies = { 'bravo': ['alpha'], 'alpha': ['bravo'], } with self.assertRaises(ImproperlyConfigured): dependency_ordered(raw, dependencies=dependencies) def test_own_alias_dependency(self): raw = [ ('s1', ('s1_db', ['alpha', 'bravo'])) ] dependencies = { 'alpha': ['bravo'] } with self.assertRaises(ImproperlyConfigured): dependency_ordered(raw, dependencies=dependencies) # reordering aliases shouldn't matter raw = [ ('s1', ('s1_db', ['bravo', 'alpha'])) ] with self.assertRaises(ImproperlyConfigured): dependency_ordered(raw, dependencies=dependencies) class MockTestRunner: def __init__(self, *args, **kwargs): pass MockTestRunner.run_tests = 
mock.Mock(return_value=[]) class ManageCommandTests(unittest.TestCase): def test_custom_test_runner(self): call_command('test', 'sites', testrunner='test_runner.tests.MockTestRunner') MockTestRunner.run_tests.assert_called_with(('sites',)) def test_bad_test_runner(self): with self.assertRaises(AttributeError): call_command('test', 'sites', testrunner='test_runner.NonexistentRunner') def test_time_recorded(self): with captured_stderr() as stderr: call_command('test', '--timing', 'sites', testrunner='test_runner.tests.MockTestRunner') self.assertIn('Total run took', stderr.getvalue()) class CustomTestRunnerOptionsSettingsTests(AdminScriptTestCase): """ Custom runners can add command line arguments. The runner is specified through a settings file. """ def setUp(self): super().setUp() settings = { 'TEST_RUNNER': '\'test_runner.runner.CustomOptionsTestRunner\'', } self.write_settings('settings.py', sdict=settings) def test_default_options(self): args = ['test', '--settings=test_project.settings'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, '1:2:3') def test_default_and_given_options(self): args = ['test', '--settings=test_project.settings', '--option_b=foo'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, '1:foo:3') def test_option_name_and_value_separated(self): args = ['test', '--settings=test_project.settings', '--option_b', 'foo'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, '1:foo:3') def test_all_options_given(self): args = ['test', '--settings=test_project.settings', '--option_a=bar', '--option_b=foo', '--option_c=31337'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, 'bar:foo:31337') class CustomTestRunnerOptionsCmdlineTests(AdminScriptTestCase): """ Custom runners can add command line arguments when the runner is specified using --testrunner. """ def setUp(self): super().setUp() self.write_settings('settings.py') def test_testrunner_option(self): args = [ 'test', '--testrunner', 'test_runner.runner.CustomOptionsTestRunner', '--option_a=bar', '--option_b=foo', '--option_c=31337' ] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, 'bar:foo:31337') def test_testrunner_equals(self): args = [ 'test', '--testrunner=test_runner.runner.CustomOptionsTestRunner', '--option_a=bar', '--option_b=foo', '--option_c=31337' ] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, 'bar:foo:31337') def test_no_testrunner(self): args = ['test', '--testrunner'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertIn('usage', err) self.assertNotIn('Traceback', err) self.assertNoOutput(out) class Ticket17477RegressionTests(AdminScriptTestCase): def setUp(self): super().setUp() self.write_settings('settings.py') def test_ticket_17477(self): """'manage.py help test' works after r16352.""" args = ['help', 'test'] out, err = self.run_manage(args) self.assertNoOutput(err) class SQLiteInMemoryTestDbs(TransactionTestCase): available_apps = ['test_runner'] databases = {'default', 'other'} @unittest.skipUnless(all(db.connections[conn].vendor == 'sqlite' for conn in db.connections), "This is an sqlite-specific issue") def test_transaction_support(self): # Assert connections mocking is appropriately applied by preventing # any attempts at calling create_test_db on the global connection # objects. 
for connection in db.connections.all(): create_test_db = mock.patch.object( connection.creation, 'create_test_db', side_effect=AssertionError("Global connection object shouldn't be manipulated.") ) create_test_db.start() self.addCleanup(create_test_db.stop) for option_key, option_value in ( ('NAME', ':memory:'), ('TEST', {'NAME': ':memory:'})): tested_connections = db.ConnectionHandler({ 'default': { 'ENGINE': 'django.db.backends.sqlite3', option_key: option_value, }, 'other': { 'ENGINE': 'django.db.backends.sqlite3', option_key: option_value, }, }) with mock.patch('django.test.utils.connections', new=tested_connections): other = tested_connections['other'] DiscoverRunner(verbosity=0).setup_databases() msg = ( "DATABASES setting '%s' option set to sqlite3's ':memory:' value " "shouldn't interfere with transaction support detection." % option_key ) # Transaction support is properly initialized for the 'other' DB. self.assertTrue(other.features.supports_transactions, msg) # And all the DBs report that they support transactions. self.assertTrue(connections_support_transactions(), msg) class DummyBackendTest(unittest.TestCase): def test_setup_databases(self): """ setup_databases() doesn't fail with dummy database backend. """ tested_connections = db.ConnectionHandler({}) with mock.patch('django.test.utils.connections', new=tested_connections): runner_instance = DiscoverRunner(verbosity=0) old_config = runner_instance.setup_databases() runner_instance.teardown_databases(old_config) class AliasedDefaultTestSetupTest(unittest.TestCase): def test_setup_aliased_default_database(self): """ setup_databases() doesn't fail when 'default' is aliased """ tested_connections = db.ConnectionHandler({ 'default': { 'NAME': 'dummy' }, 'aliased': { 'NAME': 'dummy' } }) with mock.patch('django.test.utils.connections', new=tested_connections): runner_instance = DiscoverRunner(verbosity=0) old_config = runner_instance.setup_databases() runner_instance.teardown_databases(old_config) class SetupDatabasesTests(unittest.TestCase): def setUp(self): self.runner_instance = DiscoverRunner(verbosity=0) def test_setup_aliased_databases(self): tested_connections = db.ConnectionHandler({ 'default': { 'ENGINE': 'django.db.backends.dummy', 'NAME': 'dbname', }, 'other': { 'ENGINE': 'django.db.backends.dummy', 'NAME': 'dbname', } }) with mock.patch('django.db.backends.dummy.base.DatabaseWrapper.creation_class') as mocked_db_creation: with mock.patch('django.test.utils.connections', new=tested_connections): old_config = self.runner_instance.setup_databases() self.runner_instance.teardown_databases(old_config) mocked_db_creation.return_value.destroy_test_db.assert_called_once_with('dbname', 0, False) def test_destroy_test_db_restores_db_name(self): tested_connections = db.ConnectionHandler({ 'default': { 'ENGINE': settings.DATABASES[db.DEFAULT_DB_ALIAS]["ENGINE"], 'NAME': 'xxx_test_database', }, }) # Using the real current name as old_name to not mess with the test suite. 
old_name = settings.DATABASES[db.DEFAULT_DB_ALIAS]["NAME"] with mock.patch('django.db.connections', new=tested_connections): tested_connections['default'].creation.destroy_test_db(old_name, verbosity=0, keepdb=True) self.assertEqual(tested_connections['default'].settings_dict["NAME"], old_name) def test_serialization(self): tested_connections = db.ConnectionHandler({ 'default': { 'ENGINE': 'django.db.backends.dummy', }, }) with mock.patch('django.db.backends.dummy.base.DatabaseWrapper.creation_class') as mocked_db_creation: with mock.patch('django.test.utils.connections', new=tested_connections): self.runner_instance.setup_databases() mocked_db_creation.return_value.create_test_db.assert_called_once_with( verbosity=0, autoclobber=False, serialize=True, keepdb=False ) def test_serialized_off(self): tested_connections = db.ConnectionHandler({ 'default': { 'ENGINE': 'django.db.backends.dummy', 'TEST': {'SERIALIZE': False}, }, }) with mock.patch('django.db.backends.dummy.base.DatabaseWrapper.creation_class') as mocked_db_creation: with mock.patch('django.test.utils.connections', new=tested_connections): self.runner_instance.setup_databases() mocked_db_creation.return_value.create_test_db.assert_called_once_with( verbosity=0, autoclobber=False, serialize=False, keepdb=False ) @skipUnlessDBFeature('supports_sequence_reset') class AutoIncrementResetTest(TransactionTestCase): """ Creating the same models in different test methods receive the same PK values since the sequences are reset before each test method. """ available_apps = ['test_runner'] reset_sequences = True def _test(self): # Regular model p = Person.objects.create(first_name='Jack', last_name='Smith') self.assertEqual(p.pk, 1) # Auto-created many-to-many through model p.friends.add(Person.objects.create(first_name='Jacky', last_name='Smith')) self.assertEqual(p.friends.through.objects.first().pk, 1) # Many-to-many through model b = B.objects.create() t = Through.objects.create(person=p, b=b) self.assertEqual(t.pk, 1) def test_autoincrement_reset1(self): self._test() def test_autoincrement_reset2(self): self._test() class EmptyDefaultDatabaseTest(unittest.TestCase): def test_empty_default_database(self): """ An empty default database in settings does not raise an ImproperlyConfigured error when running a unit test that does not use a database. """ testcases.connections = db.ConnectionHandler({'default': {}}) connection = testcases.connections[db.utils.DEFAULT_DB_ALIAS] self.assertEqual(connection.settings_dict['ENGINE'], 'django.db.backends.dummy') connections_support_transactions() class RunTestsExceptionHandlingTests(unittest.TestCase): def test_run_checks_raises(self): """ Teardown functions are run when run_checks() raises SystemCheckError. 
""" with mock.patch('django.test.runner.DiscoverRunner.setup_test_environment'), \ mock.patch('django.test.runner.DiscoverRunner.setup_databases'), \ mock.patch('django.test.runner.DiscoverRunner.build_suite'), \ mock.patch('django.test.runner.DiscoverRunner.run_checks', side_effect=SystemCheckError), \ mock.patch('django.test.runner.DiscoverRunner.teardown_databases') as teardown_databases, \ mock.patch('django.test.runner.DiscoverRunner.teardown_test_environment') as teardown_test_environment: runner = DiscoverRunner(verbosity=0, interactive=False) with self.assertRaises(SystemCheckError): runner.run_tests(['test_runner_apps.sample.tests_sample.TestDjangoTestCase']) self.assertTrue(teardown_databases.called) self.assertTrue(teardown_test_environment.called) def test_run_checks_raises_and_teardown_raises(self): """ SystemCheckError is surfaced when run_checks() raises SystemCheckError and teardown databases() raises ValueError. """ with mock.patch('django.test.runner.DiscoverRunner.setup_test_environment'), \ mock.patch('django.test.runner.DiscoverRunner.setup_databases'), \ mock.patch('django.test.runner.DiscoverRunner.build_suite'), \ mock.patch('django.test.runner.DiscoverRunner.run_checks', side_effect=SystemCheckError), \ mock.patch('django.test.runner.DiscoverRunner.teardown_databases', side_effect=ValueError) \ as teardown_databases, \ mock.patch('django.test.runner.DiscoverRunner.teardown_test_environment') as teardown_test_environment: runner = DiscoverRunner(verbosity=0, interactive=False) with self.assertRaises(SystemCheckError): runner.run_tests(['test_runner_apps.sample.tests_sample.TestDjangoTestCase']) self.assertTrue(teardown_databases.called) self.assertFalse(teardown_test_environment.called) def test_run_checks_passes_and_teardown_raises(self): """ Exceptions on teardown are surfaced if no exceptions happen during run_checks(). """ with mock.patch('django.test.runner.DiscoverRunner.setup_test_environment'), \ mock.patch('django.test.runner.DiscoverRunner.setup_databases'), \ mock.patch('django.test.runner.DiscoverRunner.build_suite'), \ mock.patch('django.test.runner.DiscoverRunner.run_checks'), \ mock.patch('django.test.runner.DiscoverRunner.teardown_databases', side_effect=ValueError) \ as teardown_databases, \ mock.patch('django.test.runner.DiscoverRunner.teardown_test_environment') as teardown_test_environment: runner = DiscoverRunner(verbosity=0, interactive=False) with self.assertRaises(ValueError): # Suppress the output when running TestDjangoTestCase. with mock.patch('sys.stderr'): runner.run_tests(['test_runner_apps.sample.tests_sample.TestDjangoTestCase']) self.assertTrue(teardown_databases.called) self.assertFalse(teardown_test_environment.called)
import os from argparse import ArgumentParser from contextlib import contextmanager from unittest import ( TestSuite, TextTestRunner, defaultTestLoader, mock, skipUnless, ) from django.db import connections from django.test import SimpleTestCase from django.test.runner import DiscoverRunner from django.test.utils import ( NullTimeKeeper, TimeKeeper, captured_stderr, captured_stdout, ) from django.utils.version import PY37 @contextmanager def change_cwd(directory): current_dir = os.path.abspath(os.path.dirname(__file__)) new_dir = os.path.join(current_dir, directory) old_cwd = os.getcwd() os.chdir(new_dir) try: yield finally: os.chdir(old_cwd) class DiscoverRunnerTests(SimpleTestCase): @staticmethod def get_test_methods_names(suite): return [ t.__class__.__name__ + '.' + t._testMethodName for t in suite._tests ] def test_init_debug_mode(self): runner = DiscoverRunner() self.assertFalse(runner.debug_mode) def test_add_arguments_debug_mode(self): parser = ArgumentParser() DiscoverRunner.add_arguments(parser) ns = parser.parse_args([]) self.assertFalse(ns.debug_mode) ns = parser.parse_args(["--debug-mode"]) self.assertTrue(ns.debug_mode) def test_dotted_test_module(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample'], ).countTestCases() self.assertEqual(count, 4) def test_dotted_test_class_vanilla_unittest(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample.TestVanillaUnittest'], ).countTestCases() self.assertEqual(count, 1) def test_dotted_test_class_django_testcase(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample.TestDjangoTestCase'], ).countTestCases() self.assertEqual(count, 1) def test_dotted_test_method_django_testcase(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample.TestDjangoTestCase.test_sample'], ).countTestCases() self.assertEqual(count, 1) def test_pattern(self): count = DiscoverRunner( pattern="*_tests.py", ).build_suite(['test_runner_apps.sample']).countTestCases() self.assertEqual(count, 1) @skipUnless(PY37, 'unittest -k option requires Python 3.7 and later') def test_name_patterns(self): all_test_1 = [ 'DjangoCase1.test_1', 'DjangoCase2.test_1', 'SimpleCase1.test_1', 'SimpleCase2.test_1', 'UnittestCase1.test_1', 'UnittestCase2.test_1', ] all_test_2 = [ 'DjangoCase1.test_2', 'DjangoCase2.test_2', 'SimpleCase1.test_2', 'SimpleCase2.test_2', 'UnittestCase1.test_2', 'UnittestCase2.test_2', ] all_tests = sorted([*all_test_1, *all_test_2, 'UnittestCase2.test_3_test']) for pattern, expected in [ [['test_1'], all_test_1], [['UnittestCase1'], ['UnittestCase1.test_1', 'UnittestCase1.test_2']], [['*test'], ['UnittestCase2.test_3_test']], [['test*'], all_tests], [['test'], all_tests], [['test_1', 'test_2'], sorted([*all_test_1, *all_test_2])], [['test*1'], all_test_1], ]: with self.subTest(pattern): suite = DiscoverRunner( test_name_patterns=pattern ).build_suite(['test_runner_apps.simple']) self.assertEqual(expected, self.get_test_methods_names(suite)) def test_file_path(self): with change_cwd(".."): count = DiscoverRunner().build_suite( ['test_runner_apps/sample/'], ).countTestCases() self.assertEqual(count, 5) def test_empty_label(self): """ If the test label is empty, discovery should happen on the current working directory. 
""" with change_cwd("."): suite = DiscoverRunner().build_suite([]) self.assertEqual( suite._tests[0].id().split(".")[0], os.path.basename(os.getcwd()), ) def test_empty_test_case(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample.EmptyTestCase'], ).countTestCases() self.assertEqual(count, 0) def test_discovery_on_package(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests'], ).countTestCases() self.assertEqual(count, 1) def test_ignore_adjacent(self): """ When given a dotted path to a module, unittest discovery searches not just the module, but also the directory containing the module. This results in tests from adjacent modules being run when they should not. The discover runner avoids this behavior. """ count = DiscoverRunner().build_suite( ['test_runner_apps.sample.empty'], ).countTestCases() self.assertEqual(count, 0) def test_testcase_ordering(self): with change_cwd(".."): suite = DiscoverRunner().build_suite(['test_runner_apps/sample/']) self.assertEqual( suite._tests[0].__class__.__name__, 'TestDjangoTestCase', msg="TestDjangoTestCase should be the first test case") self.assertEqual( suite._tests[1].__class__.__name__, 'TestZimpleTestCase', msg="TestZimpleTestCase should be the second test case") # All others can follow in unspecified order, including doctests self.assertIn('DocTestCase', [t.__class__.__name__ for t in suite._tests[2:]]) def test_duplicates_ignored(self): """ Tests shouldn't be discovered twice when discovering on overlapping paths. """ base_app = 'forms_tests' sub_app = 'forms_tests.field_tests' with self.modify_settings(INSTALLED_APPS={'append': sub_app}): single = DiscoverRunner().build_suite([base_app]).countTestCases() dups = DiscoverRunner().build_suite([base_app, sub_app]).countTestCases() self.assertEqual(single, dups) def test_reverse(self): """ Reverse should reorder tests while maintaining the grouping specified by ``DiscoverRunner.reorder_by``. 
""" runner = DiscoverRunner(reverse=True) suite = runner.build_suite( test_labels=('test_runner_apps.sample', 'test_runner_apps.simple')) self.assertIn('test_runner_apps.simple', next(iter(suite)).id(), msg="Test labels should be reversed.") suite = runner.build_suite(test_labels=('test_runner_apps.simple',)) suite = tuple(suite) self.assertIn('DjangoCase', suite[0].id(), msg="Test groups should not be reversed.") self.assertIn('SimpleCase', suite[4].id(), msg="Test groups order should be preserved.") self.assertIn('DjangoCase2', suite[0].id(), msg="Django test cases should be reversed.") self.assertIn('SimpleCase2', suite[4].id(), msg="Simple test cases should be reversed.") self.assertIn('UnittestCase2', suite[8].id(), msg="Unittest test cases should be reversed.") self.assertIn('test_2', suite[0].id(), msg="Methods of Django cases should be reversed.") self.assertIn('test_2', suite[4].id(), msg="Methods of simple cases should be reversed.") self.assertIn('test_2', suite[9].id(), msg="Methods of unittest cases should be reversed.") def test_overridable_get_test_runner_kwargs(self): self.assertIsInstance(DiscoverRunner().get_test_runner_kwargs(), dict) def test_overridable_test_suite(self): self.assertEqual(DiscoverRunner().test_suite, TestSuite) def test_overridable_test_runner(self): self.assertEqual(DiscoverRunner().test_runner, TextTestRunner) def test_overridable_test_loader(self): self.assertEqual(DiscoverRunner().test_loader, defaultTestLoader) def test_tags(self): runner = DiscoverRunner(tags=['core']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 1) runner = DiscoverRunner(tags=['fast']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 2) runner = DiscoverRunner(tags=['slow']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 2) def test_exclude_tags(self): runner = DiscoverRunner(tags=['fast'], exclude_tags=['core']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 1) runner = DiscoverRunner(tags=['fast'], exclude_tags=['slow']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 0) runner = DiscoverRunner(exclude_tags=['slow']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 0) def test_tag_inheritance(self): def count_tests(**kwargs): suite = DiscoverRunner(**kwargs).build_suite(['test_runner_apps.tagged.tests_inheritance']) return suite.countTestCases() self.assertEqual(count_tests(tags=['foo']), 4) self.assertEqual(count_tests(tags=['bar']), 2) self.assertEqual(count_tests(tags=['baz']), 2) self.assertEqual(count_tests(tags=['foo'], exclude_tags=['bar']), 2) self.assertEqual(count_tests(tags=['foo'], exclude_tags=['bar', 'baz']), 1) self.assertEqual(count_tests(exclude_tags=['foo']), 0) def test_included_tags_displayed(self): runner = DiscoverRunner(tags=['foo', 'bar'], verbosity=2) with captured_stdout() as stdout: runner.build_suite(['test_runner_apps.tagged.tests']) self.assertIn('Including test tag(s): bar, foo.\n', stdout.getvalue()) def test_excluded_tags_displayed(self): runner = DiscoverRunner(exclude_tags=['foo', 'bar'], verbosity=3) with captured_stdout() as stdout: runner.build_suite(['test_runner_apps.tagged.tests']) self.assertIn('Excluding test tag(s): bar, foo.\n', stdout.getvalue()) def test_pdb_with_parallel(self): msg = ( 'You cannot use --pdb with parallel tests; pass --parallel=1 to ' 'use it.' 
) with self.assertRaisesMessage(ValueError, msg): DiscoverRunner(pdb=True, parallel=2) def test_buffer_with_parallel(self): msg = ( 'You cannot use -b/--buffer with parallel tests; pass ' '--parallel=1 to use it.' ) with self.assertRaisesMessage(ValueError, msg): DiscoverRunner(buffer=True, parallel=2) def test_buffer_mode_test_pass(self): runner = DiscoverRunner(buffer=True, verbose=0) with captured_stdout() as stdout, captured_stderr() as stderr: suite = runner.build_suite([ 'test_runner_apps.buffer.tests_buffer.WriteToStdoutStderrTestCase.test_pass', ]) runner.run_suite(suite) self.assertNotIn('Write to stderr.', stderr.getvalue()) self.assertNotIn('Write to stdout.', stdout.getvalue()) def test_buffer_mode_test_fail(self): runner = DiscoverRunner(buffer=True, verbose=0) with captured_stdout() as stdout, captured_stderr() as stderr: suite = runner.build_suite([ 'test_runner_apps.buffer.tests_buffer.WriteToStdoutStderrTestCase.test_fail', ]) runner.run_suite(suite) self.assertIn('Write to stderr.', stderr.getvalue()) self.assertIn('Write to stdout.', stdout.getvalue()) @mock.patch('faulthandler.enable') def test_faulthandler_enabled(self, mocked_enable): with mock.patch('faulthandler.is_enabled', return_value=False): DiscoverRunner(enable_faulthandler=True) mocked_enable.assert_called() @mock.patch('faulthandler.enable') def test_faulthandler_already_enabled(self, mocked_enable): with mock.patch('faulthandler.is_enabled', return_value=True): DiscoverRunner(enable_faulthandler=True) mocked_enable.assert_not_called() @mock.patch('faulthandler.enable') def test_faulthandler_enabled_fileno(self, mocked_enable): # sys.stderr that is not an actual file. with mock.patch('faulthandler.is_enabled', return_value=False), captured_stderr(): DiscoverRunner(enable_faulthandler=True) mocked_enable.assert_called() @mock.patch('faulthandler.enable') def test_faulthandler_disabled(self, mocked_enable): with mock.patch('faulthandler.is_enabled', return_value=False): DiscoverRunner(enable_faulthandler=False) mocked_enable.assert_not_called() def test_timings_not_captured(self): runner = DiscoverRunner(timing=False) with captured_stderr() as stderr: with runner.time_keeper.timed('test'): pass runner.time_keeper.print_results() self.assertTrue(isinstance(runner.time_keeper, NullTimeKeeper)) self.assertNotIn('test', stderr.getvalue()) def test_timings_captured(self): runner = DiscoverRunner(timing=True) with captured_stderr() as stderr: with runner.time_keeper.timed('test'): pass runner.time_keeper.print_results() self.assertTrue(isinstance(runner.time_keeper, TimeKeeper)) self.assertIn('test', stderr.getvalue()) class DiscoverRunnerGetDatabasesTests(SimpleTestCase): runner = DiscoverRunner(verbosity=2) skip_msg = 'Skipping setup of unused database(s): ' def get_databases(self, test_labels): suite = self.runner.build_suite(test_labels) with captured_stdout() as stdout: databases = self.runner.get_databases(suite) return databases, stdout.getvalue() def assertSkippedDatabases(self, test_labels, expected_databases): databases, output = self.get_databases(test_labels) self.assertEqual(databases, expected_databases) skipped_databases = set(connections) - expected_databases if skipped_databases: self.assertIn(self.skip_msg + ', '.join(sorted(skipped_databases)), output) else: self.assertNotIn(self.skip_msg, output) def test_mixed(self): databases, output = self.get_databases(['test_runner_apps.databases.tests']) self.assertEqual(databases, set(connections)) self.assertNotIn(self.skip_msg, output) def 
test_all(self): databases, output = self.get_databases(['test_runner_apps.databases.tests.AllDatabasesTests']) self.assertEqual(databases, set(connections)) self.assertNotIn(self.skip_msg, output) def test_default_and_other(self): self.assertSkippedDatabases([ 'test_runner_apps.databases.tests.DefaultDatabaseTests', 'test_runner_apps.databases.tests.OtherDatabaseTests', ], {'default', 'other'}) def test_default_only(self): self.assertSkippedDatabases([ 'test_runner_apps.databases.tests.DefaultDatabaseTests', ], {'default'}) def test_other_only(self): self.assertSkippedDatabases([ 'test_runner_apps.databases.tests.OtherDatabaseTests' ], {'other'}) def test_no_databases_required(self): self.assertSkippedDatabases([ 'test_runner_apps.databases.tests.NoDatabaseTests' ], set())
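# --- Illustrative sketch (editor's addition, not part of the suite above) ---
# DiscoverRunner.get_databases() unions the ``databases`` attribute declared on
# the test classes in the built suite; that is how the fixture classes referenced
# above (AllDatabasesTests, DefaultDatabaseTests, OtherDatabaseTests,
# NoDatabaseTests) opt in to connections. The real fixtures live in
# test_runner_apps/databases/tests.py; the classes below are only a minimal
# approximation of that contract, assuming the documented SimpleTestCase API,
# and the Sketch* names are hypothetical.

from django.test import SimpleTestCase, TestCase


class SketchNoDatabaseTests(SimpleTestCase):
    # SimpleTestCase.databases defaults to an empty set, so no database setup
    # is requested for this class.
    def test_nothing(self):
        self.assertTrue(True)


class SketchDefaultDatabaseTests(TestCase):
    # TestCase already defaults to {'default'}; spelled out here for clarity.
    databases = {'default'}

    def test_default(self):
        self.assertTrue(True)


class SketchOtherDatabaseTests(TestCase):
    databases = {'other'}

    def test_other(self):
        self.assertTrue(True)


class SketchAllDatabasesTests(TestCase):
    # '__all__' requests setup of every configured connection alias.
    databases = '__all__'

    def test_all(self):
        self.assertTrue(True)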
from datetime import datetime from decimal import Decimal from django import forms from django.conf import settings from django.contrib.admin import helpers from django.contrib.admin.utils import ( NestedObjects, display_for_field, display_for_value, flatten, flatten_fieldsets, help_text_for_field, label_for_field, lookup_field, quote, ) from django.db import DEFAULT_DB_ALIAS, models from django.test import SimpleTestCase, TestCase, override_settings from django.utils.formats import localize from django.utils.safestring import mark_safe from .models import ( Article, Car, Count, Event, EventGuide, Location, Site, Vehicle, ) class NestedObjectsTests(TestCase): """ Tests for ``NestedObject`` utility collection. """ @classmethod def setUpTestData(cls): cls.n = NestedObjects(using=DEFAULT_DB_ALIAS) cls.objs = [Count.objects.create(num=i) for i in range(5)] def _check(self, target): self.assertEqual(self.n.nested(lambda obj: obj.num), target) def _connect(self, i, j): self.objs[i].parent = self.objs[j] self.objs[i].save() def _collect(self, *indices): self.n.collect([self.objs[i] for i in indices]) def test_unrelated_roots(self): self._connect(2, 1) self._collect(0) self._collect(1) self._check([0, 1, [2]]) def test_siblings(self): self._connect(1, 0) self._connect(2, 0) self._collect(0) self._check([0, [1, 2]]) def test_non_added_parent(self): self._connect(0, 1) self._collect(0) self._check([0]) def test_cyclic(self): self._connect(0, 2) self._connect(1, 0) self._connect(2, 1) self._collect(0) self._check([0, [1, [2]]]) def test_queries(self): self._connect(1, 0) self._connect(2, 0) # 1 query to fetch all children of 0 (1 and 2) # 1 query to fetch all children of 1 and 2 (none) # Should not require additional queries to populate the nested graph. self.assertNumQueries(2, self._collect, 0) def test_on_delete_do_nothing(self): """ The nested collector doesn't query for DO_NOTHING objects. """ n = NestedObjects(using=DEFAULT_DB_ALIAS) objs = [Event.objects.create()] EventGuide.objects.create(event=objs[0]) with self.assertNumQueries(2): # One for Location, one for Guest, and no query for EventGuide n.collect(objs) def test_relation_on_abstract(self): """ NestedObjects.collect() doesn't trip (AttributeError) on the special notation for relations on abstract models (related_name that contains %(app_label)s and/or %(class)s) (#21846). 
""" n = NestedObjects(using=DEFAULT_DB_ALIAS) Car.objects.create() n.collect([Vehicle.objects.first()]) class UtilsTests(SimpleTestCase): empty_value = '-empty-' def test_values_from_lookup_field(self): """ Regression test for #12654: lookup_field """ SITE_NAME = 'example.com' TITLE_TEXT = 'Some title' CREATED_DATE = datetime.min ADMIN_METHOD = 'admin method' SIMPLE_FUNCTION = 'function' INSTANCE_ATTRIBUTE = 'attr' class MockModelAdmin: def get_admin_value(self, obj): return ADMIN_METHOD def simple_function(obj): return SIMPLE_FUNCTION site_obj = Site(domain=SITE_NAME) article = Article( site=site_obj, title=TITLE_TEXT, created=CREATED_DATE, ) article.non_field = INSTANCE_ATTRIBUTE verifications = ( ('site', SITE_NAME), ('created', localize(CREATED_DATE)), ('title', TITLE_TEXT), ('get_admin_value', ADMIN_METHOD), (simple_function, SIMPLE_FUNCTION), ('test_from_model', article.test_from_model()), ('non_field', INSTANCE_ATTRIBUTE) ) mock_admin = MockModelAdmin() for name, value in verifications: field, attr, resolved_value = lookup_field(name, article, mock_admin) if field is not None: resolved_value = display_for_field(resolved_value, field, self.empty_value) self.assertEqual(value, resolved_value) def test_null_display_for_field(self): """ Regression test for #12550: display_for_field should handle None value. """ display_value = display_for_field(None, models.CharField(), self.empty_value) self.assertEqual(display_value, self.empty_value) display_value = display_for_field(None, models.CharField( choices=( (None, "test_none"), ) ), self.empty_value) self.assertEqual(display_value, "test_none") display_value = display_for_field(None, models.DateField(), self.empty_value) self.assertEqual(display_value, self.empty_value) display_value = display_for_field(None, models.TimeField(), self.empty_value) self.assertEqual(display_value, self.empty_value) # Regression test for #13071: NullBooleanField has special # handling. display_value = display_for_field(None, models.NullBooleanField(), self.empty_value) expected = '<img src="%sadmin/img/icon-unknown.svg" alt="None">' % settings.STATIC_URL self.assertHTMLEqual(display_value, expected) display_value = display_for_field(None, models.BooleanField(null=True), self.empty_value) expected = '<img src="%sadmin/img/icon-unknown.svg" alt="None" />' % settings.STATIC_URL self.assertHTMLEqual(display_value, expected) display_value = display_for_field(None, models.DecimalField(), self.empty_value) self.assertEqual(display_value, self.empty_value) display_value = display_for_field(None, models.FloatField(), self.empty_value) self.assertEqual(display_value, self.empty_value) display_value = display_for_field(None, models.JSONField(), self.empty_value) self.assertEqual(display_value, self.empty_value) def test_json_display_for_field(self): tests = [ ({'a': {'b': 'c'}}, '{"a": {"b": "c"}}'), (['a', 'b'], '["a", "b"]'), ('a', '"a"'), ({'a': '你好 世界'}, '{"a": "你好 世界"}'), ({('a', 'b'): 'c'}, "{('a', 'b'): 'c'}"), # Invalid JSON. 
] for value, display_value in tests: with self.subTest(value=value): self.assertEqual( display_for_field(value, models.JSONField(), self.empty_value), display_value, ) def test_number_formats_display_for_field(self): display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value) self.assertEqual(display_value, '12345.6789') display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value) self.assertEqual(display_value, '12345.6789') display_value = display_for_field(12345, models.IntegerField(), self.empty_value) self.assertEqual(display_value, '12345') @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True) def test_number_formats_with_thousand_separator_display_for_field(self): display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value) self.assertEqual(display_value, '12,345.6789') display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value) self.assertEqual(display_value, '12,345.6789') display_value = display_for_field(12345, models.IntegerField(), self.empty_value) self.assertEqual(display_value, '12,345') def test_list_display_for_value(self): display_value = display_for_value([1, 2, 3], self.empty_value) self.assertEqual(display_value, '1, 2, 3') display_value = display_for_value([1, 2, 'buckle', 'my', 'shoe'], self.empty_value) self.assertEqual(display_value, '1, 2, buckle, my, shoe') @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True) def test_list_display_for_value_boolean(self): self.assertEqual( display_for_value(True, '', boolean=True), '<img src="/static/admin/img/icon-yes.svg" alt="True">' ) self.assertEqual( display_for_value(False, '', boolean=True), '<img src="/static/admin/img/icon-no.svg" alt="False">' ) self.assertEqual(display_for_value(True, ''), 'True') self.assertEqual(display_for_value(False, ''), 'False') def test_label_for_field(self): """ Tests for label_for_field """ self.assertEqual( label_for_field("title", Article), "title" ) self.assertEqual( label_for_field("hist", Article), "History" ) self.assertEqual( label_for_field("hist", Article, return_attr=True), ("History", None) ) self.assertEqual( label_for_field("__str__", Article), "article" ) with self.assertRaisesMessage(AttributeError, "Unable to lookup 'unknown' on Article"): label_for_field("unknown", Article) def test_callable(obj): return "nothing" self.assertEqual( label_for_field(test_callable, Article), "Test callable" ) self.assertEqual( label_for_field(test_callable, Article, return_attr=True), ("Test callable", test_callable) ) self.assertEqual( label_for_field("test_from_model", Article), "Test from model" ) self.assertEqual( label_for_field("test_from_model", Article, return_attr=True), ("Test from model", Article.test_from_model) ) self.assertEqual( label_for_field("test_from_model_with_override", Article), "not What you Expect" ) self.assertEqual( label_for_field(lambda x: "nothing", Article), "--" ) self.assertEqual(label_for_field('site_id', Article), 'Site id') class MockModelAdmin: def test_from_model(self, obj): return "nothing" test_from_model.short_description = "not Really the Model" self.assertEqual( label_for_field("test_from_model", Article, model_admin=MockModelAdmin), "not Really the Model" ) self.assertEqual( label_for_field("test_from_model", Article, model_admin=MockModelAdmin, return_attr=True), ("not Really the Model", MockModelAdmin.test_from_model) ) def test_label_for_field_form_argument(self): class ArticleForm(forms.ModelForm): 
extra_form_field = forms.BooleanField() class Meta: fields = '__all__' model = Article self.assertEqual( label_for_field('extra_form_field', Article, form=ArticleForm()), 'Extra form field' ) msg = "Unable to lookup 'nonexistent' on Article or ArticleForm" with self.assertRaisesMessage(AttributeError, msg): label_for_field('nonexistent', Article, form=ArticleForm()), def test_label_for_property(self): # NOTE: cannot use @property decorator, because of # AttributeError: 'property' object has no attribute 'short_description' class MockModelAdmin: def my_property(self): return "this if from property" my_property.short_description = 'property short description' test_from_property = property(my_property) self.assertEqual( label_for_field("test_from_property", Article, model_admin=MockModelAdmin), 'property short description' ) def test_help_text_for_field(self): tests = [ ('article', ''), ('unknown', ''), ('hist', 'History help text'), ] for name, help_text in tests: with self.subTest(name=name): self.assertEqual(help_text_for_field(name, Article), help_text) def test_related_name(self): """ Regression test for #13963 """ self.assertEqual( label_for_field('location', Event, return_attr=True), ('location', None), ) self.assertEqual( label_for_field('event', Location, return_attr=True), ('awesome event', None), ) self.assertEqual( label_for_field('guest', Event, return_attr=True), ('awesome guest', None), ) def test_safestring_in_field_label(self): # safestring should not be escaped class MyForm(forms.Form): text = forms.CharField(label=mark_safe('<i>text</i>')) cb = forms.BooleanField(label=mark_safe('<i>cb</i>')) form = MyForm() self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(), '<label for="id_text" class="required inline"><i>text</i>:</label>') self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(), '<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i></label>') # normal strings needs to be escaped class MyForm(forms.Form): text = forms.CharField(label='&text') cb = forms.BooleanField(label='&cb') form = MyForm() self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(), '<label for="id_text" class="required inline">&amp;text:</label>') self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(), '<label for="id_cb" class="vCheckboxLabel required inline">&amp;cb</label>') def test_flatten(self): flat_all = ['url', 'title', 'content', 'sites'] inputs = ( ((), []), (('url', 'title', ('content', 'sites')), flat_all), (('url', 'title', 'content', 'sites'), flat_all), ((('url', 'title'), ('content', 'sites')), flat_all) ) for orig, expected in inputs: self.assertEqual(flatten(orig), expected) def test_flatten_fieldsets(self): """ Regression test for #18051 """ fieldsets = ( (None, { 'fields': ('url', 'title', ('content', 'sites')) }), ) self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites']) fieldsets = ( (None, { 'fields': ('url', 'title', ['content', 'sites']) }), ) self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites']) def test_quote(self): self.assertEqual(quote('something\nor\nother'), 'something_0Aor_0Aother')
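# --- Illustrative sketch (editor's addition, not part of UtilsTests above) ---
# quote() makes arbitrary primary key values safe for use in admin URL paths by
# replacing troublesome characters (newlines, slashes, '_' itself, ...) with an
# underscore-hex escape such as '_0A'; unquote() from the same module reverses
# the transformation. A minimal round-trip check, assuming only those two
# public helpers and the hypothetical quote_roundtrip() name:

from django.contrib.admin.utils import quote, unquote


def quote_roundtrip(value):
    """Return True if quoting and then unquoting restores the original value."""
    return unquote(quote(value)) == value


# Matches the expectation asserted in test_quote() above:
#   quote('something\nor\nother') == 'something_0Aor_0Aother'
#   quote_roundtrip('something\nor\nother') is True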
import datetime import pickle import sys import unittest from operator import attrgetter from threading import Lock from django.core.exceptions import EmptyResultSet, FieldError from django.db import DEFAULT_DB_ALIAS, connection from django.db.models import Count, Exists, F, OuterRef, Q from django.db.models.expressions import RawSQL from django.db.models.sql.constants import LOUTER from django.db.models.sql.where import NothingNode, WhereNode from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import CaptureQueriesContext, ignore_warnings from django.utils.deprecation import RemovedInDjango40Warning from .models import ( FK1, Annotation, Article, Author, BaseA, Book, CategoryItem, CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA, Classroom, CommonMixedCaseForeignKeys, Company, Cover, CustomPk, CustomPkTag, DateTimePK, Detail, DumbCategory, Eaten, Employment, ExtraInfo, Fan, Food, Identifier, Individual, Item, Job, JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel, Member, MixedCaseDbColumnCategoryItem, MixedCaseFieldCategoryItem, ModelA, ModelB, ModelC, ModelD, MyObject, NamedCategory, Node, Note, NullableName, Number, ObjectA, ObjectB, ObjectC, OneToOneCategory, Order, OrderItem, Page, Paragraph, Person, Plaything, PointerA, Program, ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking, Related, RelatedIndividual, RelatedObject, Report, ReportComment, ReservedName, Responsibility, School, SharedConnection, SimpleCategory, SingleObject, SpecialCategory, Staff, StaffUser, Student, Tag, Task, Teacher, Ticket21203Child, Ticket21203Parent, Ticket23605A, Ticket23605B, Ticket23605C, TvChef, Valid, X, ) class Queries1Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic) cls.t3 = Tag.objects.create(name='t3', parent=cls.t1) t4 = Tag.objects.create(name='t4', parent=cls.t3) cls.t5 = Tag.objects.create(name='t5', parent=cls.t3) cls.n1 = Note.objects.create(note='n1', misc='foo', id=1) cls.n2 = Note.objects.create(note='n2', misc='bar', id=2) cls.n3 = Note.objects.create(note='n3', misc='foo', id=3, negate=False) ann1 = Annotation.objects.create(name='a1', tag=cls.t1) ann1.notes.add(cls.n1) ann2 = Annotation.objects.create(name='a2', tag=t4) ann2.notes.add(cls.n2, cls.n3) # Create these out of order so that sorting by 'id' will be different to sorting # by 'info'. Helps detect some problems later. 
cls.e2 = ExtraInfo.objects.create(info='e2', note=cls.n2, value=41, filterable=False) e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42) cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1) cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1) cls.a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2) cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2) cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0) cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0) time3 = datetime.datetime(2007, 12, 20, 22, 25, 0) time4 = datetime.datetime(2007, 12, 20, 21, 0, 0) cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3) cls.i1.tags.set([cls.t1, cls.t2]) cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=cls.n2) cls.i2.tags.set([cls.t1, cls.t3]) cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3) i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3) i4.tags.set([t4]) cls.r1 = Report.objects.create(name='r1', creator=cls.a1) Report.objects.create(name='r2', creator=cls.a3) Report.objects.create(name='r3') # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering # will be rank3, rank2, rank1. cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2) Cover.objects.create(title="first", item=i4) Cover.objects.create(title="second", item=cls.i2) def test_subquery_condition(self): qs1 = Tag.objects.filter(pk__lte=0) qs2 = Tag.objects.filter(parent__in=qs1) qs3 = Tag.objects.filter(parent__in=qs2) self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'}) self.assertIn('v0', str(qs3.query).lower()) qs4 = qs3.filter(parent__in=qs1) self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'}) # It is possible to reuse U for the second subquery, no need to use W. self.assertNotIn('w0', str(qs4.query).lower()) # So, 'U0."id"' is referenced in SELECT and WHERE twice. self.assertEqual(str(qs4.query).lower().count('u0.'), 4) def test_ticket1050(self): self.assertQuerysetEqual( Item.objects.filter(tags__isnull=True), ['<Item: three>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__id__isnull=True), ['<Item: three>'] ) def test_ticket1801(self): self.assertQuerysetEqual( Author.objects.filter(item=self.i2), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i3), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3), ['<Author: a2>'] ) def test_ticket2306(self): # Checking that no join types are "left outer" joins. query = Item.objects.filter(tags=self.t2).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).order_by('name'), ['<Item: one>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)), ['<Item: one>'] ) # Each filter call is processed "at once" against a single table, so this is # different from the previous example as it tries to find tags that are two # things at once (rather than two tags). 
self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)), [] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)), [] ) qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id) self.assertQuerysetEqual(list(qs), ['<Author: a2>']) self.assertEqual(2, qs.query.count_active_tables(), 2) qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id) self.assertEqual(qs.query.count_active_tables(), 3) def test_ticket4464(self): self.assertQuerysetEqual( Item.objects.filter(tags=self.t1).filter(tags=self.t2), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'), ['<Item: one>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3), ['<Item: two>'] ) # Make sure .distinct() works with slicing (this was broken in Oracle). self.assertQuerysetEqual( Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3], ['<Item: one>', '<Item: one>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3], ['<Item: one>', '<Item: two>'] ) def test_tickets_2080_3592(self): self.assertQuerysetEqual( Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'), ['<Author: a1>', '<Author: a3>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(item__name='one') | Q(name='a3')), ['<Author: a1>', '<Author: a3>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(name='a3') | Q(item__name='one')), ['<Author: a1>', '<Author: a3>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(item__name='three') | Q(report__name='r3')), ['<Author: a2>'] ) def test_ticket6074(self): # Merging two empty result sets shouldn't leave a queryset with no constraints # (which would match everything). self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), []) self.assertQuerysetEqual( Author.objects.filter(Q(id__in=[]) | Q(id__in=[])), [] ) def test_tickets_1878_2939(self): self.assertEqual(Item.objects.values('creator').distinct().count(), 3) # Create something with a duplicate 'name' so that we can test multi-column # cases (which require some tricky SQL transformations under the covers). 
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1) xx.save() self.assertEqual( Item.objects.exclude(name='two').values('creator', 'name').distinct().count(), 4 ) self.assertEqual( ( Item.objects .exclude(name='two') .extra(select={'foo': '%s'}, select_params=(1,)) .values('creator', 'name', 'foo') .distinct() .count() ), 4 ) self.assertEqual( ( Item.objects .exclude(name='two') .extra(select={'foo': '%s'}, select_params=(1,)) .values('creator', 'name') .distinct() .count() ), 4 ) xx.delete() def test_ticket7323(self): self.assertEqual(Item.objects.values('creator', 'name').count(), 4) def test_ticket2253(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) self.assertQuerysetEqual( q1, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual(q2, ['<Item: one>']) self.assertQuerysetEqual( (q1 | q2).order_by('name'), ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>']) q1 = Item.objects.filter(tags=self.t1) q2 = Item.objects.filter(note=self.n3, tags=self.t2) q3 = Item.objects.filter(creator=self.a4) self.assertQuerysetEqual( ((q1 & q2) | q3).order_by('name'), ['<Item: four>', '<Item: one>'] ) def test_order_by_tables(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) list(q2) combined_query = (q1 & q2).order_by('name').query self.assertEqual(len([ t for t in combined_query.alias_map if combined_query.alias_refcount[t] ]), 1) def test_order_by_join_unref(self): """ This test is related to the above one, testing that there aren't old JOINs in the query. """ qs = Celebrity.objects.order_by('greatest_fan__fan_of') self.assertIn('OUTER JOIN', str(qs.query)) qs = qs.order_by('id') self.assertNotIn('OUTER JOIN', str(qs.query)) def test_get_clears_ordering(self): """ get() should clear ordering for optimization purposes. """ with CaptureQueriesContext(connection) as captured_queries: Author.objects.order_by('name').get(pk=self.a1.pk) self.assertNotIn('order by', captured_queries[0]['sql'].lower()) def test_tickets_4088_4306(self): self.assertQuerysetEqual( Report.objects.filter(creator=1001), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__num=1001), ['<Report: r1>'] ) self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), []) self.assertQuerysetEqual( Report.objects.filter(creator__id=self.a1.id), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__name='a1'), ['<Report: r1>'] ) def test_ticket4510(self): self.assertQuerysetEqual( Author.objects.filter(report__name='r1'), ['<Author: a1>'] ) def test_ticket7378(self): self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>']) def test_tickets_5324_6704(self): self.assertQuerysetEqual( Item.objects.filter(tags__name='t4'), ['<Item: four>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct(), ['<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(), ['<Item: two>', '<Item: three>', '<Item: one>'] ) self.assertQuerysetEqual( Author.objects.exclude(item__name='one').distinct().order_by('name'), ['<Author: a2>', '<Author: a3>', '<Author: a4>'] ) # Excluding across a m2m relation when there is more than one related # object associated was problematic. 
self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').order_by('name'), ['<Item: four>', '<Item: three>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'), ['<Item: three>'] ) # Excluding from a relation that cannot be NULL should not use outer joins. query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) # Similarly, when one of the joins cannot possibly, ever, involve NULL # values (Author -> ExtraInfo, in the following), it should never be # promoted to a left outer join. So the following query should only # involve one "left outer" join (Author -> Item is 0-to-many). qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)) self.assertEqual( len([ x for x in qs.query.alias_map.values() if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias] ]), 1 ) # The previous changes shouldn't affect nullable foreign key joins. self.assertQuerysetEqual( Tag.objects.filter(parent__isnull=True).order_by('name'), ['<Tag: t1>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent__isnull=True).order_by('name'), ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) def test_ticket2091(self): t = Tag.objects.get(name='t4') self.assertQuerysetEqual( Item.objects.filter(tags__in=[t]), ['<Item: four>'] ) def test_avoid_infinite_loop_on_too_many_subqueries(self): x = Tag.objects.filter(pk=1) local_recursion_limit = sys.getrecursionlimit() // 16 msg = 'Maximum recursion depth exceeded: too many subqueries.' with self.assertRaisesMessage(RecursionError, msg): for i in range(local_recursion_limit + 2): x = Tag.objects.filter(pk__in=x) def test_reasonable_number_of_subq_aliases(self): x = Tag.objects.filter(pk=1) for _ in range(20): x = Tag.objects.filter(pk__in=x) self.assertEqual( x.query.subq_aliases, { 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', } ) def test_heterogeneous_qs_combination(self): # Combining querysets built on different models should behave in a well-defined # fashion. We raise an error. 
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'): Author.objects.all() & Tag.objects.all() with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'): Author.objects.all() | Tag.objects.all() def test_ticket3141(self): self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4) self.assertEqual( Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(), 4 ) def test_ticket2400(self): self.assertQuerysetEqual( Author.objects.filter(item__isnull=True), ['<Author: a3>'] ) self.assertQuerysetEqual( Tag.objects.filter(item__isnull=True), ['<Tag: t5>'] ) def test_ticket2496(self): self.assertQuerysetEqual( Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1], ['<Item: four>'] ) def test_error_raised_on_filter_with_dictionary(self): with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'): Note.objects.filter({'note': 'n1', 'misc': 'foo'}) def test_tickets_2076_7256(self): # Ordering on related tables should be possible, even if the table is # not otherwise involved. self.assertQuerysetEqual( Item.objects.order_by('note__note', 'name'), ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # Ordering on a related field should use the remote model's default # ordering as a final step. self.assertQuerysetEqual( Author.objects.order_by('extra', '-name'), ['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>'] ) # Using remote model default ordering can span multiple models (in this # case, Cover is ordered by Item's default, which uses Note's default). self.assertQuerysetEqual( Cover.objects.all(), ['<Cover: first>', '<Cover: second>'] ) # If the remote model does not have a default ordering, we order by its 'id' # field. self.assertQuerysetEqual( Item.objects.order_by('creator', 'name'), ['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>'] ) # Ordering by a many-valued attribute (e.g. a many-to-many or reverse # ForeignKey) is legal, but the results might not make sense. That # isn't Django's problem. Garbage in, garbage out. self.assertQuerysetEqual( Item.objects.filter(tags__isnull=False).order_by('tags', 'id'), ['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>'] ) # If we replace the default ordering, Django adjusts the required # tables automatically. Item normally requires a join with Note to do # the default ordering, but that isn't needed here. qs = Item.objects.order_by('name') self.assertQuerysetEqual( qs, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertEqual(len(qs.query.alias_map), 1) def test_tickets_2874_3002(self): qs = Item.objects.select_related().order_by('note__note', 'name') self.assertQuerysetEqual( qs, ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # This is also a good select_related() test because there are multiple # Note entries in the SQL. The two Note items should be different. self.assertEqual(repr(qs[0].note), '<Note: n2>') self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>') def test_ticket3037(self): self.assertQuerysetEqual( Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')), ['<Item: four>'] ) def test_tickets_5321_7070(self): # Ordering columns must be included in the output columns. Note that # this means results that might otherwise be distinct are not (if there # are multiple values in the ordering cols), as in this example. 
This # isn't a bug; it's a warning to be careful with the selection of # ordering columns. self.assertSequenceEqual( Note.objects.values('misc').distinct().order_by('note', '-misc'), [{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}] ) def test_ticket4358(self): # If you don't pass any fields to values(), relation fields are # returned as "foo_id" keys, not "foo". For consistency, you should be # able to pass "foo_id" in the fields list and have it work, too. We # actually allow both "foo" and "foo_id". # The *_id version is returned by default. self.assertIn('note_id', ExtraInfo.objects.values()[0]) # You can also pass it in explicitly. self.assertSequenceEqual(ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}]) # ...or use the field name. self.assertSequenceEqual(ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}]) def test_ticket6154(self): # Multiple filter statements are joined using "AND" all the time. self.assertQuerysetEqual( Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)), ['<Author: a1>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id), ['<Author: a1>'] ) def test_ticket6981(self): self.assertQuerysetEqual( Tag.objects.select_related('parent').order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) def test_ticket9926(self): self.assertQuerysetEqual( Tag.objects.select_related("parent", "category").order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.select_related('parent', "parent__category").order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) def test_tickets_6180_6203(self): # Dates with limits and/or counts self.assertEqual(Item.objects.count(), 4) self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1) self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2) self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2) self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0)) def test_tickets_7087_12242(self): # Dates with extra select columns self.assertQuerysetEqual( Item.objects.datetimes('created', 'day').extra(select={'a': 1}), ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)'] ) self.assertQuerysetEqual( Item.objects.extra(select={'a': 1}).datetimes('created', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)'] ) name = "one" self.assertQuerysetEqual( Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) self.assertQuerysetEqual( Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) def test_ticket7155(self): # Nullable dates self.assertQuerysetEqual( Item.objects.datetimes('modified', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) @ignore_warnings(category=RemovedInDjango40Warning) def test_ticket7098(self): self.assertSequenceEqual( Item.objects.values('note__note').order_by('queries_note.note', 'id'), [{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}] ) def test_order_by_rawsql(self): self.assertSequenceEqual( Item.objects.values('note__note').order_by( RawSQL('queries_note.note', ()), 'id', ), [ {'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, 
{'note__note': 'n3'}, ], ) def test_order_by_raw_column_alias_warning(self): msg = ( "Passing column raw column aliases to order_by() is deprecated. " "Wrap 'queries_author.name' in a RawSQL expression before " "passing it to order_by()." ) with self.assertRaisesMessage(RemovedInDjango40Warning, msg): Item.objects.values('creator__name').order_by('queries_author.name') def test_ticket7096(self): # Make sure exclude() with multiple conditions continues to work. self.assertQuerysetEqual( Tag.objects.filter(parent=self.t1, name='t3').order_by('name'), ['<Tag: t3>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(), ['<Item: four>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'), ['<Item: four>', '<Item: three>'] ) # More twisted cases, involving nested negations. self.assertQuerysetEqual( Item.objects.exclude(~Q(tags__name='t1', name='one')), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'), ['<Item: two>'] ) self.assertQuerysetEqual( Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'), ['<Item: four>', '<Item: one>', '<Item: three>'] ) def test_tickets_7204_7506(self): # Make sure querysets with related fields can be pickled. If this # doesn't crash, it's a Good Thing. pickle.dumps(Item.objects.all()) def test_ticket7813(self): # We should also be able to pickle things that use select_related(). # The only tricky thing here is to ensure that we do the related # selections properly after unpickling. qs = Item.objects.select_related() query = qs.query.get_compiler(qs.db).as_sql()[0] query2 = pickle.loads(pickle.dumps(qs.query)) self.assertEqual( query2.get_compiler(qs.db).as_sql()[0], query ) def test_deferred_load_qs_pickling(self): # Check pickling of deferred-loading querysets qs = Item.objects.defer('name', 'creator') q2 = pickle.loads(pickle.dumps(qs)) self.assertEqual(list(qs), list(q2)) q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL)) self.assertEqual(list(qs), list(q3)) def test_ticket7277(self): self.assertQuerysetEqual( self.n1.annotation_set.filter( Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5) ), ['<Annotation: a1>'] ) def test_tickets_7448_7707(self): # Complex objects should be converted to strings before being used in # lookups. self.assertQuerysetEqual( Item.objects.filter(created__in=[self.time1, self.time2]), ['<Item: one>', '<Item: two>'] ) def test_ticket7235(self): # An EmptyQuerySet should not raise exceptions if it is filtered. 
Eaten.objects.create(meal='m') q = Eaten.objects.none() with self.assertNumQueries(0): self.assertQuerysetEqual(q.all(), []) self.assertQuerysetEqual(q.filter(meal='m'), []) self.assertQuerysetEqual(q.exclude(meal='m'), []) self.assertQuerysetEqual(q.complex_filter({'pk': 1}), []) self.assertQuerysetEqual(q.select_related('food'), []) self.assertQuerysetEqual(q.annotate(Count('food')), []) self.assertQuerysetEqual(q.order_by('meal', 'food'), []) self.assertQuerysetEqual(q.distinct(), []) self.assertQuerysetEqual( q.extra(select={'foo': "1"}), [] ) self.assertQuerysetEqual(q.reverse(), []) q.query.low_mark = 1 with self.assertRaisesMessage(AssertionError, 'Cannot change a query once a slice has been taken'): q.extra(select={'foo': "1"}) self.assertQuerysetEqual(q.defer('meal'), []) self.assertQuerysetEqual(q.only('meal'), []) def test_ticket7791(self): # There were "issues" when ordering and distinct-ing on fields related # via ForeignKeys. self.assertEqual( len(Note.objects.order_by('extrainfo__info').distinct()), 3 ) # Pickling of QuerySets using datetimes() should work. qs = Item.objects.datetimes('created', 'month') pickle.loads(pickle.dumps(qs)) def test_ticket9997(self): # If a ValuesList or Values queryset is passed as an inner query, we # make sure it's only requesting a single value and use that as the # thing to select. self.assertQuerysetEqual( Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')), ['<Tag: t2>', '<Tag: t3>'] ) # Multi-valued values() and values_list() querysets should raise errors. with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'): Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id')) with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'): Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id')) def test_ticket9985(self): # qs.values_list(...).values(...) combinations should work. self.assertSequenceEqual( Note.objects.values_list("note", flat=True).values("id").order_by("id"), [{'id': 1}, {'id': 2}, {'id': 3}] ) self.assertQuerysetEqual( Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')), ['<Annotation: a1>'] ) def test_ticket10205(self): # When bailing out early because of an empty "__in" filter, we need # to set things up correctly internally so that subqueries can continue properly. self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0) def test_ticket10432(self): # Testing an empty "__in" filter with a generator as the value. 
def f(): return iter([]) n_obj = Note.objects.all()[0] def g(): yield n_obj.pk self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), []) self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj]) def test_ticket10742(self): # Queries used in an __in clause don't execute subqueries subq = Author.objects.filter(num__lt=3000) qs = Author.objects.filter(pk__in=subq) self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>']) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) subq = Author.objects.filter(num__lt=3000) qs = Author.objects.exclude(pk__in=subq) self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>']) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) subq = Author.objects.filter(num__lt=3000) self.assertQuerysetEqual( Author.objects.filter(Q(pk__in=subq) & Q(name='a1')), ['<Author: a1>'] ) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) def test_ticket7076(self): # Excluding shouldn't eliminate NULL entries. self.assertQuerysetEqual( Item.objects.exclude(modified=self.time1).order_by('name'), ['<Item: four>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent__name=self.t1.name), ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>'] ) def test_ticket7181(self): # Ordering by related tables should accommodate nullable fields (this # test is a little tricky, since NULL ordering is database dependent. # Instead, we just count the number of results). self.assertEqual(len(Tag.objects.order_by('parent__name')), 5) # Empty querysets can be merged with others. self.assertQuerysetEqual( Note.objects.none() | Note.objects.all(), ['<Note: n1>', '<Note: n2>', '<Note: n3>'] ) self.assertQuerysetEqual( Note.objects.all() | Note.objects.none(), ['<Note: n1>', '<Note: n2>', '<Note: n3>'] ) self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), []) self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), []) def test_ticket8439(self): # Complex combinations of conjunctions, disjunctions and nullable # relations. 
self.assertQuerysetEqual( Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)), ['<Author: a2>'] ) self.assertQuerysetEqual( Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')), ['<Annotation: a1>'] ) xx = ExtraInfo.objects.create(info='xx', note=self.n3) self.assertQuerysetEqual( Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)), ['<Note: n1>', '<Note: n3>'] ) q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query self.assertEqual( len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]), 1 ) def test_ticket17429(self): """ Meta.ordering=None works the same as Meta.ordering=[] """ original_ordering = Tag._meta.ordering Tag._meta.ordering = None try: self.assertQuerysetEqual( Tag.objects.all(), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ordered=False ) finally: Tag._meta.ordering = original_ordering def test_exclude(self): self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4'), [repr(i) for i in Item.objects.filter(~Q(tags__name='t4'))]) self.assertQuerysetEqual( Item.objects.exclude(Q(tags__name='t4') | Q(tags__name='t3')), [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | Q(tags__name='t3')))]) self.assertQuerysetEqual( Item.objects.exclude(Q(tags__name='t4') | ~Q(tags__name='t3')), [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | ~Q(tags__name='t3')))]) def test_nested_exclude(self): self.assertQuerysetEqual( Item.objects.exclude(~Q(tags__name='t4')), [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))]) def test_double_exclude(self): self.assertQuerysetEqual( Item.objects.filter(Q(tags__name='t4')), [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))]) self.assertQuerysetEqual( Item.objects.filter(Q(tags__name='t4')), [repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))]) def test_exclude_in(self): self.assertQuerysetEqual( Item.objects.exclude(Q(tags__name__in=['t4', 't3'])), [repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))]) self.assertQuerysetEqual( Item.objects.filter(Q(tags__name__in=['t4', 't3'])), [repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))]) def test_ticket_10790_1(self): # Querying direct fields with isnull should trim the left outer join. # It also should not create INNER JOIN. q = Tag.objects.filter(parent__isnull=True) self.assertQuerysetEqual(q, ['<Tag: t1>']) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.filter(parent__isnull=False) self.assertQuerysetEqual( q, ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.exclude(parent__isnull=True) self.assertQuerysetEqual( q, ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.exclude(parent__isnull=False) self.assertQuerysetEqual(q, ['<Tag: t1>']) self.assertNotIn('JOIN', str(q.query)) q = Tag.objects.exclude(parent__parent__isnull=False) self.assertQuerysetEqual( q, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) self.assertNotIn('INNER JOIN', str(q.query)) def test_ticket_10790_2(self): # Querying across several tables should strip only the last outer join, # while preserving the preceding inner joins. 
q = Tag.objects.filter(parent__parent__isnull=False) self.assertQuerysetEqual( q, ['<Tag: t4>', '<Tag: t5>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 1) # Querying without isnull should not convert anything to left outer join. q = Tag.objects.filter(parent__parent=self.t1) self.assertQuerysetEqual( q, ['<Tag: t4>', '<Tag: t5>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 1) def test_ticket_10790_3(self): # Querying via indirect fields should populate the left outer join q = NamedCategory.objects.filter(tag__isnull=True) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) # join to dumbcategory ptr_id self.assertEqual(str(q.query).count('INNER JOIN'), 1) self.assertQuerysetEqual(q, []) # Querying across several tables should strip only the last join, while # preserving the preceding left outer joins. q = NamedCategory.objects.filter(tag__parent__isnull=True) self.assertEqual(str(q.query).count('INNER JOIN'), 1) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) self.assertQuerysetEqual(q, ['<NamedCategory: Generic>']) def test_ticket_10790_4(self): # Querying across m2m field should not strip the m2m table from join. q = Author.objects.filter(item__tags__isnull=True) self.assertQuerysetEqual( q, ['<Author: a2>', '<Author: a3>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2) self.assertNotIn('INNER JOIN', str(q.query)) q = Author.objects.filter(item__tags__parent__isnull=True) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'], ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3) self.assertNotIn('INNER JOIN', str(q.query)) def test_ticket_10790_5(self): # Querying with isnull=False across m2m field should not create outer joins q = Author.objects.filter(item__tags__isnull=False) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 2) q = Author.objects.filter(item__tags__parent__isnull=False) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 3) q = Author.objects.filter(item__tags__parent__parent__isnull=False) self.assertQuerysetEqual( q, ['<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 4) def test_ticket_10790_6(self): # Querying with isnull=True across m2m field should not create inner joins # and strip last outer join q = Author.objects.filter(item__tags__parent__parent__isnull=True) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a2>', '<Author: a3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4) self.assertEqual(str(q.query).count('INNER JOIN'), 0) q = Author.objects.filter(item__tags__parent__isnull=True) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3) self.assertEqual(str(q.query).count('INNER JOIN'), 0) def test_ticket_10790_7(self): # Reverse querying with isnull should not strip the join q = Author.objects.filter(item__isnull=True) self.assertQuerysetEqual( q, ['<Author: a3>'] ) 
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1) self.assertEqual(str(q.query).count('INNER JOIN'), 0) q = Author.objects.filter(item__isnull=False) self.assertQuerysetEqual( q, ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 1) def test_ticket_10790_8(self): # Querying with combined q-objects should also strip the left outer join q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1)) self.assertQuerysetEqual( q, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q.query).count('INNER JOIN'), 0) def test_ticket_10790_combine(self): # Combining queries should not re-populate the left outer join q1 = Tag.objects.filter(parent__isnull=True) q2 = Tag.objects.filter(parent__isnull=False) q3 = q1 | q2 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'], ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q3 = q1 & q2 self.assertQuerysetEqual(q3, []) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q2 = Tag.objects.filter(parent=self.t1) q3 = q1 | q2 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q3 = q2 | q1 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q1 = Tag.objects.filter(parent__isnull=True) q2 = Tag.objects.filter(parent__parent__isnull=True) q3 = q1 | q2 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) q3 = q2 | q1 self.assertQuerysetEqual( q3, ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'] ) self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1) self.assertEqual(str(q3.query).count('INNER JOIN'), 0) def test_ticket19672(self): self.assertQuerysetEqual( Report.objects.filter(Q(creator__isnull=False) & ~Q(creator__extra__value=41)), ['<Report: r1>'] ) def test_ticket_20250(self): # A negated Q along with an annotated queryset failed in Django 1.4 qs = Author.objects.annotate(Count('item')) qs = qs.filter(~Q(extra__value=0)).order_by('name') self.assertIn('SELECT', str(qs.query)) self.assertQuerysetEqual( qs, ['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>'] ) def test_lookup_constraint_fielderror(self): msg = ( "Cannot resolve keyword 'unknown_field' into field. Choices are: " "annotation, category, category_id, children, id, item, " "managedmodel, name, note, parent, parent_id" ) with self.assertRaisesMessage(FieldError, msg): Tag.objects.filter(unknown_field__name='generic') def test_common_mixed_case_foreign_keys(self): """ Valid query should be generated when fields fetched from joined tables include FKs whose names only differ by case. 
""" c1 = SimpleCategory.objects.create(name='c1') c2 = SimpleCategory.objects.create(name='c2') c3 = SimpleCategory.objects.create(name='c3') category = CategoryItem.objects.create(category=c1) mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(CaTeGoRy=c2) mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(category=c3) CommonMixedCaseForeignKeys.objects.create( category=category, mixed_case_field_category=mixed_case_field_category, mixed_case_db_column_category=mixed_case_db_column_category, ) qs = CommonMixedCaseForeignKeys.objects.values( 'category', 'mixed_case_field_category', 'mixed_case_db_column_category', 'category__category', 'mixed_case_field_category__CaTeGoRy', 'mixed_case_db_column_category__category', ) self.assertTrue(qs.first()) def test_excluded_intermediary_m2m_table_joined(self): self.assertSequenceEqual( Note.objects.filter(~Q(tag__annotation__name=F('note'))), [self.n1, self.n2, self.n3], ) self.assertSequenceEqual( Note.objects.filter(tag__annotation__name='a1').filter(~Q(tag__annotation__name=F('note'))), [], ) def test_field_with_filterable(self): self.assertSequenceEqual( Author.objects.filter(extra=self.e2), [self.a3, self.a4], ) def test_negate_field(self): self.assertSequenceEqual( Note.objects.filter(negate=True), [self.n1, self.n2], ) self.assertSequenceEqual(Note.objects.exclude(negate=True), [self.n3]) class Queries2Tests(TestCase): @classmethod def setUpTestData(cls): Number.objects.create(num=4) Number.objects.create(num=8) Number.objects.create(num=12) def test_ticket4289(self): # A slight variation on the restricting the filtering choices by the # lookup constraints. self.assertQuerysetEqual(Number.objects.filter(num__lt=4), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), []) self.assertQuerysetEqual( Number.objects.filter(num__gt=8, num__lt=13), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)), ['<Number: 8>'] ) def test_ticket12239(self): # Custom lookups are registered to round float values correctly on gte # and lt IntegerField queries. 
self.assertQuerysetEqual( Number.objects.filter(num__gt=11.9), ['<Number: 12>'] ) self.assertQuerysetEqual(Number.objects.filter(num__gt=12), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), []) self.assertQuerysetEqual( Number.objects.filter(num__lt=12), ['<Number: 4>', '<Number: 8>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lt=12.0), ['<Number: 4>', '<Number: 8>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lt=12.1), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__gte=11.9), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(num__gte=12), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(num__gte=12.0), ['<Number: 12>'] ) self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), []) self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), []) self.assertQuerysetEqual( Number.objects.filter(num__lte=11.9), ['<Number: 4>', '<Number: 8>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12.0), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12.1), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) self.assertQuerysetEqual( Number.objects.filter(num__lte=12.9), ['<Number: 4>', '<Number: 8>', '<Number: 12>'], ordered=False ) def test_ticket7759(self): # Count should work with a partially read result set. count = Number.objects.count() qs = Number.objects.all() def run(): for obj in qs: return qs.count() == count self.assertTrue(run()) class Queries3Tests(TestCase): def test_ticket7107(self): # This shouldn't create an infinite loop. self.assertQuerysetEqual(Valid.objects.all(), []) def test_ticket8683(self): # An error should be raised when QuerySet.datetimes() is passed the # wrong type of field. 
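        # Item.name is a character field, so datetimes() must reject it with a clear message.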
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."): Item.objects.datetimes('name', 'month') def test_ticket22023(self): with self.assertRaisesMessage(TypeError, "Cannot call only() after .values() or .values_list()"): Valid.objects.values().only() with self.assertRaisesMessage(TypeError, "Cannot call defer() after .values() or .values_list()"): Valid.objects.values().defer() class Queries4Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) n1 = Note.objects.create(note='n1', misc='foo') n2 = Note.objects.create(note='n2', misc='bar') e1 = ExtraInfo.objects.create(info='e1', note=n1) e2 = ExtraInfo.objects.create(info='e2', note=n2) cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1) cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2) cls.r1 = Report.objects.create(name='r1', creator=cls.a1) cls.r2 = Report.objects.create(name='r2', creator=cls.a3) cls.r3 = Report.objects.create(name='r3') Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1) Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3) def test_ticket24525(self): tag = Tag.objects.create() anth100 = tag.note_set.create(note='ANTH', misc='100') math101 = tag.note_set.create(note='MATH', misc='101') s1 = tag.annotation_set.create(name='1') s2 = tag.annotation_set.create(name='2') s1.notes.set([math101, anth100]) s2.notes.set([math101]) result = math101.annotation_set.all() & tag.annotation_set.exclude(notes__in=[anth100]) self.assertEqual(list(result), [s2]) def test_ticket11811(self): unsaved_category = NamedCategory(name="Other") msg = 'Unsaved model instance <NamedCategory: Other> cannot be used in an ORM query.' with self.assertRaisesMessage(ValueError, msg): Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category) def test_ticket14876(self): # Note: when combining the query we need to have information available # about the join type of the trimmed "creator__isnull" join. If we # don't have that information, then the join is created as INNER JOIN # and results will be incorrect. 
q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1')) q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1')) self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False) self.assertEqual(str(q1.query), str(q2.query)) q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True)) q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True)) self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False) self.assertEqual(str(q1.query), str(q2.query)) q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by() q2 = ( Item.objects .filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1')) .order_by() ) self.assertQuerysetEqual(q1, ["<Item: i1>"]) self.assertEqual(str(q1.query), str(q2.query)) q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by() q2 = ( Item.objects.filter(Q(creator__report__name='e1')).order_by() | Item.objects.filter(Q(creator=self.a1)).order_by() ) self.assertQuerysetEqual(q1, ["<Item: i1>"]) self.assertEqual(str(q1.query), str(q2.query)) def test_combine_join_reuse(self): # Joins having identical connections are correctly recreated in the # rhs query, in case the query is ORed together (#18748). Report.objects.create(name='r4', creator=self.a1) q1 = Author.objects.filter(report__name='r5') q2 = Author.objects.filter(report__name='r4').filter(report__name='r1') combined = q1 | q2 self.assertEqual(str(combined.query).count('JOIN'), 2) self.assertEqual(len(combined), 1) self.assertEqual(combined[0].name, 'a1') def test_join_reuse_order(self): # Join aliases are reused in order. This shouldn't raise AssertionError # because change_map contains a circular reference (#26522). s1 = School.objects.create() s2 = School.objects.create() s3 = School.objects.create() t1 = Teacher.objects.create() otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1) qs1 = otherteachers.filter(schools=s1).filter(schools=s2) qs2 = otherteachers.filter(schools=s1).filter(schools=s3) self.assertQuerysetEqual(qs1 | qs2, []) def test_ticket7095(self): # Updates that are filtered on the model being updated are somewhat # tricky in MySQL. ManagedModel.objects.create(data='mm1', tag=self.t1, public=True) self.assertEqual(ManagedModel.objects.update(data='mm'), 1) # A values() or values_list() query across joined models must use outer # joins appropriately. # Note: In Oracle, we expect a null CharField to return '' instead of # None. if connection.features.interprets_empty_strings_as_nulls: expected_null_charfield_repr = '' else: expected_null_charfield_repr = None self.assertSequenceEqual( Report.objects.values_list("creator__extra__info", flat=True).order_by("name"), ['e1', 'e2', expected_null_charfield_repr], ) # Similarly for select_related(), joins beyond an initial nullable join # must use outer joins so that all results are included. self.assertQuerysetEqual( Report.objects.select_related("creator", "creator__extra").order_by("name"), ['<Report: r1>', '<Report: r2>', '<Report: r3>'] ) # When there are multiple paths to a table from another table, we have # to be careful not to accidentally reuse an inappropriate join when # using select_related(). We used to return the parent's Detail record # here by mistake. 
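        # m1 is the parent and m2 the person of the Child row created below; the
        # select_related() lookup must return m2's detail ('d2'), not m1's ('d1').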
d1 = Detail.objects.create(data="d1") d2 = Detail.objects.create(data="d2") m1 = Member.objects.create(name="m1", details=d1) m2 = Member.objects.create(name="m2", details=d2) Child.objects.create(person=m2, parent=m1) obj = m1.children.select_related("person__details")[0] self.assertEqual(obj.person.details.data, 'd2') def test_order_by_resetting(self): # Calling order_by() with no parameters removes any existing ordering on the # model. But it should still be possible to add new ordering after that. qs = Author.objects.order_by().order_by('name') self.assertIn('ORDER BY', qs.query.get_compiler(qs.db).as_sql()[0]) def test_order_by_reverse_fk(self): # It is possible to order by reverse of foreign key, although that can lead # to duplicate results. c1 = SimpleCategory.objects.create(name="category1") c2 = SimpleCategory.objects.create(name="category2") CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c1) self.assertSequenceEqual(SimpleCategory.objects.order_by('categoryitem', 'pk'), [c1, c2, c1]) def test_filter_reverse_non_integer_pk(self): date_obj = DateTimePK.objects.create() extra_obj = ExtraInfo.objects.create(info='extra', date=date_obj) self.assertEqual( DateTimePK.objects.filter(extrainfo=extra_obj).get(), date_obj, ) def test_ticket10181(self): # Avoid raising an EmptyResultSet if an inner query is probably # empty (and hence, not executed). self.assertQuerysetEqual( Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])), [] ) def test_ticket15316_filter_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) def test_ticket15316_exclude_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_filter_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_exclude_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = 
CategoryItem.objects.exclude(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) def test_ticket15316_one2one_filter_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False).order_by('pk') self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) def test_ticket15316_one2one_exclude_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) CategoryItem.objects.create(category=c0) CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_one2one_filter_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) CategoryItem.objects.create(category=c0) CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_one2one_exclude_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True).order_by('pk') self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) class Queries5Tests(TestCase): @classmethod def setUpTestData(cls): # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the # Meta.ordering will be rank3, rank2, rank1. n1 = Note.objects.create(note='n1', misc='foo', id=1) n2 = Note.objects.create(note='n2', misc='bar', id=2) e1 = ExtraInfo.objects.create(info='e1', note=n1) e2 = ExtraInfo.objects.create(info='e2', note=n2) a1 = Author.objects.create(name='a1', num=1001, extra=e1) a2 = Author.objects.create(name='a2', num=2002, extra=e1) a3 = Author.objects.create(name='a3', num=3003, extra=e2) cls.rank1 = Ranking.objects.create(rank=2, author=a2) Ranking.objects.create(rank=1, author=a3) Ranking.objects.create(rank=3, author=a1) def test_ordering(self): # Cross model ordering is possible in Meta, too. 
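        # With the default Meta ordering the rankings come back for a1, a2, a3
        # (ranks 3, 2, 1); order_by('rank') reverses that sequence.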
self.assertQuerysetEqual( Ranking.objects.all(), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) self.assertQuerysetEqual( Ranking.objects.all().order_by('rank'), ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>'] ) # Ordering of extra() pieces is possible, too and you can mix extra # fields and model fields in the ordering. self.assertQuerysetEqual( Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']), ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>'] ) sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank') qs = Ranking.objects.extra(select={'good': sql}) self.assertEqual( [o.good for o in qs.extra(order_by=('-good',))], [True, False, False] ) self.assertQuerysetEqual( qs.extra(order_by=('-good', 'id')), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) # Despite having some extra aliases in the query, we can still omit # them in a values() query. dicts = qs.values('id', 'rank').order_by('id') self.assertEqual( [d['rank'] for d in dicts], [2, 1, 3] ) def test_ticket7256(self): # An empty values() call includes all aliases, including those from an # extra() sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank') qs = Ranking.objects.extra(select={'good': sql}) dicts = qs.values().order_by('id') for d in dicts: del d['id'] del d['author_id'] self.assertEqual( [sorted(d.items()) for d in dicts], [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]] ) def test_ticket7045(self): # Extra tables used to crash SQL construction on the second use. qs = Ranking.objects.extra(tables=['django_site']) qs.query.get_compiler(qs.db).as_sql() # test passes if this doesn't raise an exception. qs.query.get_compiler(qs.db).as_sql() def test_ticket9848(self): # Make sure that updates which only filter on sub-tables don't # inadvertently update the wrong records (bug #9848). author_start = Author.objects.get(name='a1') ranking_start = Ranking.objects.get(author__name='a1') # Make sure that the IDs from different tables don't happen to match. self.assertQuerysetEqual( Ranking.objects.filter(author__name='a1'), ['<Ranking: 3: a1>'] ) self.assertEqual( Ranking.objects.filter(author__name='a1').update(rank=4636), 1 ) r = Ranking.objects.get(author__name='a1') self.assertEqual(r.id, ranking_start.id) self.assertEqual(r.author.id, author_start.id) self.assertEqual(r.rank, 4636) r.rank = 3 r.save() self.assertQuerysetEqual( Ranking.objects.all(), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) def test_ticket5261(self): # Test different empty excludes. self.assertQuerysetEqual( Note.objects.exclude(Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.filter(~Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.filter(~Q() | ~Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.exclude(~Q() & ~Q()), ['<Note: n1>', '<Note: n2>'] ) def test_extra_select_literal_percent_s(self): # Allow %%s to escape select clauses self.assertEqual( Note.objects.extra(select={'foo': "'%%s'"})[0].foo, '%s' ) self.assertEqual( Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo, '%s bar %s' ) self.assertEqual( Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo, 'bar %s' ) class SelectRelatedTests(TestCase): def test_tickets_3045_3288(self): # Once upon a time, select_related() with circular relations would loop # infinitely if you forgot to specify "depth". 
Now we set an arbitrary # default upper bound. self.assertQuerysetEqual(X.objects.all(), []) self.assertQuerysetEqual(X.objects.select_related(), []) class SubclassFKTests(TestCase): def test_ticket7778(self): # Model subclasses could not be deleted if a nullable foreign key # relates to a model that relates back. num_celebs = Celebrity.objects.count() tvc = TvChef.objects.create(name="Huey") self.assertEqual(Celebrity.objects.count(), num_celebs + 1) Fan.objects.create(fan_of=tvc) Fan.objects.create(fan_of=tvc) tvc.delete() # The parent object should have been deleted as well. self.assertEqual(Celebrity.objects.count(), num_celebs) class CustomPkTests(TestCase): def test_ticket7371(self): self.assertQuerysetEqual(Related.objects.order_by('custom'), []) class NullableRelOrderingTests(TestCase): def test_ticket10028(self): # Ordering by model related to nullable relations(!) should use outer # joins, so that all results are included. Plaything.objects.create(name="p1") self.assertQuerysetEqual( Plaything.objects.all(), ['<Plaything: p1>'] ) def test_join_already_in_query(self): # Ordering by model related to nullable relations should not change # the join type of already existing joins. Plaything.objects.create(name="p1") s = SingleObject.objects.create(name='s') r = RelatedObject.objects.create(single=s, f=1) Plaything.objects.create(name="p2", others=r) qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk') self.assertNotIn('JOIN', str(qs.query)) qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk') self.assertIn('INNER', str(qs.query)) qs = qs.order_by('others__single__name') # The ordering by others__single__pk will add one new join (to single) # and that join must be LEFT join. The already existing join to related # objects must be kept INNER. So, we have both an INNER and a LEFT join # in the query. self.assertEqual(str(qs.query).count('LEFT'), 1) self.assertEqual(str(qs.query).count('INNER'), 1) self.assertQuerysetEqual( qs, ['<Plaything: p2>'] ) class DisjunctiveFilterTests(TestCase): @classmethod def setUpTestData(cls): cls.n1 = Note.objects.create(note='n1', misc='foo', id=1) ExtraInfo.objects.create(info='e1', note=cls.n1) def test_ticket7872(self): # Another variation on the disjunctive filtering theme. # For the purposes of this regression test, it's important that there is no # Join object related to the LeafA we create. LeafA.objects.create(data='first') self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>']) self.assertQuerysetEqual( LeafA.objects.filter(Q(data='first') | Q(join__b__data='second')), ['<LeafA: first>'] ) def test_ticket8283(self): # Checking that applying filters after a disjunction works correctly. 
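        # Whichever side of the OR comes first, applying .filter(note=self.n1)
        # afterwards must narrow the result down to e1 only.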
self.assertQuerysetEqual( (ExtraInfo.objects.filter(note=self.n1) | ExtraInfo.objects.filter(info='e2')).filter(note=self.n1), ['<ExtraInfo: e1>'] ) self.assertQuerysetEqual( (ExtraInfo.objects.filter(info='e2') | ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1), ['<ExtraInfo: e1>'] ) class Queries6Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic) cls.t3 = Tag.objects.create(name='t3', parent=cls.t1) cls.t4 = Tag.objects.create(name='t4', parent=cls.t3) cls.t5 = Tag.objects.create(name='t5', parent=cls.t3) n1 = Note.objects.create(note='n1', misc='foo', id=1) ann1 = Annotation.objects.create(name='a1', tag=cls.t1) ann1.notes.add(n1) Annotation.objects.create(name='a2', tag=cls.t4) def test_parallel_iterators(self): # Parallel iterators work. qs = Tag.objects.all() i1, i2 = iter(qs), iter(qs) self.assertEqual(repr(next(i1)), '<Tag: t1>') self.assertEqual(repr(next(i1)), '<Tag: t2>') self.assertEqual(repr(next(i2)), '<Tag: t1>') self.assertEqual(repr(next(i2)), '<Tag: t2>') self.assertEqual(repr(next(i2)), '<Tag: t3>') self.assertEqual(repr(next(i1)), '<Tag: t3>') qs = X.objects.all() self.assertFalse(qs) self.assertFalse(qs) def test_nested_queries_sql(self): # Nested queries should not evaluate the inner query as part of constructing the # SQL (so we should see a nested query here, indicated by two "SELECT" calls). qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy")) self.assertEqual( qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'), 2 ) def test_tickets_8921_9188(self): # Incorrect SQL was being generated for certain types of exclude() # queries that crossed multi-valued relations (#8921, #9188 and some # preemptively discovered cases). self.assertQuerysetEqual( PointerA.objects.filter(connection__pointerb__id=1), [] ) self.assertQuerysetEqual( PointerA.objects.exclude(connection__pointerb__id=1), [] ) self.assertQuerysetEqual( Tag.objects.exclude(children=None), ['<Tag: t1>', '<Tag: t3>'] ) # This example is tricky because the parent could be NULL, so only checking # parents with annotations omits some results (tag t1, in this case). self.assertQuerysetEqual( Tag.objects.exclude(parent__annotation__name="a1"), ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>'] ) # The annotation->tag link is single values and tag->children links is # multi-valued. So we have to split the exclude filter in the middle # and then optimize the inner query without losing results. self.assertQuerysetEqual( Annotation.objects.exclude(tag__children__name="t2"), ['<Annotation: a2>'] ) # Nested queries are possible (although should be used with care, since # they have performance problems on backends like MySQL. self.assertQuerysetEqual( Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")), ['<Annotation: a1>'] ) def test_ticket3739(self): # The all() method on querysets returns a copy of the queryset. 
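        # all() must hand back a new QuerySet instance, which assertIsNot() checks by identity.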
q1 = Tag.objects.order_by('name') self.assertIsNot(q1, q1.all()) def test_ticket_11320(self): qs = Tag.objects.exclude(category=None).exclude(category__name='foo') self.assertEqual(str(qs.query).count(' INNER JOIN '), 1) def test_distinct_ordered_sliced_subquery_aggregation(self): self.assertEqual(Tag.objects.distinct().order_by('category__name')[:3].count(), 3) def test_multiple_columns_with_the_same_name_slice(self): self.assertEqual( list(Tag.objects.order_by('name').values_list('name', 'category__name')[:2]), [('t1', 'Generic'), ('t2', 'Generic')], ) self.assertSequenceEqual( Tag.objects.order_by('name').select_related('category')[:2], [self.t1, self.t2], ) self.assertEqual( list(Tag.objects.order_by('-name').values_list('name', 'parent__name')[:2]), [('t5', 't3'), ('t4', 't3')], ) self.assertSequenceEqual( Tag.objects.order_by('-name').select_related('parent')[:2], [self.t5, self.t4], ) class RawQueriesTests(TestCase): @classmethod def setUpTestData(cls): Note.objects.create(note='n1', misc='foo', id=1) def test_ticket14729(self): # Test representation of raw query with one or few parameters passed as list query = "SELECT * FROM queries_note WHERE note = %s" params = ['n1'] qs = Note.objects.raw(query, params=params) self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>") query = "SELECT * FROM queries_note WHERE note = %s and misc = %s" params = ['n1', 'foo'] qs = Note.objects.raw(query, params=params) self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>") class GeneratorExpressionTests(SimpleTestCase): def test_ticket10432(self): # Using an empty iterator as the rvalue for an "__in" # lookup is legal. self.assertCountEqual(Note.objects.filter(pk__in=iter(())), []) class ComparisonTests(TestCase): @classmethod def setUpTestData(cls): cls.n1 = Note.objects.create(note='n1', misc='foo', id=1) e1 = ExtraInfo.objects.create(info='e1', note=cls.n1) cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1) def test_ticket8597(self): # Regression tests for case-insensitive comparisons Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1) Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1) self.assertQuerysetEqual( Item.objects.filter(name__iexact="A_b"), ['<Item: a_b>'] ) self.assertQuerysetEqual( Item.objects.filter(name__iexact="x%Y"), ['<Item: x%y>'] ) self.assertQuerysetEqual( Item.objects.filter(name__istartswith="A_b"), ['<Item: a_b>'] ) self.assertQuerysetEqual( Item.objects.filter(name__iendswith="A_b"), ['<Item: a_b>'] ) class ExistsSql(TestCase): def test_exists(self): with CaptureQueriesContext(connection) as captured_queries: self.assertFalse(Tag.objects.exists()) # Ok - so the exist query worked - but did it include too many columns? 
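        # exists() should not select any model columns, so neither the quoted 'id'
        # nor 'name' may appear in the generated SQL.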
self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]['sql'] id, name = connection.ops.quote_name('id'), connection.ops.quote_name('name') self.assertNotIn(id, qstr) self.assertNotIn(name, qstr) def test_ticket_18414(self): Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='two', created=datetime.datetime.now()) self.assertTrue(Article.objects.exists()) self.assertTrue(Article.objects.distinct().exists()) self.assertTrue(Article.objects.distinct()[1:3].exists()) self.assertFalse(Article.objects.distinct()[1:1].exists()) @skipUnlessDBFeature('can_distinct_on_fields') def test_ticket_18414_distinct_on(self): Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='two', created=datetime.datetime.now()) self.assertTrue(Article.objects.distinct('name').exists()) self.assertTrue(Article.objects.distinct('name')[1:2].exists()) self.assertFalse(Article.objects.distinct('name')[2:3].exists()) class QuerysetOrderedTests(unittest.TestCase): """ Tests for the Queryset.ordered attribute. """ def test_no_default_or_explicit_ordering(self): self.assertIs(Annotation.objects.all().ordered, False) def test_cleared_default_ordering(self): self.assertIs(Tag.objects.all().ordered, True) self.assertIs(Tag.objects.all().order_by().ordered, False) def test_explicit_ordering(self): self.assertIs(Annotation.objects.all().order_by('id').ordered, True) def test_empty_queryset(self): self.assertIs(Annotation.objects.none().ordered, True) def test_order_by_extra(self): self.assertIs(Annotation.objects.all().extra(order_by=['id']).ordered, True) def test_annotated_ordering(self): qs = Annotation.objects.annotate(num_notes=Count('notes')) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by('num_notes').ordered, True) def test_annotated_default_ordering(self): qs = Tag.objects.annotate(num_notes=Count('pk')) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by('name').ordered, True) def test_annotated_values_default_ordering(self): qs = Tag.objects.values('name').annotate(num_notes=Count('pk')) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by('name').ordered, True) @skipUnlessDBFeature('allow_sliced_subqueries_with_in') class SubqueryTests(TestCase): @classmethod def setUpTestData(cls): NamedCategory.objects.create(id=1, name='first') NamedCategory.objects.create(id=2, name='second') NamedCategory.objects.create(id=3, name='third') NamedCategory.objects.create(id=4, name='fourth') def test_ordered_subselect(self): "Subselects honor any manual ordering" query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2]) self.assertEqual(set(query.values_list('id', flat=True)), {3, 4}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2]) self.assertEqual(set(query.values_list('id', flat=True)), {3, 4}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]) self.assertEqual(set(query.values_list('id', flat=True)), {3}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:]) self.assertEqual(set(query.values_list('id', flat=True)), {1, 2}) def test_slice_subquery_and_query(self): """ Slice a query that has a sliced subquery """ query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2] self.assertEqual({x.id for x in query}, {3, 
4}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3] self.assertEqual({x.id for x in query}, {3}) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:] self.assertEqual({x.id for x in query}, {2}) def test_related_sliced_subquery(self): """ Related objects constraints can safely contain sliced subqueries. refs #22434 """ generic = NamedCategory.objects.create(id=5, name="Generic") t1 = Tag.objects.create(name='t1', category=generic) t2 = Tag.objects.create(name='t2', category=generic) ManagedModel.objects.create(data='mm1', tag=t1, public=True) mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True) query = ManagedModel.normal_manager.filter( tag__in=Tag.objects.order_by('-id')[:1] ) self.assertEqual({x.id for x in query}, {mm2.id}) def test_sliced_delete(self): "Delete queries can safely contain sliced subqueries" DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete() self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3}) DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete() self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3}) DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete() self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3}) def test_distinct_ordered_sliced_subquery(self): # Implicit values('id'). self.assertSequenceEqual( NamedCategory.objects.filter( id__in=NamedCategory.objects.distinct().order_by('name')[0:2], ).order_by('name').values_list('name', flat=True), ['first', 'fourth'] ) # Explicit values('id'). self.assertSequenceEqual( NamedCategory.objects.filter( id__in=NamedCategory.objects.distinct().order_by('-name').values('id')[0:2], ).order_by('name').values_list('name', flat=True), ['second', 'third'] ) # Annotated value. 
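        # double_id is 2, 4, 6, 8 for ids 1-4; limiting the subquery to its first two
        # values (2 and 4) leaves exactly those two ids in the outer filter.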
self.assertSequenceEqual( DumbCategory.objects.filter( id__in=DumbCategory.objects.annotate( double_id=F('id') * 2 ).order_by('id').distinct().values('double_id')[0:2], ).order_by('id').values_list('id', flat=True), [2, 4] ) @skipUnlessDBFeature('allow_sliced_subqueries_with_in') class QuerySetBitwiseOperationTests(TestCase): @classmethod def setUpTestData(cls): school = School.objects.create() cls.room_1 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 1') cls.room_2 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 2') cls.room_3 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 3') cls.room_4 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 4') def test_or_with_rhs_slice(self): qs1 = Classroom.objects.filter(has_blackboard=True) qs2 = Classroom.objects.filter(has_blackboard=False)[:1] self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3]) def test_or_with_lhs_slice(self): qs1 = Classroom.objects.filter(has_blackboard=True)[:1] qs2 = Classroom.objects.filter(has_blackboard=False) self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4]) def test_or_with_both_slice(self): qs1 = Classroom.objects.filter(has_blackboard=False)[:1] qs2 = Classroom.objects.filter(has_blackboard=True)[:1] self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2]) def test_or_with_both_slice_and_ordering(self): qs1 = Classroom.objects.filter(has_blackboard=False).order_by('-pk')[:1] qs2 = Classroom.objects.filter(has_blackboard=True).order_by('-name')[:1] self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4]) class CloneTests(TestCase): def test_evaluated_queryset_as_argument(self): "#13227 -- If a queryset is already evaluated, it can still be used as a query arg" n = Note(note='Test1', misc='misc') n.save() e = ExtraInfo(info='good', note=n) e.save() n_list = Note.objects.all() # Evaluate the Note queryset, populating the query cache list(n_list) # Make one of cached results unpickable. n_list._result_cache[0].lock = Lock() with self.assertRaises(TypeError): pickle.dumps(n_list) # Use the note queryset in a query, and evaluate # that query in a way that involves cloning. self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good') def test_no_model_options_cloning(self): """ Cloning a queryset does not get out of hand. While complete testing is impossible, this is a sanity check against invalid use of deepcopy. refs #16759. """ opts_class = type(Note._meta) note_deepcopy = getattr(opts_class, "__deepcopy__", None) opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.") try: Note.objects.filter(pk__lte=F('pk') + 1).all() finally: if note_deepcopy is None: delattr(opts_class, "__deepcopy__") else: opts_class.__deepcopy__ = note_deepcopy def test_no_fields_cloning(self): """ Cloning a queryset does not get out of hand. While complete testing is impossible, this is a sanity check against invalid use of deepcopy. refs #16759. 
""" opts_class = type(Note._meta.get_field("misc")) note_deepcopy = getattr(opts_class, "__deepcopy__", None) opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned") try: Note.objects.filter(note=F('misc')).all() finally: if note_deepcopy is None: delattr(opts_class, "__deepcopy__") else: opts_class.__deepcopy__ = note_deepcopy class EmptyQuerySetTests(SimpleTestCase): def test_emptyqueryset_values(self): # #14366 -- Calling .values() on an empty QuerySet and then cloning # that should not cause an error self.assertCountEqual(Number.objects.none().values('num').order_by('num'), []) def test_values_subquery(self): self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values('pk')), []) self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values_list('pk')), []) def test_ticket_19151(self): # #19151 -- Calling .values() or .values_list() on an empty QuerySet # should return an empty QuerySet and not cause an error. q = Author.objects.none() self.assertCountEqual(q.values(), []) self.assertCountEqual(q.values_list(), []) class ValuesQuerysetTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.create(num=72) def test_flat_values_list(self): qs = Number.objects.values_list("num") qs = qs.values_list("num", flat=True) self.assertSequenceEqual(qs, [72]) def test_extra_values(self): # testing for ticket 14930 issues qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(1, 2)) qs = qs.order_by('value_minus_x') qs = qs.values('num') self.assertSequenceEqual(qs, [{'num': 72}]) def test_extra_values_order_twice(self): # testing for ticket 14930 issues qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'}) qs = qs.order_by('value_minus_one').order_by('value_plus_one') qs = qs.values('num') self.assertSequenceEqual(qs, [{'num': 72}]) def test_extra_values_order_multiple(self): # Postgres doesn't allow constants in order by, so check for that. 
qs = Number.objects.extra(select={ 'value_plus_one': 'num+1', 'value_minus_one': 'num-1', 'constant_value': '1' }) qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value') qs = qs.values('num') self.assertSequenceEqual(qs, [{'num': 72}]) def test_extra_values_order_in_extra(self): # testing for ticket 14930 issues qs = Number.objects.extra( select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'}, order_by=['value_minus_one'], ) qs = qs.values('num') def test_extra_select_params_values_order_in_extra(self): # testing for 23259 issue qs = Number.objects.extra( select={'value_plus_x': 'num+%s'}, select_params=[1], order_by=['value_plus_x'], ) qs = qs.filter(num=72) qs = qs.values('num') self.assertSequenceEqual(qs, [{'num': 72}]) def test_extra_multiple_select_params_values_order_by(self): # testing for 23259 issue qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(72, 72)) qs = qs.order_by('value_minus_x') qs = qs.filter(num=1) qs = qs.values('num') self.assertSequenceEqual(qs, []) def test_extra_values_list(self): # testing for ticket 14930 issues qs = Number.objects.extra(select={'value_plus_one': 'num+1'}) qs = qs.order_by('value_plus_one') qs = qs.values_list('num') self.assertSequenceEqual(qs, [(72,)]) def test_flat_extra_values_list(self): # testing for ticket 14930 issues qs = Number.objects.extra(select={'value_plus_one': 'num+1'}) qs = qs.order_by('value_plus_one') qs = qs.values_list('num', flat=True) self.assertSequenceEqual(qs, [72]) def test_field_error_values_list(self): # see #23443 msg = "Cannot resolve keyword %r into field. Join on 'name' not permitted." % 'foo' with self.assertRaisesMessage(FieldError, msg): Tag.objects.values_list('name__foo') def test_named_values_list_flat(self): msg = "'flat' and 'named' can't be used together." 
with self.assertRaisesMessage(TypeError, msg): Number.objects.values_list('num', flat=True, named=True) def test_named_values_list_bad_field_name(self): msg = "Type names and field names must be valid identifiers: '1'" with self.assertRaisesMessage(ValueError, msg): Number.objects.extra(select={'1': 'num+1'}).values_list('1', named=True).first() def test_named_values_list_with_fields(self): qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id')) values = qs.values_list('num', 'num2', named=True).first() self.assertEqual(type(values).__name__, 'Row') self.assertEqual(values._fields, ('num', 'num2')) self.assertEqual(values.num, 72) self.assertEqual(values.num2, 73) def test_named_values_list_without_fields(self): qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id')) values = qs.values_list(named=True).first() self.assertEqual(type(values).__name__, 'Row') self.assertEqual( values._fields, ('num2', 'id', 'num', 'other_num', 'another_num', 'id__count'), ) self.assertEqual(values.num, 72) self.assertEqual(values.num2, 73) self.assertEqual(values.id__count, 1) def test_named_values_list_expression_with_default_alias(self): expr = Count('id') values = Number.objects.annotate(id__count1=expr).values_list(expr, 'id__count1', named=True).first() self.assertEqual(values._fields, ('id__count2', 'id__count1')) def test_named_values_list_expression(self): expr = F('num') + 1 qs = Number.objects.annotate(combinedexpression1=expr).values_list(expr, 'combinedexpression1', named=True) values = qs.first() self.assertEqual(values._fields, ('combinedexpression2', 'combinedexpression1')) def test_named_values_pickle(self): value = Number.objects.values_list('num', 'other_num', named=True).get() self.assertEqual(value, (72, None)) self.assertEqual(pickle.loads(pickle.dumps(value)), value) class QuerySetSupportsPythonIdioms(TestCase): @classmethod def setUpTestData(cls): some_date = datetime.datetime(2014, 5, 16, 12, 1) for i in range(1, 8): Article.objects.create( name="Article {}".format(i), created=some_date) def get_ordered_articles(self): return Article.objects.all().order_by('name') def test_can_get_items_using_index_and_slice_notation(self): self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1') self.assertQuerysetEqual( self.get_ordered_articles()[1:3], ["<Article: Article 2>", "<Article: Article 3>"] ) def test_slicing_with_steps_can_be_used(self): self.assertQuerysetEqual( self.get_ordered_articles()[::2], [ "<Article: Article 1>", "<Article: Article 3>", "<Article: Article 5>", "<Article: Article 7>" ] ) def test_slicing_without_step_is_lazy(self): with self.assertNumQueries(0): self.get_ordered_articles()[0:5] def test_slicing_with_tests_is_not_lazy(self): with self.assertNumQueries(1): self.get_ordered_articles()[0:5:3] def test_slicing_can_slice_again_after_slicing(self): self.assertQuerysetEqual( self.get_ordered_articles()[0:5][0:2], ["<Article: Article 1>", "<Article: Article 2>"] ) self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:], ["<Article: Article 5>"]) self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], []) # Some more tests! self.assertQuerysetEqual( self.get_ordered_articles()[2:][0:2], ["<Article: Article 3>", "<Article: Article 4>"] ) self.assertQuerysetEqual( self.get_ordered_articles()[2:][:2], ["<Article: Article 3>", "<Article: Article 4>"] ) self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3], ["<Article: Article 5>"]) # Using an offset without a limit is also possible. 
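        # An offset with no upper bound returns everything from that position onwards.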
self.assertQuerysetEqual( self.get_ordered_articles()[5:], ["<Article: Article 6>", "<Article: Article 7>"] ) def test_slicing_cannot_filter_queryset_once_sliced(self): with self.assertRaisesMessage(AssertionError, "Cannot filter a query once a slice has been taken."): Article.objects.all()[0:5].filter(id=1) def test_slicing_cannot_reorder_queryset_once_sliced(self): with self.assertRaisesMessage(AssertionError, "Cannot reorder a query once a slice has been taken."): Article.objects.all()[0:5].order_by('id') def test_slicing_cannot_combine_queries_once_sliced(self): with self.assertRaisesMessage(AssertionError, "Cannot combine queries once a slice has been taken."): Article.objects.all()[0:1] & Article.objects.all()[4:5] def test_slicing_negative_indexing_not_supported_for_single_element(self): """hint: inverting your ordering might do what you need""" with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."): Article.objects.all()[-1] def test_slicing_negative_indexing_not_supported_for_range(self): """hint: inverting your ordering might do what you need""" with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."): Article.objects.all()[0:-5] def test_invalid_index(self): msg = 'QuerySet indices must be integers or slices, not str.' with self.assertRaisesMessage(TypeError, msg): Article.objects.all()['foo'] def test_can_get_number_of_items_in_queryset_using_standard_len(self): self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1) def test_can_combine_queries_using_and_and_or_operators(self): s1 = Article.objects.filter(name__exact='Article 1') s2 = Article.objects.filter(name__exact='Article 2') self.assertQuerysetEqual( (s1 | s2).order_by('name'), ["<Article: Article 1>", "<Article: Article 2>"] ) self.assertQuerysetEqual(s1 & s2, []) class WeirdQuerysetSlicingTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.create(num=1) Number.objects.create(num=2) Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='two', created=datetime.datetime.now()) Article.objects.create(name='three', created=datetime.datetime.now()) Article.objects.create(name='four', created=datetime.datetime.now()) food = Food.objects.create(name='spam') Eaten.objects.create(meal='spam with eggs', food=food) def test_tickets_7698_10202(self): # People like to slice with '0' as the high-water mark. 
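        # A zero upper bound must yield an empty result, and latest() is rejected
        # afterwards because it would have to reverse an already-sliced query.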
self.assertQuerysetEqual(Article.objects.all()[0:0], []) self.assertQuerysetEqual(Article.objects.all()[0:0][:10], []) self.assertEqual(Article.objects.all()[:0].count(), 0) with self.assertRaisesMessage(TypeError, 'Cannot reverse a query once a slice has been taken.'): Article.objects.all()[:0].latest('created') def test_empty_resultset_sql(self): # ticket #12192 self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1])) def test_empty_sliced_subquery(self): self.assertEqual(Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0) def test_empty_sliced_subquery_exclude(self): self.assertEqual(Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1) def test_zero_length_values_slicing(self): n = 42 with self.assertNumQueries(0): self.assertQuerysetEqual(Article.objects.values()[n:n], []) self.assertQuerysetEqual(Article.objects.values_list()[n:n], []) class EscapingTests(TestCase): def test_ticket_7302(self): # Reserved names are appropriately escaped ReservedName.objects.create(name='a', order=42) ReservedName.objects.create(name='b', order=37) self.assertQuerysetEqual( ReservedName.objects.all().order_by('order'), ['<ReservedName: b>', '<ReservedName: a>'] ) self.assertQuerysetEqual( ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')), ['<ReservedName: b>', '<ReservedName: a>'] ) class ToFieldTests(TestCase): def test_in_query(self): apple = Food.objects.create(name="apple") pear = Food.objects.create(name="pear") lunch = Eaten.objects.create(food=apple, meal="lunch") dinner = Eaten.objects.create(food=pear, meal="dinner") self.assertEqual( set(Eaten.objects.filter(food__in=[apple, pear])), {lunch, dinner}, ) def test_in_subquery(self): apple = Food.objects.create(name="apple") lunch = Eaten.objects.create(food=apple, meal="lunch") self.assertEqual( set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple'))), {lunch} ) self.assertEqual( set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple').values('eaten__meal'))), set() ) self.assertEqual( set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal='lunch'))), {apple} ) def test_nested_in_subquery(self): extra = ExtraInfo.objects.create() author = Author.objects.create(num=42, extra=extra) report = Report.objects.create(creator=author) comment = ReportComment.objects.create(report=report) comments = ReportComment.objects.filter( report__in=Report.objects.filter( creator__in=extra.author_set.all(), ), ) self.assertSequenceEqual(comments, [comment]) def test_reverse_in(self): apple = Food.objects.create(name="apple") pear = Food.objects.create(name="pear") lunch_apple = Eaten.objects.create(food=apple, meal="lunch") lunch_pear = Eaten.objects.create(food=pear, meal="dinner") self.assertEqual( set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])), {apple, pear} ) def test_single_object(self): apple = Food.objects.create(name="apple") lunch = Eaten.objects.create(food=apple, meal="lunch") dinner = Eaten.objects.create(food=apple, meal="dinner") self.assertEqual( set(Eaten.objects.filter(food=apple)), {lunch, dinner} ) def test_single_object_reverse(self): apple = Food.objects.create(name="apple") lunch = Eaten.objects.create(food=apple, meal="lunch") self.assertEqual( set(Food.objects.filter(eaten=lunch)), {apple} ) def test_recursive_fk(self): node1 = Node.objects.create(num=42) node2 = Node.objects.create(num=1, parent=node1) self.assertEqual( list(Node.objects.filter(parent=node1)), [node2] ) def test_recursive_fk_reverse(self): node1 = 
Node.objects.create(num=42) node2 = Node.objects.create(num=1, parent=node1) self.assertEqual( list(Node.objects.filter(node=node2)), [node1] ) class IsNullTests(TestCase): def test_primary_key(self): custom = CustomPk.objects.create(name='pk') null = Related.objects.create() notnull = Related.objects.create(custom=custom) self.assertSequenceEqual(Related.objects.filter(custom__isnull=False), [notnull]) self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null]) def test_to_field(self): apple = Food.objects.create(name="apple") Eaten.objects.create(food=apple, meal="lunch") Eaten.objects.create(meal="lunch") self.assertQuerysetEqual( Eaten.objects.filter(food__isnull=False), ['<Eaten: apple at lunch>'] ) self.assertQuerysetEqual( Eaten.objects.filter(food__isnull=True), ['<Eaten: None at lunch>'] ) class ConditionalTests(TestCase): """Tests whose execution depend on different environment conditions like Python version or DB backend features""" @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") t1 = Tag.objects.create(name='t1', category=generic) Tag.objects.create(name='t2', parent=t1, category=generic) t3 = Tag.objects.create(name='t3', parent=t1) Tag.objects.create(name='t4', parent=t3) Tag.objects.create(name='t5', parent=t3) def test_infinite_loop(self): # If you're not careful, it's possible to introduce infinite loops via # default ordering on foreign keys in a cycle. We detect that. with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'): list(LoopX.objects.all()) # Force queryset evaluation with list() with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'): list(LoopZ.objects.all()) # Force queryset evaluation with list() # Note that this doesn't cause an infinite loop, since the default # ordering on the Tag model is empty (and thus defaults to using "id" # for the related field). self.assertEqual(len(Tag.objects.order_by('parent')), 5) # ... but you can still order in a non-recursive fashion among linked # fields (the previous test failed because the default ordering was # recursive). self.assertQuerysetEqual( LoopX.objects.all().order_by('y__x__y__x__id'), [] ) # When grouping without specifying ordering, we add an explicit "ORDER BY NULL" # portion in MySQL to prevent unnecessary sorting. @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping') def test_null_ordering_added(self): query = Tag.objects.values_list('parent_id', flat=True).order_by().query query.group_by = ['parent_id'] sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0] fragment = "ORDER BY " pos = sql.find(fragment) self.assertEqual(sql.find(fragment, pos + 1), -1) self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment)) def test_in_list_limit(self): # The "in" lookup works with lists of 1000 items or more. # The numbers amount is picked to force three different IN batches # for Oracle, yet to be less than 2100 parameter limit for MSSQL. numbers = list(range(2050)) max_query_params = connection.features.max_query_params if max_query_params is None or max_query_params >= len(numbers): Number.objects.bulk_create(Number(num=num) for num in numbers) for number in [1000, 1001, 2000, len(numbers)]: with self.subTest(number=number): self.assertEqual(Number.objects.filter(num__in=numbers[:number]).count(), number) class UnionTests(unittest.TestCase): """ Tests for the union of two querysets. Bug #12252. 
""" @classmethod def setUpTestData(cls): objectas = [] objectbs = [] objectcs = [] a_info = ['one', 'two', 'three'] for name in a_info: o = ObjectA(name=name) o.save() objectas.append(o) b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])] for name, number, objecta in b_info: o = ObjectB(name=name, num=number, objecta=objecta) o.save() objectbs.append(o) c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])] for name, objecta, objectb in c_info: o = ObjectC(name=name, objecta=objecta, objectb=objectb) o.save() objectcs.append(o) def check_union(self, model, Q1, Q2): filter = model.objects.filter self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2))) self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2))) def test_A_AB(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_A_AB2(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux', objectb__num=2) self.check_union(ObjectA, Q1, Q2) def test_AB_ACB(self): Q1 = Q(objectb__name='deux') Q2 = Q(objectc__objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_BAB_BAC(self): Q1 = Q(objecta__objectb__name='deux') Q2 = Q(objecta__objectc__name='ein') self.check_union(ObjectB, Q1, Q2) def test_BAB_BACB(self): Q1 = Q(objecta__objectb__name='deux') Q2 = Q(objecta__objectc__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) def test_BA_BCA__BAB_BAC_BCA(self): Q1 = Q(objecta__name='one', objectc__objecta__name='two') Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) class DefaultValuesInsertTest(TestCase): def test_no_extra_params(self): """ Can create an instance of a model with only the PK field (#17056)." 
""" DumbCategory.objects.create() class ExcludeTests(TestCase): @classmethod def setUpTestData(cls): f1 = Food.objects.create(name='apples') Food.objects.create(name='oranges') Eaten.objects.create(food=f1, meal='dinner') j1 = Job.objects.create(name='Manager') cls.r1 = Responsibility.objects.create(description='Playing golf') j2 = Job.objects.create(name='Programmer') r2 = Responsibility.objects.create(description='Programming') JobResponsibilities.objects.create(job=j1, responsibility=cls.r1) JobResponsibilities.objects.create(job=j2, responsibility=r2) def test_to_field(self): self.assertQuerysetEqual( Food.objects.exclude(eaten__meal='dinner'), ['<Food: oranges>']) self.assertQuerysetEqual( Job.objects.exclude(responsibilities__description='Playing golf'), ['<Job: Programmer>']) self.assertQuerysetEqual( Responsibility.objects.exclude(jobs__name='Manager'), ['<Responsibility: Programming>']) def test_ticket14511(self): alex = Person.objects.get_or_create(name='Alex')[0] jane = Person.objects.get_or_create(name='Jane')[0] oracle = Company.objects.get_or_create(name='Oracle')[0] google = Company.objects.get_or_create(name='Google')[0] microsoft = Company.objects.get_or_create(name='Microsoft')[0] intel = Company.objects.get_or_create(name='Intel')[0] def employ(employer, employee, title): Employment.objects.get_or_create(employee=employee, employer=employer, title=title) employ(oracle, alex, 'Engineer') employ(oracle, alex, 'Developer') employ(google, alex, 'Engineer') employ(google, alex, 'Manager') employ(microsoft, alex, 'Manager') employ(intel, alex, 'Manager') employ(microsoft, jane, 'Developer') employ(intel, jane, 'Manager') alex_tech_employers = alex.employers.filter( employment__title__in=('Engineer', 'Developer')).distinct().order_by('name') self.assertSequenceEqual(alex_tech_employers, [google, oracle]) alex_nontech_employers = alex.employers.exclude( employment__title__in=('Engineer', 'Developer')).distinct().order_by('name') self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft]) def test_exclude_reverse_fk_field_ref(self): tag = Tag.objects.create() Note.objects.create(tag=tag, note='note') annotation = Annotation.objects.create(name='annotation', tag=tag) self.assertEqual(Annotation.objects.exclude(tag__note__note=F('name')).get(), annotation) def test_exclude_with_circular_fk_relation(self): self.assertEqual(ObjectB.objects.exclude(objecta__objectb__name=F('name')).count(), 0) def test_subquery_exclude_outerref(self): qs = JobResponsibilities.objects.filter( Exists(Responsibility.objects.exclude(jobs=OuterRef('job'))), ) self.assertTrue(qs.exists()) self.r1.delete() self.assertFalse(qs.exists()) def test_exclude_nullable_fields(self): number = Number.objects.create(num=1, other_num=1) Number.objects.create(num=2, other_num=2, another_num=2) self.assertSequenceEqual( Number.objects.exclude(other_num=F('another_num')), [number], ) self.assertSequenceEqual( Number.objects.exclude(num=F('another_num')), [number], ) class ExcludeTest17600(TestCase): """ Some regressiontests for ticket #17600. Some of these likely duplicate other existing tests. """ @classmethod def setUpTestData(cls): # Create a few Orders. 
        cls.o1 = Order.objects.create(pk=1)
        cls.o2 = Order.objects.create(pk=2)
        cls.o3 = Order.objects.create(pk=3)
        # Create some OrderItems for the first order with homogeneous
        # status_id values
        cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
        # Create some OrderItems for the second order with heterogeneous
        # status_id values
        cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
        cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
        cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values
        cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
        cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
        cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)

    def test_exclude_plain(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1),
            ['<Order: 3>'])

    def test_exclude_plain_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_no_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)),
            ['<Order: 3>'])

    def test_exclude_with_q_is_equal_to_plain_exclude(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1).distinct()),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1)),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    @unittest.expectedFailure
    def test_only_orders_with_all_items_having_status_1(self):
        """
        This should only return orders having ALL items set to status 1, or
        those orders not having any items at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
""" self.assertQuerysetEqual( Order.objects.exclude(~Q(items__status=1)).distinct(), ['<Order: 1>']) class Exclude15786(TestCase): """Regression test for #15786""" def test_ticket15786(self): c1 = SimpleCategory.objects.create(name='c1') c2 = SimpleCategory.objects.create(name='c2') OneToOneCategory.objects.create(category=c1) OneToOneCategory.objects.create(category=c2) rel = CategoryRelationship.objects.create(first=c1, second=c2) self.assertEqual( CategoryRelationship.objects.exclude( first__onetoonecategory=F('second__onetoonecategory') ).get(), rel ) class NullInExcludeTest(TestCase): @classmethod def setUpTestData(cls): NullableName.objects.create(name='i1') NullableName.objects.create() def test_null_in_exclude_qs(self): none_val = '' if connection.features.interprets_empty_strings_as_nulls else None self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[]), ['i1', none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i1']), [none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i3']), ['i1', none_val], attrgetter('name')) inner_qs = NullableName.objects.filter(name='i1').values_list('name') self.assertQuerysetEqual( NullableName.objects.exclude(name__in=inner_qs), [none_val], attrgetter('name')) # The inner queryset wasn't executed - it should be turned # into subquery above self.assertIs(inner_qs._result_cache, None) @unittest.expectedFailure def test_col_not_in_list_containing_null(self): """ The following case is not handled properly because SQL's COL NOT IN (list containing null) handling is too weird to abstract away. """ self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[None]), ['i1'], attrgetter('name')) def test_double_exclude(self): self.assertEqual( list(NullableName.objects.filter(~~Q(name='i1'))), list(NullableName.objects.filter(Q(name='i1')))) self.assertNotIn( 'IS NOT NULL', str(NullableName.objects.filter(~~Q(name='i1')).query)) class EmptyStringsAsNullTest(TestCase): """ Filtering on non-null character fields works as expected. The reason for these tests is that Oracle treats '' as NULL, and this can cause problems in query construction. Refs #17957. """ @classmethod def setUpTestData(cls): cls.nc = NamedCategory.objects.create(name='') def test_direct_exclude(self): self.assertQuerysetEqual( NamedCategory.objects.exclude(name__in=['nonexistent']), [self.nc.pk], attrgetter('pk') ) def test_joined_exclude(self): self.assertQuerysetEqual( DumbCategory.objects.exclude(namedcategory__name__in=['nonexistent']), [self.nc.pk], attrgetter('pk') ) def test_21001(self): foo = NamedCategory.objects.create(name='foo') self.assertQuerysetEqual( NamedCategory.objects.exclude(name=''), [foo.pk], attrgetter('pk') ) class ProxyQueryCleanupTest(TestCase): def test_evaluated_proxy_count(self): """ Generating the query string doesn't alter the query's state in irreversible ways. Refs #18248. 
""" ProxyCategory.objects.create() qs = ProxyCategory.objects.all() self.assertEqual(qs.count(), 1) str(qs.query) self.assertEqual(qs.count(), 1) class WhereNodeTest(SimpleTestCase): class DummyNode: def as_sql(self, compiler, connection): return 'dummy', [] class MockCompiler: def compile(self, node): return node.as_sql(self, connection) def __call__(self, name): return connection.ops.quote_name(name) def test_empty_full_handling_conjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[self.DummyNode(), self.DummyNode()]) self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) def test_empty_full_handling_disjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()], connector='OR') with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('dummy', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', [])) def test_empty_nodes(self): compiler = WhereNodeTest.MockCompiler() empty_w = WhereNode() w = WhereNode(children=[empty_w, empty_w]) self.assertEqual(w.as_sql(compiler, connection), ('', [])) w.negate() with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.connector = 'OR' with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[empty_w, NothingNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[empty_w, NothingNode()], connector='AND') with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) class QuerySetExceptionTests(SimpleTestCase): def test_iter_exceptions(self): qs = ExtraInfo.objects.only('author') msg = "'ManyToOneRel' object has no attribute 'attname'" with self.assertRaisesMessage(AttributeError, msg): list(qs) def test_invalid_order_by(self): msg = ( "Cannot resolve keyword '*' into field. Choices are: created, id, " "name" ) with self.assertRaisesMessage(FieldError, msg): Article.objects.order_by('*') def test_invalid_queryset_model(self): msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".' 
with self.assertRaisesMessage(ValueError, msg): list(Author.objects.filter(extra=Article.objects.all())) class NullJoinPromotionOrTest(TestCase): @classmethod def setUpTestData(cls): cls.d1 = ModelD.objects.create(name='foo') d2 = ModelD.objects.create(name='bar') cls.a1 = ModelA.objects.create(name='a1', d=cls.d1) c = ModelC.objects.create(name='c') b = ModelB.objects.create(name='b', c=c) cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2) def test_ticket_17886(self): # The first Q-object is generating the match, the rest of the filters # should not remove the match even if they do not match anything. The # problem here was that b__name generates a LOUTER JOIN, then # b__c__name generates join to c, which the ORM tried to promote but # failed as that join isn't nullable. q_obj = ( Q(d__name='foo') | Q(b__name='foo') | Q(b__c__name='foo') ) qset = ModelA.objects.filter(q_obj) self.assertEqual(list(qset), [self.a1]) # We generate one INNER JOIN to D. The join is direct and not nullable # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN # for the b->c join, as a->b is nullable. self.assertEqual(str(qset.query).count('INNER JOIN'), 1) def test_isnull_filter_promotion(self): qs = ModelA.objects.filter(Q(b__name__isnull=True)) self.assertEqual(str(qs.query).count('LEFT OUTER'), 1) self.assertEqual(list(qs), [self.a1]) qs = ModelA.objects.filter(~Q(b__name__isnull=True)) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(list(qs), [self.a2]) qs = ModelA.objects.filter(~~Q(b__name__isnull=True)) self.assertEqual(str(qs.query).count('LEFT OUTER'), 1) self.assertEqual(list(qs), [self.a1]) qs = ModelA.objects.filter(Q(b__name__isnull=False)) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(list(qs), [self.a2]) qs = ModelA.objects.filter(~Q(b__name__isnull=False)) self.assertEqual(str(qs.query).count('LEFT OUTER'), 1) self.assertEqual(list(qs), [self.a1]) qs = ModelA.objects.filter(~~Q(b__name__isnull=False)) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(list(qs), [self.a2]) def test_null_join_demotion(self): qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True)) self.assertIn(' INNER JOIN ', str(qs.query)) qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False)) self.assertIn(' INNER JOIN ', str(qs.query)) qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True)) self.assertIn(' LEFT OUTER JOIN ', str(qs.query)) qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False)) self.assertIn(' LEFT OUTER JOIN ', str(qs.query)) def test_ticket_21366(self): n = Note.objects.create(note='n', misc='m') e = ExtraInfo.objects.create(info='info', note=n) a = Author.objects.create(name='Author1', num=1, extra=e) Ranking.objects.create(rank=1, author=a) r1 = Report.objects.create(name='Foo', creator=a) r2 = Report.objects.create(name='Bar') Report.objects.create(name='Bar', creator=a) qs = Report.objects.filter( Q(creator__ranking__isnull=True) | Q(creator__ranking__rank=1, name='Foo') ) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) self.assertEqual(str(qs.query).count(' JOIN '), 2) self.assertSequenceEqual(qs.order_by('name'), [r2, r1]) def test_ticket_21748(self): i1 = Identifier.objects.create(name='i1') i2 = Identifier.objects.create(name='i2') i3 = Identifier.objects.create(name='i3') Program.objects.create(identifier=i1) Channel.objects.create(identifier=i1) Program.objects.create(identifier=i2) 
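        # Resulting layout: i1 has both a Program and a Channel, i2 only a
        # Program, i3 neither. filter(program=None, channel=None) should match
        # only i3, while the corresponding exclude() keeps i1 and i2, as
        # asserted just below.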
self.assertSequenceEqual(Identifier.objects.filter(program=None, channel=None), [i3]) self.assertSequenceEqual(Identifier.objects.exclude(program=None, channel=None).order_by('name'), [i1, i2]) def test_ticket_21748_double_negated_and(self): i1 = Identifier.objects.create(name='i1') i2 = Identifier.objects.create(name='i2') Identifier.objects.create(name='i3') p1 = Program.objects.create(identifier=i1) c1 = Channel.objects.create(identifier=i1) Program.objects.create(identifier=i2) # Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for # join promotion. qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk') qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk') self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x) self.assertEqual(str(qs1_filter.query).count('JOIN'), str(qs1_doubleneg.query).count('JOIN')) self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN')) self.assertEqual(str(qs1_filter.query).count('INNER JOIN'), str(qs1_doubleneg.query).count('INNER JOIN')) def test_ticket_21748_double_negated_or(self): i1 = Identifier.objects.create(name='i1') i2 = Identifier.objects.create(name='i2') Identifier.objects.create(name='i3') p1 = Program.objects.create(identifier=i1) c1 = Channel.objects.create(identifier=i1) p2 = Program.objects.create(identifier=i2) # Test OR + doubleneg. The expected result is that channel is LOUTER # joined, program INNER joined qs1_filter = Identifier.objects.filter( Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id) ).order_by('pk') qs1_doubleneg = Identifier.objects.exclude( ~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)) ).order_by('pk') self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x) self.assertEqual(str(qs1_filter.query).count('JOIN'), str(qs1_doubleneg.query).count('JOIN')) self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN')) self.assertEqual(str(qs1_filter.query).count('INNER JOIN'), str(qs1_doubleneg.query).count('INNER JOIN')) def test_ticket_21748_complex_filter(self): i1 = Identifier.objects.create(name='i1') i2 = Identifier.objects.create(name='i2') Identifier.objects.create(name='i3') p1 = Program.objects.create(identifier=i1) c1 = Channel.objects.create(identifier=i1) p2 = Program.objects.create(identifier=i2) # Finally, a more complex case, one time in a way where each # NOT is pushed to lowest level in the boolean tree, and # another query where this isn't done. qs1 = Identifier.objects.filter( ~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id)) ).order_by('pk') qs2 = Identifier.objects.filter( Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id)) ).order_by('pk') self.assertQuerysetEqual(qs1, qs2, lambda x: x) self.assertEqual(str(qs1.query).count('JOIN'), str(qs2.query).count('JOIN')) self.assertEqual(0, str(qs1.query).count('INNER JOIN')) self.assertEqual(str(qs1.query).count('INNER JOIN'), str(qs2.query).count('INNER JOIN')) class ReverseJoinTrimmingTest(TestCase): def test_reverse_trimming(self): # We don't accidentally trim reverse joins - we can't know if there is # anything on the other side of the join, so trimming reverse joins # can't be done, ever. t = Tag.objects.create() qs = Tag.objects.filter(annotation__tag=t.pk) self.assertIn('INNER JOIN', str(qs.query)) self.assertEqual(list(qs), []) class JoinReuseTest(TestCase): """ The queries reuse joins sensibly (for example, direct joins are always reused). 
""" def test_fk_reuse(self): qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_select_related(self): qs = Annotation.objects.filter(tag__name='foo').select_related('tag') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_annotation(self): qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_disjunction(self): qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_order_by(self): qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_revo2o_reuse(self): qs = Detail.objects.filter(member__name='foo').filter(member__name='foo') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_revfk_noreuse(self): qs = Author.objects.filter(report__name='r4').filter(report__name='r1') self.assertEqual(str(qs.query).count('JOIN'), 2) def test_inverted_q_across_relations(self): """ When a trimmable join is specified in the query (here school__), the ORM detects it and removes unnecessary joins. The set of reusable joins are updated after trimming the query so that other lookups don't consider that the outer query's filters are in effect for the subquery (#26551). """ springfield_elementary = School.objects.create() hogward = School.objects.create() Student.objects.create(school=springfield_elementary) hp = Student.objects.create(school=hogward) Classroom.objects.create(school=hogward, name='Potion') Classroom.objects.create(school=springfield_elementary, name='Main') qs = Student.objects.filter( ~(Q(school__classroom__name='Main') & Q(school__classroom__has_blackboard=None)) ) self.assertSequenceEqual(qs, [hp]) class DisjunctionPromotionTests(TestCase): def test_disjunction_promotion_select_related(self): fk1 = FK1.objects.create(f1='f1', f2='f2') basea = BaseA.objects.create(a=fk1) qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2)) self.assertEqual(str(qs.query).count(' JOIN '), 0) qs = qs.select_related('a', 'b') self.assertEqual(str(qs.query).count(' INNER JOIN '), 0) self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2) with self.assertNumQueries(1): self.assertSequenceEqual(qs, [basea]) self.assertEqual(qs[0].a, fk1) self.assertIs(qs[0].b, None) def test_disjunction_promotion1(self): # Pre-existing join, add two ORed filters to the same join, # all joins can be INNER JOINS. qs = BaseA.objects.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) # Reverse the order of AND and OR filters. qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = qs.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 2) def test_disjunction_promotion2(self): qs = BaseA.objects.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) # Now we have two different joins in an ORed condition, these # must be OUTER joins. The pre-existing join should remain INNER. qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) # Reverse case. 
        qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)

    def test_disjunction_promotion3(self):
        qs = BaseA.objects.filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # The ANDed a__f2 filter allows us to keep using INNER JOIN
        # even inside the ORed case. If the join to a__ returns nothing,
        # the ANDed filter for a__f2 can't be true.
        qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)

    def test_disjunction_promotion3_demote(self):
        # This one needs demotion logic: the first filter causes a to be
        # outer joined, the second filter makes it inner join again.
        qs = BaseA.objects.filter(
            Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)

    def test_disjunction_promotion4_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        # Demote needed for the "a" join. It is marked as outer join by
        # above filter (even if it is trimmed away).
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)

    def test_disjunction_promotion4(self):
        qs = BaseA.objects.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)

    def test_disjunction_promotion5_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        # Note that the above filters on a force the join to an
        # inner join even if it is trimmed.
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # So, now the a__f1 join doesn't need promotion.
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # But b__f1 does.
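        # b appears only inside the OR, so nothing guarantees a matching b row;
        # its join must be promoted to LEFT OUTER, while the a join can stay
        # INNER because the earlier ANDed Q(a=1) | Q(a=2) filter references a.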
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo')) # Now the join to a is created as LOUTER self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) qs = qs.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) def test_disjunction_promotion6(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('JOIN'), 0) qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0) qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo')) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) qs = qs.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0) def test_disjunction_promotion7(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count('JOIN'), 0) qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar'))) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) qs = BaseA.objects.filter( (Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo')) ) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3) self.assertEqual(str(qs.query).count('INNER JOIN'), 0) qs = BaseA.objects.filter( Q(a__f1='foo') | Q(a__f1='bar') & (Q(b__f1='bar') | Q(c__f1='foo')) ) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) def test_disjunction_promotion_fexpression(self): qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo')) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo')) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3) qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo')) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3) qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2))) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) self.assertEqual(str(qs.query).count('INNER JOIN'), 0) class ManyToManyExcludeTest(TestCase): def test_exclude_many_to_many(self): Identifier.objects.create(name='extra') program = Program.objects.create(identifier=Identifier.objects.create(name='program')) channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel')) channel.programs.add(program) # channel contains 'program1', so all Identifiers except that one # should be returned self.assertQuerysetEqual( Identifier.objects.exclude(program__channel=channel).order_by('name'), ['<Identifier: channel>', '<Identifier: extra>'] ) self.assertQuerysetEqual( Identifier.objects.exclude(program__channel=None).order_by('name'), ['<Identifier: program>'] ) def test_ticket_12823(self): pg3 = Page.objects.create(text='pg3') pg2 = Page.objects.create(text='pg2') pg1 = Page.objects.create(text='pg1') pa1 = Paragraph.objects.create(text='pa1') pa1.page.set([pg1, pg2]) pa2 = Paragraph.objects.create(text='pa2') pa2.page.set([pg2, pg3]) pa3 = Paragraph.objects.create(text='pa3') ch1 = Chapter.objects.create(title='ch1', paragraph=pa1) ch2 = Chapter.objects.create(title='ch2', paragraph=pa2) ch3 = 
Chapter.objects.create(title='ch3', paragraph=pa3) b1 = Book.objects.create(title='b1', chapter=ch1) b2 = Book.objects.create(title='b2', chapter=ch2) b3 = Book.objects.create(title='b3', chapter=ch3) q = Book.objects.exclude(chapter__paragraph__page__text='pg1') self.assertNotIn('IS NOT NULL', str(q.query)) self.assertEqual(len(q), 2) self.assertNotIn(b1, q) self.assertIn(b2, q) self.assertIn(b3, q) class RelabelCloneTest(TestCase): def test_ticket_19964(self): my1 = MyObject.objects.create(data='foo') my1.parent = my1 my1.save() my2 = MyObject.objects.create(data='bar', parent=my1) parents = MyObject.objects.filter(parent=F('id')) children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id')) self.assertEqual(list(parents), [my1]) # Evaluating the children query (which has parents as part of it) does # not change results for the parents query. self.assertEqual(list(children), [my2]) self.assertEqual(list(parents), [my1]) class Ticket20101Tests(TestCase): def test_ticket_20101(self): """ Tests QuerySet ORed combining in exclude subquery case. """ t = Tag.objects.create(name='foo') a1 = Annotation.objects.create(tag=t, name='a1') a2 = Annotation.objects.create(tag=t, name='a2') a3 = Annotation.objects.create(tag=t, name='a3') n = Note.objects.create(note='foo', misc='bar') qs1 = Note.objects.exclude(annotation__in=[a1, a2]) qs2 = Note.objects.filter(annotation__in=[a3]) self.assertIn(n, qs1) self.assertNotIn(n, qs2) self.assertIn(n, (qs1 | qs2)) class EmptyStringPromotionTests(SimpleTestCase): def test_empty_string_promotion(self): qs = RelatedObject.objects.filter(single__name='') if connection.features.interprets_empty_strings_as_nulls: self.assertIn('LEFT OUTER JOIN', str(qs.query)) else: self.assertNotIn('LEFT OUTER JOIN', str(qs.query)) class ValuesSubqueryTests(TestCase): def test_values_in_subquery(self): # If a values() queryset is used, then the given values # will be used instead of forcing use of the relation's field. o1 = Order.objects.create(id=-2) o2 = Order.objects.create(id=-1) oi1 = OrderItem.objects.create(order=o1, status=0) oi1.status = oi1.pk oi1.save() OrderItem.objects.create(order=o2, status=0) # The query below should match o1 as it has related order_item # with id == status. 
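        # Rough shape of the generated SQL (details vary by backend):
        #   ... JOIN orderitem ON orderitem.order_id = order.id
        #   WHERE orderitem.id IN (SELECT U0.status FROM orderitem U0)
        # i.e. the values_list('status') subquery supplies status values rather
        # than OrderItem primary keys; only oi1 has id == status, hence [o1].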
self.assertSequenceEqual(Order.objects.filter(items__in=OrderItem.objects.values_list('status')), [o1]) class DoubleInSubqueryTests(TestCase): def test_double_subquery_in(self): lfa1 = LeafA.objects.create(data='foo') lfa2 = LeafA.objects.create(data='bar') lfb1 = LeafB.objects.create(data='lfb1') lfb2 = LeafB.objects.create(data='lfb2') Join.objects.create(a=lfa1, b=lfb1) Join.objects.create(a=lfa2, b=lfb2) leaf_as = LeafA.objects.filter(data='foo').values_list('pk', flat=True) joins = Join.objects.filter(a__in=leaf_as).values_list('b__id', flat=True) qs = LeafB.objects.filter(pk__in=joins) self.assertSequenceEqual(qs, [lfb1]) class Ticket18785Tests(SimpleTestCase): def test_ticket_18785(self): # Test join trimming from ticket18785 qs = Item.objects.exclude( note__isnull=False ).filter( name='something', creator__extra__isnull=True ).order_by() self.assertEqual(1, str(qs.query).count('INNER JOIN')) self.assertEqual(0, str(qs.query).count('OUTER JOIN')) class Ticket20788Tests(TestCase): def test_ticket_20788(self): Paragraph.objects.create() paragraph = Paragraph.objects.create() page = paragraph.page.create() chapter = Chapter.objects.create(paragraph=paragraph) Book.objects.create(chapter=chapter) paragraph2 = Paragraph.objects.create() Page.objects.create() chapter2 = Chapter.objects.create(paragraph=paragraph2) book2 = Book.objects.create(chapter=chapter2) sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page) self.assertSequenceEqual(sentences_not_in_pub, [book2]) class Ticket12807Tests(TestCase): def test_ticket_12807(self): p1 = Paragraph.objects.create() p2 = Paragraph.objects.create() # The ORed condition below should have no effect on the query - the # ~Q(pk__in=[]) will always be True. qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk)) self.assertSequenceEqual(qs, [p1]) class RelatedLookupTypeTests(TestCase): error = 'Cannot query "%s": Must be "%s" instance.' @classmethod def setUpTestData(cls): cls.oa = ObjectA.objects.create(name="oa") cls.poa = ProxyObjectA.objects.get(name="oa") cls.coa = ChildObjectA.objects.create(name="coa") cls.wrong_type = Order.objects.create(id=cls.oa.pk) cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1) ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2) cls.pob = ProxyObjectB.objects.all() ObjectC.objects.create(childobjecta=cls.coa) def test_wrong_type_lookup(self): """ A ValueError is raised when the incorrect object type is passed to a query lookup. """ # Passing incorrect object type with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)): ObjectB.objects.get(objecta=self.wrong_type) with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)): ObjectB.objects.filter(objecta__in=[self.wrong_type]) with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)): ObjectB.objects.filter(objecta=self.wrong_type) with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)): ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob]) # Passing an object of the class on which query is done. 
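        # self.ob is an ObjectB, i.e. an instance of the model being queried,
        # but the lookups below expect ObjectA / ChildObjectA values, so it
        # must still be rejected with the same ValueError.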
with self.assertRaisesMessage(ValueError, self.error % (self.ob, ObjectA._meta.object_name)): ObjectB.objects.filter(objecta__in=[self.poa, self.ob]) with self.assertRaisesMessage(ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)): ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob]) def test_wrong_backward_lookup(self): """ A ValueError is raised when the incorrect object type is passed to a query lookup for backward relations. """ with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)): ObjectA.objects.filter(objectb__in=[self.oa, self.ob]) with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)): ObjectA.objects.exclude(objectb=self.oa) with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)): ObjectA.objects.get(objectb=self.wrong_type) def test_correct_lookup(self): """ When passing proxy model objects, child objects, or parent objects, lookups work fine. """ out_a = ['<ObjectA: oa>'] out_b = ['<ObjectB: ob>', '<ObjectB: pob>'] out_c = ['<ObjectC: >'] # proxy model objects self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b) self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2) # child objects self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), []) self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b) self.assertQuerysetEqual( ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'), out_b ) # parent objects self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c) # QuerySet related object type checking shouldn't issue queries # (the querysets aren't evaluated here, hence zero queries) (#23266). with self.assertNumQueries(0): ObjectB.objects.filter(objecta__in=ObjectA.objects.all()) def test_values_queryset_lookup(self): """ #23396 - Ensure ValueQuerySets are not checked for compatibility with the lookup field """ # Make sure the num and objecta field values match. ob = ObjectB.objects.get(name='ob') ob.num = ob.objecta.pk ob.save() pob = ObjectB.objects.get(name='pob') pob.num = pob.objecta.pk pob.save() self.assertQuerysetEqual(ObjectB.objects.filter( objecta__in=ObjectB.objects.all().values_list('num') ).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>']) class Ticket14056Tests(TestCase): def test_ticket_14056(self): s1 = SharedConnection.objects.create(data='s1') s2 = SharedConnection.objects.create(data='s2') s3 = SharedConnection.objects.create(data='s3') PointerA.objects.create(connection=s2) expected_ordering = ( [s1, s3, s2] if connection.features.nulls_order_largest else [s2, s1, s3] ) self.assertSequenceEqual(SharedConnection.objects.order_by('-pointera__connection', 'pk'), expected_ordering) class Ticket20955Tests(TestCase): def test_ticket_20955(self): jack = Staff.objects.create(name='jackstaff') jackstaff = StaffUser.objects.create(staff=jack) jill = Staff.objects.create(name='jillstaff') jillstaff = StaffUser.objects.create(staff=jill) task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task") task_get = Task.objects.get(pk=task.pk) # Load data so that assertNumQueries doesn't complain about the get # version's queries. 
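        # Touching these attributes now performs the follow-up queries for the
        # plain get() version up front, so the assertNumQueries(0) block below
        # only measures the select_related() queryset.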
task_get.creator.staffuser.staff task_get.owner.staffuser.staff qs = Task.objects.select_related( 'creator__staffuser__staff', 'owner__staffuser__staff') self.assertEqual(str(qs.query).count(' JOIN '), 6) task_select_related = qs.get(pk=task.pk) with self.assertNumQueries(0): self.assertEqual(task_select_related.creator.staffuser.staff, task_get.creator.staffuser.staff) self.assertEqual(task_select_related.owner.staffuser.staff, task_get.owner.staffuser.staff) class Ticket21203Tests(TestCase): def test_ticket_21203(self): p = Ticket21203Parent.objects.create(parent_bool=True) c = Ticket21203Child.objects.create(parent=p) qs = Ticket21203Child.objects.select_related('parent').defer('parent__created') self.assertSequenceEqual(qs, [c]) self.assertIs(qs[0].parent.parent_bool, True) class ValuesJoinPromotionTests(TestCase): def test_values_no_promotion_for_existing(self): qs = Node.objects.filter(parent__parent__isnull=False) self.assertIn(' INNER JOIN ', str(qs.query)) qs = qs.values('parent__parent__id') self.assertIn(' INNER JOIN ', str(qs.query)) # Make sure there is a left outer join without the filter. qs = Node.objects.values('parent__parent__id') self.assertIn(' LEFT OUTER JOIN ', str(qs.query)) def test_non_nullable_fk_not_promoted(self): qs = ObjectB.objects.values('objecta__name') self.assertIn(' INNER JOIN ', str(qs.query)) def test_ticket_21376(self): a = ObjectA.objects.create() ObjectC.objects.create(objecta=a) qs = ObjectC.objects.filter( Q(objecta=a) | Q(objectb__objecta=a), ) qs = qs.filter( Q(objectb=1) | Q(objecta=a), ) self.assertEqual(qs.count(), 1) tblname = connection.ops.quote_name(ObjectB._meta.db_table) self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query)) class ForeignKeyToBaseExcludeTests(TestCase): def test_ticket_21787(self): sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1') sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2') sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3') c1 = CategoryItem.objects.create(category=sc1) CategoryItem.objects.create(category=sc2) self.assertSequenceEqual(SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by('name'), [sc2, sc3]) self.assertSequenceEqual(SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1]) class ReverseM2MCustomPkTests(TestCase): def test_ticket_21879(self): cpt1 = CustomPkTag.objects.create(id='cpt1', tag='cpt1') cp1 = CustomPk.objects.create(name='cp1', extra='extra') cp1.custompktag_set.add(cpt1) self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1]) self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1]) class Ticket22429Tests(TestCase): def test_ticket_22429(self): sc1 = School.objects.create() st1 = Student.objects.create(school=sc1) sc2 = School.objects.create() st2 = Student.objects.create(school=sc2) cr = Classroom.objects.create(school=sc1) cr.students.add(st1) queryset = Student.objects.filter(~Q(classroom__school=F('school'))) self.assertSequenceEqual(queryset, [st2]) class Ticket23605Tests(TestCase): def test_ticket_23605(self): # Test filtering on a complicated q-object from ticket's report. # The query structure is such that we have multiple nested subqueries. # The original problem was that the inner queries weren't relabeled # correctly. # See also #24090. 
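        # The Q object built below nests a filter() subquery whose exclude-style
        # ~Q contains a further subquery; the regression was that those inner
        # queries were not relabeled correctly, which is why both
        # filter(complex_q) and exclude(complex_q) are asserted.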
a1 = Ticket23605A.objects.create() a2 = Ticket23605A.objects.create() c1 = Ticket23605C.objects.create(field_c0=10000.0) Ticket23605B.objects.create( field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1) complex_q = Q(pk__in=Ticket23605A.objects.filter( Q( # True for a1 as field_b0 = 10000, field_c0=10000 # False for a2 as no ticket23605b found ticket23605b__field_b0__gte=1000000 / F("ticket23605b__modelc_fk__field_c0") ) & # True for a1 (field_b1=True) Q(ticket23605b__field_b1=True) & ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter( ~( # Same filters as above commented filters, but # double-negated (one for Q() above, one for # parentheses). So, again a1 match, a2 not. Q(field_b1=True) & Q(field_b0__gte=1000000 / F("modelc_fk__field_c0")) ) ))).filter(ticket23605b__field_b1=True)) qs1 = Ticket23605A.objects.filter(complex_q) self.assertSequenceEqual(qs1, [a1]) qs2 = Ticket23605A.objects.exclude(complex_q) self.assertSequenceEqual(qs2, [a2]) class TestTicket24279(TestCase): def test_ticket_24278(self): School.objects.create() qs = School.objects.filter(Q(pk__in=()) | Q()) self.assertQuerysetEqual(qs, []) class TestInvalidValuesRelation(SimpleTestCase): def test_invalid_values(self): msg = "Field 'id' expected a number but got 'abc'." with self.assertRaisesMessage(ValueError, msg): Annotation.objects.filter(tag='abc') with self.assertRaisesMessage(ValueError, msg): Annotation.objects.filter(tag__in=[123, 'abc']) class TestTicket24605(TestCase): def test_ticket_24605(self): """ Subquery table names should be quoted. """ i1 = Individual.objects.create(alive=True) RelatedIndividual.objects.create(related=i1) i2 = Individual.objects.create(alive=False) RelatedIndividual.objects.create(related=i2) i3 = Individual.objects.create(alive=True) i4 = Individual.objects.create(alive=False) self.assertSequenceEqual(Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)), [i4]) self.assertSequenceEqual( Individual.objects.exclude(Q(alive=False), Q(related_individual__isnull=True)).order_by('pk'), [i1, i2, i3] ) class Ticket23622Tests(TestCase): @skipUnlessDBFeature('can_distinct_on_fields') def test_ticket_23622(self): """ Make sure __pk__in and __in work the same for related fields when using a distinct on subquery. 
""" a1 = Ticket23605A.objects.create() a2 = Ticket23605A.objects.create() c1 = Ticket23605C.objects.create(field_c0=0.0) Ticket23605B.objects.create( modela_fk=a1, field_b0=123, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=23, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=234, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=12, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=567, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=76, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=7, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=56, field_b1=True, modelc_fk=c1, ) qx = ( Q(ticket23605b__pk__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) & Q(ticket23605b__field_b0__gte=300) ) qy = ( Q(ticket23605b__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) & Q(ticket23605b__field_b0__gte=300) ) self.assertEqual( set(Ticket23605A.objects.filter(qx).values_list('pk', flat=True)), set(Ticket23605A.objects.filter(qy).values_list('pk', flat=True)) ) self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
2959269eb5e06652bb845ad56e37e157923951d0ea5eb258f6e244dcbdd9644f
""" Various complex queries that have been problematic in the past. """ from django.db import models from django.db.models.functions import Now class DumbCategory(models.Model): pass class ProxyCategory(DumbCategory): class Meta: proxy = True class NamedCategory(DumbCategory): name = models.CharField(max_length=10) def __str__(self): return self.name class Tag(models.Model): name = models.CharField(max_length=10) parent = models.ForeignKey( 'self', models.SET_NULL, blank=True, null=True, related_name='children', ) category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None) class Meta: ordering = ['name'] def __str__(self): return self.name class Note(models.Model): note = models.CharField(max_length=100) misc = models.CharField(max_length=25) tag = models.ForeignKey(Tag, models.SET_NULL, blank=True, null=True) negate = models.BooleanField(default=True) class Meta: ordering = ['note'] def __str__(self): return self.note class Annotation(models.Model): name = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) notes = models.ManyToManyField(Note) def __str__(self): return self.name class DateTimePK(models.Model): date = models.DateTimeField(primary_key=True, auto_now_add=True) class ExtraInfo(models.Model): info = models.CharField(max_length=100) note = models.ForeignKey(Note, models.CASCADE, null=True) value = models.IntegerField(null=True) date = models.ForeignKey(DateTimePK, models.SET_NULL, null=True) filterable = models.BooleanField(default=True) class Meta: ordering = ['info'] def __str__(self): return self.info class Author(models.Model): name = models.CharField(max_length=10) num = models.IntegerField(unique=True) extra = models.ForeignKey(ExtraInfo, models.CASCADE) class Meta: ordering = ['name'] def __str__(self): return self.name class Item(models.Model): name = models.CharField(max_length=10) created = models.DateTimeField() modified = models.DateTimeField(blank=True, null=True) tags = models.ManyToManyField(Tag, blank=True) creator = models.ForeignKey(Author, models.CASCADE) note = models.ForeignKey(Note, models.CASCADE) class Meta: ordering = ['-note', 'name'] def __str__(self): return self.name class Report(models.Model): name = models.CharField(max_length=10) creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True) def __str__(self): return self.name class ReportComment(models.Model): report = models.ForeignKey(Report, models.CASCADE) class Ranking(models.Model): rank = models.IntegerField() author = models.ForeignKey(Author, models.CASCADE) class Meta: # A complex ordering specification. Should stress the system a bit. ordering = ('author__extra__note', 'author__name', 'rank') def __str__(self): return '%d: %s' % (self.rank, self.author.name) class Cover(models.Model): title = models.CharField(max_length=50) item = models.ForeignKey(Item, models.CASCADE) class Meta: ordering = ['item'] def __str__(self): return self.title class Number(models.Model): num = models.IntegerField() other_num = models.IntegerField(null=True) another_num = models.IntegerField(null=True) def __str__(self): return str(self.num) # Symmetrical m2m field with a normal field using the reverse accessor name # ("valid"). class Valid(models.Model): valid = models.CharField(max_length=10) parent = models.ManyToManyField('self') class Meta: ordering = ['valid'] # Some funky cross-linked models for testing a couple of infinite recursion # cases. 
class X(models.Model): y = models.ForeignKey('Y', models.CASCADE) class Y(models.Model): x1 = models.ForeignKey(X, models.CASCADE, related_name='y1') # Some models with a cycle in the default ordering. This would be bad if we # didn't catch the infinite loop. class LoopX(models.Model): y = models.ForeignKey('LoopY', models.CASCADE) class Meta: ordering = ['y'] class LoopY(models.Model): x = models.ForeignKey(LoopX, models.CASCADE) class Meta: ordering = ['x'] class LoopZ(models.Model): z = models.ForeignKey('self', models.CASCADE) class Meta: ordering = ['z'] # A model and custom default manager combination. class CustomManager(models.Manager): def get_queryset(self): qs = super().get_queryset() return qs.filter(public=True, tag__name='t1') class ManagedModel(models.Model): data = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) public = models.BooleanField(default=True) objects = CustomManager() normal_manager = models.Manager() def __str__(self): return self.data # An inter-related setup with multiple paths from Child to Detail. class Detail(models.Model): data = models.CharField(max_length=10) class MemberManager(models.Manager): def get_queryset(self): return super().get_queryset().select_related("details") class Member(models.Model): name = models.CharField(max_length=10) details = models.OneToOneField(Detail, models.CASCADE, primary_key=True) objects = MemberManager() class Child(models.Model): person = models.OneToOneField(Member, models.CASCADE, primary_key=True) parent = models.ForeignKey(Member, models.CASCADE, related_name="children") # Custom primary keys interfered with ordering in the past. class CustomPk(models.Model): name = models.CharField(max_length=10, primary_key=True) extra = models.CharField(max_length=10) class Meta: ordering = ['name', 'extra'] class Related(models.Model): custom = models.ForeignKey(CustomPk, models.CASCADE, null=True) class CustomPkTag(models.Model): id = models.CharField(max_length=20, primary_key=True) custom_pk = models.ManyToManyField(CustomPk) tag = models.CharField(max_length=20) # An inter-related setup with a model subclass that has a nullable # path to another model, and a return path from that model. class Celebrity(models.Model): name = models.CharField("Name", max_length=20) greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True) def __str__(self): return self.name class TvChef(Celebrity): pass class Fan(models.Model): fan_of = models.ForeignKey(Celebrity, models.CASCADE) # Multiple foreign keys class LeafA(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class LeafB(models.Model): data = models.CharField(max_length=10) class Join(models.Model): a = models.ForeignKey(LeafA, models.CASCADE) b = models.ForeignKey(LeafB, models.CASCADE) class ReservedName(models.Model): name = models.CharField(max_length=20) order = models.IntegerField() def __str__(self): return self.name # A simpler shared-foreign-key setup that can expose some problems. 
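# PointerA and PointerB below both hold a ForeignKey to SharedConnection;
# ordering by the possibly missing reverse pointera relation is what
# Ticket14056Tests exercises in tests.py.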
class SharedConnection(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class PointerA(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) class PointerB(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) # Multi-layer ordering class SingleObject(models.Model): name = models.CharField(max_length=10) class Meta: ordering = ['name'] def __str__(self): return self.name class RelatedObject(models.Model): single = models.ForeignKey(SingleObject, models.SET_NULL, null=True) f = models.IntegerField(null=True) class Meta: ordering = ['single'] class Plaything(models.Model): name = models.CharField(max_length=10) others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True) class Meta: ordering = ['others'] def __str__(self): return self.name class Article(models.Model): name = models.CharField(max_length=20) created = models.DateTimeField() def __str__(self): return self.name class Food(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name class Eaten(models.Model): food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True) meal = models.CharField(max_length=20) def __str__(self): return "%s at %s" % (self.food, self.meal) class Node(models.Model): num = models.IntegerField(unique=True) parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True) def __str__(self): return str(self.num) # Bug #12252 class ObjectA(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name def __iter__(self): # Ticket #23721 assert False, 'type checking should happen without calling model __iter__' class ProxyObjectA(ObjectA): class Meta: proxy = True class ChildObjectA(ObjectA): pass class ObjectB(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.CASCADE) num = models.PositiveIntegerField() def __str__(self): return self.name class ProxyObjectB(ObjectB): class Meta: proxy = True class ObjectC(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True) objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True) childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk') def __str__(self): return self.name class SimpleCategory(models.Model): name = models.CharField(max_length=25) def __str__(self): return self.name class SpecialCategory(SimpleCategory): special_name = models.CharField(max_length=35) def __str__(self): return self.name + " " + self.special_name class CategoryItem(models.Model): category = models.ForeignKey(SimpleCategory, models.CASCADE) def __str__(self): return "category item: " + str(self.category) class MixedCaseFieldCategoryItem(models.Model): CaTeGoRy = models.ForeignKey(SimpleCategory, models.CASCADE) class MixedCaseDbColumnCategoryItem(models.Model): category = models.ForeignKey(SimpleCategory, models.CASCADE, db_column='CaTeGoRy_Id') class OneToOneCategory(models.Model): new_name = models.CharField(max_length=15) category = models.OneToOneField(SimpleCategory, models.CASCADE) def __str__(self): return "one2one " + self.new_name class CategoryRelationship(models.Model): first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel') second = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel') class CommonMixedCaseForeignKeys(models.Model): category = models.ForeignKey(CategoryItem, 
models.CASCADE) mixed_case_field_category = models.ForeignKey(MixedCaseFieldCategoryItem, models.CASCADE) mixed_case_db_column_category = models.ForeignKey(MixedCaseDbColumnCategoryItem, models.CASCADE) class NullableName(models.Model): name = models.CharField(max_length=20, null=True) class Meta: ordering = ['id'] class ModelD(models.Model): name = models.TextField() class ModelC(models.Model): name = models.TextField() class ModelB(models.Model): name = models.TextField() c = models.ForeignKey(ModelC, models.CASCADE) class ModelA(models.Model): name = models.TextField() b = models.ForeignKey(ModelB, models.SET_NULL, null=True) d = models.ForeignKey(ModelD, models.CASCADE) class Job(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name class JobResponsibilities(models.Model): job = models.ForeignKey(Job, models.CASCADE, to_field='name') responsibility = models.ForeignKey('Responsibility', models.CASCADE, to_field='description') class Responsibility(models.Model): description = models.CharField(max_length=20, unique=True) jobs = models.ManyToManyField(Job, through=JobResponsibilities, related_name='responsibilities') def __str__(self): return self.description # Models for disjunction join promotion low level testing. class FK1(models.Model): f1 = models.TextField() f2 = models.TextField() class FK2(models.Model): f1 = models.TextField() f2 = models.TextField() class FK3(models.Model): f1 = models.TextField() f2 = models.TextField() class BaseA(models.Model): a = models.ForeignKey(FK1, models.SET_NULL, null=True) b = models.ForeignKey(FK2, models.SET_NULL, null=True) c = models.ForeignKey(FK3, models.SET_NULL, null=True) class Identifier(models.Model): name = models.CharField(max_length=100) def __str__(self): return self.name class Program(models.Model): identifier = models.OneToOneField(Identifier, models.CASCADE) class Channel(models.Model): programs = models.ManyToManyField(Program) identifier = models.OneToOneField(Identifier, models.CASCADE) class Book(models.Model): title = models.TextField() chapter = models.ForeignKey('Chapter', models.CASCADE) class Chapter(models.Model): title = models.TextField() paragraph = models.ForeignKey('Paragraph', models.CASCADE) class Paragraph(models.Model): text = models.TextField() page = models.ManyToManyField('Page') class Page(models.Model): text = models.TextField() class MyObject(models.Model): parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children') data = models.CharField(max_length=100) created_at = models.DateTimeField(auto_now_add=True) # Models for #17600 regressions class Order(models.Model): id = models.IntegerField(primary_key=True) name = models.CharField(max_length=12, null=True, default='') class Meta: ordering = ('pk',) def __str__(self): return str(self.pk) class OrderItem(models.Model): order = models.ForeignKey(Order, models.CASCADE, related_name='items') status = models.IntegerField() class Meta: ordering = ('pk',) def __str__(self): return str(self.pk) class BaseUser(models.Model): pass class Task(models.Model): title = models.CharField(max_length=10) owner = models.ForeignKey(BaseUser, models.CASCADE, related_name='owner') creator = models.ForeignKey(BaseUser, models.CASCADE, related_name='creator') def __str__(self): return self.title class Staff(models.Model): name = models.CharField(max_length=10) def __str__(self): return self.name class StaffUser(BaseUser): staff = models.OneToOneField(Staff, models.CASCADE, related_name='user') 
def __str__(self): return self.staff class Ticket21203Parent(models.Model): parentid = models.AutoField(primary_key=True) parent_bool = models.BooleanField(default=True) created = models.DateTimeField(auto_now=True) class Ticket21203Child(models.Model): childid = models.AutoField(primary_key=True) parent = models.ForeignKey(Ticket21203Parent, models.CASCADE) class Person(models.Model): name = models.CharField(max_length=128) class Company(models.Model): name = models.CharField(max_length=128) employees = models.ManyToManyField(Person, related_name='employers', through='Employment') def __str__(self): return self.name class Employment(models.Model): employer = models.ForeignKey(Company, models.CASCADE) employee = models.ForeignKey(Person, models.CASCADE) title = models.CharField(max_length=128) class School(models.Model): pass class Student(models.Model): school = models.ForeignKey(School, models.CASCADE) class Classroom(models.Model): name = models.CharField(max_length=20) has_blackboard = models.BooleanField(null=True) school = models.ForeignKey(School, models.CASCADE) students = models.ManyToManyField(Student, related_name='classroom') class Teacher(models.Model): schools = models.ManyToManyField(School) friends = models.ManyToManyField('self') class Ticket23605AParent(models.Model): pass class Ticket23605A(Ticket23605AParent): pass class Ticket23605B(models.Model): modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE) modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE) field_b0 = models.IntegerField(null=True) field_b1 = models.BooleanField(default=False) class Ticket23605C(models.Model): field_c0 = models.FloatField() # db_table names have capital letters to ensure they are quoted in queries. class Individual(models.Model): alive = models.BooleanField() class Meta: db_table = 'Individual' class RelatedIndividual(models.Model): related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual') class Meta: db_table = 'RelatedIndividual' class CustomDbColumn(models.Model): custom_column = models.IntegerField(db_column='custom_name', null=True) ip_address = models.GenericIPAddressField(null=True) class CreatedField(models.DateTimeField): db_returning = True def __init__(self, *args, **kwargs): kwargs.setdefault('default', Now) super().__init__(*args, **kwargs) class ReturningModel(models.Model): created = CreatedField(editable=False) class NonIntegerPKReturningModel(models.Model): created = CreatedField(editable=False, primary_key=True) class JSONFieldNullable(models.Model): json_field = models.JSONField(blank=True, null=True) class Meta: required_db_features = {'supports_json_field'}
d6ebf251541da82ddd465e03003e09cb34fb80a99b19192e22c9f210198eb3a1
import operator from django.db import DatabaseError, NotSupportedError, connection from django.db.models import Exists, F, IntegerField, OuterRef, Value from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from .models import Number, ReservedName @skipUnlessDBFeature('supports_select_union') class QuerySetSetOperationTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10)) def assertNumbersEqual(self, queryset, expected_numbers, ordered=True): self.assertQuerysetEqual(queryset, expected_numbers, operator.attrgetter('num'), ordered) def test_simple_union(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=8) qs3 = Number.objects.filter(num=5) self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_simple_intersection(self): qs1 = Number.objects.filter(num__lte=5) qs2 = Number.objects.filter(num__gte=5) qs3 = Number.objects.filter(num__gte=4, num__lte=6) self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_intersection_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() reserved_name = qs1.intersection(qs1).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.intersection(qs1).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) @skipUnlessDBFeature('supports_select_difference') def test_simple_difference(self): qs1 = Number.objects.filter(num__lte=5) qs2 = Number.objects.filter(num__lte=4) self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False) def test_union_distinct(self): qs1 = Number.objects.all() qs2 = Number.objects.all() self.assertEqual(len(list(qs1.union(qs2, all=True))), 20) self.assertEqual(len(list(qs1.union(qs2))), 10) def test_union_none(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=8) qs3 = qs1.union(qs2) self.assertSequenceEqual(qs3.none(), []) self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_intersection_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.intersection(qs2)), 0) self.assertEqual(len(qs1.intersection(qs3)), 0) self.assertEqual(len(qs2.intersection(qs1)), 0) self.assertEqual(len(qs3.intersection(qs1)), 0) self.assertEqual(len(qs2.intersection(qs2)), 0) self.assertEqual(len(qs3.intersection(qs3)), 0) @skipUnlessDBFeature('supports_select_difference') def test_difference_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.difference(qs2)), 10) self.assertEqual(len(qs1.difference(qs3)), 10) self.assertEqual(len(qs2.difference(qs1)), 0) self.assertEqual(len(qs3.difference(qs1)), 0) self.assertEqual(len(qs2.difference(qs2)), 0) self.assertEqual(len(qs3.difference(qs3)), 0) @skipUnlessDBFeature('supports_select_difference') def test_difference_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() qs2 = ReservedName.objects.none() reserved_name = qs1.difference(qs2).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') 
self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.difference(qs2).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) def test_union_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.union(qs2)), 10) self.assertEqual(len(qs2.union(qs1)), 10) self.assertEqual(len(qs1.union(qs3)), 10) self.assertEqual(len(qs3.union(qs1)), 10) self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10) self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20) self.assertEqual(len(qs2.union(qs2)), 0) self.assertEqual(len(qs3.union(qs3)), 0) def test_empty_qs_union_with_ordered_qs(self): qs1 = Number.objects.all().order_by('num') qs2 = Number.objects.none().union(qs1).order_by('num') self.assertEqual(list(qs1), list(qs2)) def test_limits(self): qs1 = Number.objects.all() qs2 = Number.objects.all() self.assertEqual(len(list(qs1.union(qs2)[:2])), 2) def test_ordering(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=2, num__lte=3) self.assertNumbersEqual(qs1.union(qs2).order_by('-num'), [3, 2, 1, 0]) def test_ordering_by_alias(self): qs1 = Number.objects.filter(num__lte=1).values(alias=F('num')) qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F('num')) self.assertQuerysetEqual( qs1.union(qs2).order_by('-alias'), [3, 2, 1, 0], operator.itemgetter('alias'), ) def test_ordering_by_f_expression(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=2, num__lte=3) self.assertNumbersEqual(qs1.union(qs2).order_by(F('num').desc()), [3, 2, 1, 0]) def test_ordering_by_f_expression_and_alias(self): qs1 = Number.objects.filter(num__lte=1).values(alias=F('other_num')) qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F('other_num')) self.assertQuerysetEqual( qs1.union(qs2).order_by(F('alias').desc()), [10, 9, 8, 7], operator.itemgetter('alias'), ) Number.objects.create(num=-1) self.assertQuerysetEqual( qs1.union(qs2).order_by(F('alias').desc(nulls_last=True)), [10, 9, 8, 7, None], operator.itemgetter('alias'), ) def test_union_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() reserved_name = qs1.union(qs1).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.union(qs1).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) # List of columns can be changed. 
reserved_name = qs1.union(qs1).values_list('order').get() self.assertEqual(reserved_name, (2,)) def test_union_with_two_annotated_values_list(self): qs1 = Number.objects.filter(num=1).annotate( count=Value(0, IntegerField()), ).values_list('num', 'count') qs2 = Number.objects.filter(num=2).values('pk').annotate( count=F('num'), ).annotate( num=Value(1, IntegerField()), ).values_list('num', 'count') self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)]) def test_union_with_extra_and_values_list(self): qs1 = Number.objects.filter(num=1).extra( select={'count': 0}, ).values_list('num', 'count') qs2 = Number.objects.filter(num=2).extra(select={'count': 1}) self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)]) def test_union_with_values_list_on_annotated_and_unannotated(self): ReservedName.objects.create(name='rn1', order=1) qs1 = Number.objects.annotate( has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef('num'))) ).filter(has_reserved_name=True) qs2 = Number.objects.filter(num=9) self.assertCountEqual(qs1.union(qs2).values_list('num', flat=True), [1, 9]) def test_union_with_values_list_and_order(self): ReservedName.objects.bulk_create([ ReservedName(name='rn1', order=7), ReservedName(name='rn2', order=5), ReservedName(name='rn0', order=6), ReservedName(name='rn9', order=-1), ]) qs1 = ReservedName.objects.filter(order__gte=6) qs2 = ReservedName.objects.filter(order__lte=5) union_qs = qs1.union(qs2) for qs, expected_result in ( # Order by a single column. (union_qs.order_by('-pk').values_list('order', flat=True), [-1, 6, 5, 7]), (union_qs.order_by('pk').values_list('order', flat=True), [7, 5, 6, -1]), (union_qs.values_list('order', flat=True).order_by('-pk'), [-1, 6, 5, 7]), (union_qs.values_list('order', flat=True).order_by('pk'), [7, 5, 6, -1]), # Order by multiple columns. 
(union_qs.order_by('-name', 'pk').values_list('order', flat=True), [-1, 5, 7, 6]), (union_qs.values_list('order', flat=True).order_by('-name', 'pk'), [-1, 5, 7, 6]), ): with self.subTest(qs=qs): self.assertEqual(list(qs), expected_result) def test_count_union(self): qs1 = Number.objects.filter(num__lte=1).values('num') qs2 = Number.objects.filter(num__gte=2, num__lte=3).values('num') self.assertEqual(qs1.union(qs2).count(), 4) def test_count_union_empty_result(self): qs = Number.objects.filter(pk__in=[]) self.assertEqual(qs.union(qs).count(), 0) @skipUnlessDBFeature('supports_select_difference') def test_count_difference(self): qs1 = Number.objects.filter(num__lt=10) qs2 = Number.objects.filter(num__lt=9) self.assertEqual(qs1.difference(qs2).count(), 1) @skipUnlessDBFeature('supports_select_intersection') def test_count_intersection(self): qs1 = Number.objects.filter(num__gte=5) qs2 = Number.objects.filter(num__lte=5) self.assertEqual(qs1.intersection(qs2).count(), 1) def test_get_union(self): qs = Number.objects.filter(num=2) self.assertEqual(qs.union(qs).get().num, 2) @skipUnlessDBFeature('supports_select_difference') def test_get_difference(self): qs1 = Number.objects.all() qs2 = Number.objects.exclude(num=2) self.assertEqual(qs1.difference(qs2).get().num, 2) @skipUnlessDBFeature('supports_select_intersection') def test_get_intersection(self): qs1 = Number.objects.all() qs2 = Number.objects.filter(num=2) self.assertEqual(qs1.intersection(qs2).get().num, 2) @skipUnlessDBFeature('supports_slicing_ordering_in_compound') def test_ordering_subqueries(self): qs1 = Number.objects.order_by('num')[:2] qs2 = Number.objects.order_by('-num')[:2] self.assertNumbersEqual(qs1.union(qs2).order_by('-num')[:4], [9, 8, 1, 0]) @skipIfDBFeature('supports_slicing_ordering_in_compound') def test_unsupported_ordering_slicing_raises_db_error(self): qs1 = Number.objects.all() qs2 = Number.objects.all() qs3 = Number.objects.all() msg = 'LIMIT/OFFSET not allowed in subqueries of compound statements' with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2[:10])) msg = 'ORDER BY not allowed in subqueries of compound statements' with self.assertRaisesMessage(DatabaseError, msg): list(qs1.order_by('id').union(qs2)) with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2).order_by('id').union(qs3)) @skipIfDBFeature('supports_select_intersection') def test_unsupported_intersection_raises_db_error(self): qs1 = Number.objects.all() qs2 = Number.objects.all() msg = 'intersection is not supported on this database backend' with self.assertRaisesMessage(NotSupportedError, msg): list(qs1.intersection(qs2)) def test_combining_multiple_models(self): ReservedName.objects.create(name='99 little bugs', order=99) qs1 = Number.objects.filter(num=1).values_list('num', flat=True) qs2 = ReservedName.objects.values_list('order') self.assertEqual(list(qs1.union(qs2).order_by('num')), [1, 99]) def test_order_raises_on_non_selected_column(self): qs1 = Number.objects.filter().annotate( annotation=Value(1, IntegerField()), ).values('annotation', num2=F('num')) qs2 = Number.objects.filter().values('id', 'num') # Should not raise list(qs1.union(qs2).order_by('annotation')) list(qs1.union(qs2).order_by('num2')) msg = 'ORDER BY term does not match any column in the result set' # 'id' is not part of the select with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2).order_by('id')) # 'num' got realiased to num2 with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2).order_by('num')) with 
self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2).order_by(F('num'))) with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2).order_by(F('num').desc())) # switched order, now 'exists' again: list(qs2.union(qs1).order_by('num')) @skipUnlessDBFeature('supports_select_difference', 'supports_select_intersection') def test_qs_with_subcompound_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1)) self.assertEqual(qs1.difference(qs2).count(), 2) def test_order_by_same_type(self): qs = Number.objects.all() union = qs.union(qs) numbers = list(range(10)) self.assertNumbersEqual(union.order_by('num'), numbers) self.assertNumbersEqual(union.order_by('other_num'), reversed(numbers)) def test_unsupported_operations_on_combined_qs(self): qs = Number.objects.all() msg = 'Calling QuerySet.%s() after %s() is not supported.' combinators = ['union'] if connection.features.supports_select_difference: combinators.append('difference') if connection.features.supports_select_intersection: combinators.append('intersection') for combinator in combinators: for operation in ( 'alias', 'annotate', 'defer', 'delete', 'distinct', 'exclude', 'extra', 'filter', 'only', 'prefetch_related', 'select_related', 'update', ): with self.subTest(combinator=combinator, operation=operation): with self.assertRaisesMessage( NotSupportedError, msg % (operation, combinator), ): getattr(getattr(qs, combinator)(qs), operation)() def test_get_with_filters_unsupported_on_combined_qs(self): qs = Number.objects.all() msg = 'Calling QuerySet.get(...) with filters after %s() is not supported.' combinators = ['union'] if connection.features.supports_select_difference: combinators.append('difference') if connection.features.supports_select_intersection: combinators.append('intersection') for combinator in combinators: with self.subTest(combinator=combinator): with self.assertRaisesMessage(NotSupportedError, msg % combinator): getattr(qs, combinator)(qs).get(num=2)
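# --- Illustrative sketch (not part of Django's test suite) --------------------
# A minimal usage example of the QuerySet combinator API exercised by the tests
# above. The helper below is hypothetical and only for illustration; it assumes
# a configured Django project and reuses the ``Number`` model (integer ``num``
# field) imported at the top of this module.
def _example_combinator_usage():
    low = Number.objects.filter(num__lt=3)
    high = Number.objects.filter(num__gt=7)
    # union() removes duplicate rows by default; all=True keeps them (UNION ALL).
    union_qs = low.union(high).order_by('num')
    union_all_qs = low.union(high, all=True)
    # intersection() and difference() require backend support
    # (supports_select_intersection / supports_select_difference).
    overlap = low.intersection(Number.objects.filter(num__lt=5))
    remainder = low.difference(Number.objects.filter(num=0))
    # Combined querysets only allow a limited set of follow-up operations
    # (ordering, slicing, values_list(), count(), ...); filter(), annotate(),
    # and similar calls raise NotSupportedError, as asserted above.
    return list(union_qs), list(union_all_qs), list(overlap), list(remainder)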
4a5516dcd9eec40875ce741f5f892a6d7f3789c75f1ac593bd4cfefcdc4d6a70
import unittest from django.db import NotSupportedError, connection, transaction from django.db.models import Count from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from django.test.utils import CaptureQueriesContext from .models import Tag @skipUnlessDBFeature('supports_explaining_query_execution') class ExplainTests(TestCase): def test_basic(self): querysets = [ Tag.objects.filter(name='test'), Tag.objects.filter(name='test').select_related('parent'), Tag.objects.filter(name='test').prefetch_related('children'), Tag.objects.filter(name='test').annotate(Count('children')), Tag.objects.filter(name='test').values_list('name'), Tag.objects.order_by().union(Tag.objects.order_by().filter(name='test')), Tag.objects.all().select_for_update().filter(name='test'), ] supported_formats = connection.features.supported_explain_formats all_formats = (None,) + tuple(supported_formats) + tuple(f.lower() for f in supported_formats) for idx, queryset in enumerate(querysets): for format in all_formats: with self.subTest(format=format, queryset=idx): with self.assertNumQueries(1), CaptureQueriesContext(connection) as captured_queries: result = queryset.explain(format=format) self.assertTrue(captured_queries[0]['sql'].startswith(connection.ops.explain_prefix)) self.assertIsInstance(result, str) self.assertTrue(result) @skipUnlessDBFeature('validates_explain_options') def test_unknown_options(self): with self.assertRaisesMessage(ValueError, 'Unknown options: test, test2'): Tag.objects.all().explain(test=1, test2=1) def test_unknown_format(self): msg = 'DOES NOT EXIST is not a recognized format.' if connection.features.supported_explain_formats: msg += ' Allowed formats: %s' % ', '.join(sorted(connection.features.supported_explain_formats)) with self.assertRaisesMessage(ValueError, msg): Tag.objects.all().explain(format='does not exist') @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific') def test_postgres_options(self): qs = Tag.objects.filter(name='test') test_options = [ {'COSTS': False, 'BUFFERS': True, 'ANALYZE': True}, {'costs': False, 'buffers': True, 'analyze': True}, {'verbose': True, 'timing': True, 'analyze': True}, {'verbose': False, 'timing': False, 'analyze': True}, ] if connection.features.is_postgresql_10: test_options.append({'summary': True}) if connection.features.is_postgresql_12: test_options.append({'settings': True}) if connection.features.is_postgresql_13: test_options.append({'analyze': True, 'wal': True}) for options in test_options: with self.subTest(**options), transaction.atomic(): with CaptureQueriesContext(connection) as captured_queries: qs.explain(format='text', **options) self.assertEqual(len(captured_queries), 1) for name, value in options.items(): option = '{} {}'.format(name.upper(), 'true' if value else 'false') self.assertIn(option, captured_queries[0]['sql']) @unittest.skipUnless(connection.vendor == 'mysql', 'MySQL specific') def test_mysql_text_to_traditional(self): # Ensure these cached properties are initialized to prevent queries for # the MariaDB or MySQL version during the QuerySet evaluation. 
connection.features.supported_explain_formats with CaptureQueriesContext(connection) as captured_queries: Tag.objects.filter(name='test').explain(format='text') self.assertEqual(len(captured_queries), 1) self.assertIn('FORMAT=TRADITIONAL', captured_queries[0]['sql']) @unittest.skipUnless(connection.vendor == 'mysql', 'MariaDB and MySQL >= 8.0.18 specific.') def test_mysql_analyze(self): # Inner skip to avoid module level query for MySQL version. if not connection.features.supports_explain_analyze: raise unittest.SkipTest('MariaDB and MySQL >= 8.0.18 specific.') qs = Tag.objects.filter(name='test') with CaptureQueriesContext(connection) as captured_queries: qs.explain(analyze=True) self.assertEqual(len(captured_queries), 1) prefix = 'ANALYZE ' if connection.mysql_is_mariadb else 'EXPLAIN ANALYZE ' self.assertTrue(captured_queries[0]['sql'].startswith(prefix)) with CaptureQueriesContext(connection) as captured_queries: qs.explain(analyze=True, format='JSON') self.assertEqual(len(captured_queries), 1) if connection.mysql_is_mariadb: self.assertIn('FORMAT=JSON', captured_queries[0]['sql']) else: self.assertNotIn('FORMAT=JSON', captured_queries[0]['sql']) @skipIfDBFeature('supports_explaining_query_execution') class ExplainUnsupportedTests(TestCase): def test_message(self): msg = 'This backend does not support explaining query execution.' with self.assertRaisesMessage(NotSupportedError, msg): Tag.objects.filter(name='test').explain()
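# --- Illustrative sketch (not part of Django's test suite) --------------------
# Minimal usage of QuerySet.explain(), the API exercised above. The helper is
# hypothetical and for illustration only; it assumes a configured Django
# project, a backend with supports_explaining_query_execution, and the ``Tag``
# model imported at the top of this module.
def _example_explain_usage():
    qs = Tag.objects.filter(name='test')
    # With no arguments, the backend's default EXPLAIN output is returned as a
    # string (the emitted SQL starts with connection.ops.explain_prefix).
    plan = qs.explain()
    # Backend-specific behaviour is requested via keyword arguments, e.g.
    # qs.explain(format='json') where that format is supported, or
    # qs.explain(analyze=True) on PostgreSQL / MariaDB / MySQL >= 8.0.18.
    # Unknown options raise ValueError on backends that validate them.
    return plan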
f8a962849b142dafa7ed3537886013bfc589df5d9b185ef2b699661eca546c55
import pickle import time from datetime import datetime from django.template import engines from django.template.response import ( ContentNotRenderedError, SimpleTemplateResponse, TemplateResponse, ) from django.test import ( RequestFactory, SimpleTestCase, modify_settings, override_settings, ) from django.test.utils import require_jinja2 from .utils import TEMPLATE_DIR def test_processor(request): return {'processors': 'yes'} test_processor_name = 'template_tests.test_response.test_processor' # A test middleware that installs a temporary URLConf def custom_urlconf_middleware(get_response): def middleware(request): request.urlconf = 'template_tests.alternate_urls' return get_response(request) return middleware class SimpleTemplateResponseTest(SimpleTestCase): def _response(self, template='foo', *args, **kwargs): template = engines['django'].from_string(template) return SimpleTemplateResponse(template, *args, **kwargs) def test_template_resolving(self): response = SimpleTemplateResponse('first/test.html') response.render() self.assertEqual(response.content, b'First template\n') templates = ['foo.html', 'second/test.html', 'first/test.html'] response = SimpleTemplateResponse(templates) response.render() self.assertEqual(response.content, b'Second template\n') response = self._response() response.render() self.assertEqual(response.content, b'foo') def test_explicit_baking(self): # explicit baking response = self._response() self.assertFalse(response.is_rendered) response.render() self.assertTrue(response.is_rendered) def test_render(self): # response is not re-rendered without the render call response = self._response().render() self.assertEqual(response.content, b'foo') # rebaking doesn't change the rendered content template = engines['django'].from_string('bar{{ baz }}') response.template_name = template response.render() self.assertEqual(response.content, b'foo') # but rendered content can be overridden by manually # setting content response.content = 'bar' self.assertEqual(response.content, b'bar') def test_iteration_unrendered(self): # unrendered response raises an exception on iteration response = self._response() self.assertFalse(response.is_rendered) def iteration(): list(response) msg = 'The response content must be rendered before it can be iterated over.' 
with self.assertRaisesMessage(ContentNotRenderedError, msg): iteration() self.assertFalse(response.is_rendered) def test_iteration_rendered(self): # iteration works for rendered responses response = self._response().render() self.assertEqual(list(response), [b'foo']) def test_content_access_unrendered(self): # unrendered response raises an exception when content is accessed response = self._response() self.assertFalse(response.is_rendered) with self.assertRaises(ContentNotRenderedError): response.content self.assertFalse(response.is_rendered) def test_content_access_rendered(self): # rendered response content can be accessed response = self._response().render() self.assertEqual(response.content, b'foo') def test_set_content(self): # content can be overridden response = self._response() self.assertFalse(response.is_rendered) response.content = 'spam' self.assertTrue(response.is_rendered) self.assertEqual(response.content, b'spam') response.content = 'baz' self.assertEqual(response.content, b'baz') def test_dict_context(self): response = self._response('{{ foo }}{{ processors }}', {'foo': 'bar'}) self.assertEqual(response.context_data, {'foo': 'bar'}) response.render() self.assertEqual(response.content, b'bar') def test_kwargs(self): response = self._response(content_type='application/json', status=504, charset='ascii') self.assertEqual(response.headers['content-type'], 'application/json') self.assertEqual(response.status_code, 504) self.assertEqual(response.charset, 'ascii') def test_args(self): response = SimpleTemplateResponse('', {}, 'application/json', 504) self.assertEqual(response.headers['content-type'], 'application/json') self.assertEqual(response.status_code, 504) @require_jinja2 def test_using(self): response = SimpleTemplateResponse('template_tests/using.html').render() self.assertEqual(response.content, b'DTL\n') response = SimpleTemplateResponse('template_tests/using.html', using='django').render() self.assertEqual(response.content, b'DTL\n') response = SimpleTemplateResponse('template_tests/using.html', using='jinja2').render() self.assertEqual(response.content, b'Jinja2\n') def test_post_callbacks(self): "Rendering a template response triggers the post-render callbacks" post = [] def post1(obj): post.append('post1') def post2(obj): post.append('post2') response = SimpleTemplateResponse('first/test.html', {}) response.add_post_render_callback(post1) response.add_post_render_callback(post2) # When the content is rendered, all the callbacks are invoked, too. response.render() self.assertEqual(response.content, b'First template\n') self.assertEqual(post, ['post1', 'post2']) def test_pickling(self): # Create a template response. The context is # known to be unpicklable (e.g., a function). response = SimpleTemplateResponse('first/test.html', { 'value': 123, 'fn': datetime.now, }) with self.assertRaises(ContentNotRenderedError): pickle.dumps(response) # But if we render the response, we can pickle it. 
response.render() pickled_response = pickle.dumps(response) unpickled_response = pickle.loads(pickled_response) self.assertEqual(unpickled_response.content, response.content) self.assertEqual(unpickled_response.headers['content-type'], response.headers['content-type']) self.assertEqual(unpickled_response.status_code, response.status_code) # ...and the unpickled response doesn't have the # template-related attributes, so it can't be re-rendered template_attrs = ('template_name', 'context_data', '_post_render_callbacks') for attr in template_attrs: self.assertFalse(hasattr(unpickled_response, attr)) # ...and requesting any of those attributes raises an exception for attr in template_attrs: with self.assertRaises(AttributeError): getattr(unpickled_response, attr) def test_repickling(self): response = SimpleTemplateResponse('first/test.html', { 'value': 123, 'fn': datetime.now, }) with self.assertRaises(ContentNotRenderedError): pickle.dumps(response) response.render() pickled_response = pickle.dumps(response) unpickled_response = pickle.loads(pickled_response) pickle.dumps(unpickled_response) def test_pickling_cookie(self): response = SimpleTemplateResponse('first/test.html', { 'value': 123, 'fn': datetime.now, }) response.cookies['key'] = 'value' response.render() pickled_response = pickle.dumps(response, pickle.HIGHEST_PROTOCOL) unpickled_response = pickle.loads(pickled_response) self.assertEqual(unpickled_response.cookies['key'].value, 'value') def test_headers(self): response = SimpleTemplateResponse( 'first/test.html', {'value': 123, 'fn': datetime.now}, headers={'X-Foo': 'foo'}, ) self.assertEqual(response.headers['X-Foo'], 'foo') @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [TEMPLATE_DIR], 'OPTIONS': { 'context_processors': [test_processor_name], }, }]) class TemplateResponseTest(SimpleTestCase): factory = RequestFactory() def _response(self, template='foo', *args, **kwargs): self._request = self.factory.get('/') template = engines['django'].from_string(template) return TemplateResponse(self._request, template, *args, **kwargs) def test_render(self): response = self._response('{{ foo }}{{ processors }}').render() self.assertEqual(response.content, b'yes') def test_render_with_requestcontext(self): response = self._response('{{ foo }}{{ processors }}', {'foo': 'bar'}).render() self.assertEqual(response.content, b'baryes') def test_context_processor_priority(self): # context processors should be overridden by passed-in context response = self._response('{{ foo }}{{ processors }}', {'processors': 'no'}).render() self.assertEqual(response.content, b'no') def test_kwargs(self): response = self._response(content_type='application/json', status=504) self.assertEqual(response.headers['content-type'], 'application/json') self.assertEqual(response.status_code, 504) def test_args(self): response = TemplateResponse(self.factory.get('/'), '', {}, 'application/json', 504) self.assertEqual(response.headers['content-type'], 'application/json') self.assertEqual(response.status_code, 504) @require_jinja2 def test_using(self): request = self.factory.get('/') response = TemplateResponse(request, 'template_tests/using.html').render() self.assertEqual(response.content, b'DTL\n') response = TemplateResponse(request, 'template_tests/using.html', using='django').render() self.assertEqual(response.content, b'DTL\n') response = TemplateResponse(request, 'template_tests/using.html', using='jinja2').render() self.assertEqual(response.content, b'Jinja2\n') 
def test_pickling(self): # Create a template response. The context is # known to be unpicklable (e.g., a function). response = TemplateResponse( self.factory.get('/'), 'first/test.html', { 'value': 123, 'fn': datetime.now, } ) with self.assertRaises(ContentNotRenderedError): pickle.dumps(response) # But if we render the response, we can pickle it. response.render() pickled_response = pickle.dumps(response) unpickled_response = pickle.loads(pickled_response) self.assertEqual(unpickled_response.content, response.content) self.assertEqual(unpickled_response.headers['content-type'], response.headers['content-type']) self.assertEqual(unpickled_response.status_code, response.status_code) # ...and the unpickled response doesn't have the # template-related attributes, so it can't be re-rendered template_attrs = ( 'template_name', 'context_data', '_post_render_callbacks', '_request', ) for attr in template_attrs: self.assertFalse(hasattr(unpickled_response, attr)) # ...and requesting any of those attributes raises an exception for attr in template_attrs: with self.assertRaises(AttributeError): getattr(unpickled_response, attr) def test_repickling(self): response = SimpleTemplateResponse('first/test.html', { 'value': 123, 'fn': datetime.now, }) with self.assertRaises(ContentNotRenderedError): pickle.dumps(response) response.render() pickled_response = pickle.dumps(response) unpickled_response = pickle.loads(pickled_response) pickle.dumps(unpickled_response) def test_headers(self): response = TemplateResponse( self.factory.get('/'), 'first/test.html', {'value': 123, 'fn': datetime.now}, headers={'X-Foo': 'foo'}, ) self.assertEqual(response.headers['X-Foo'], 'foo') @modify_settings(MIDDLEWARE={'append': ['template_tests.test_response.custom_urlconf_middleware']}) @override_settings(ROOT_URLCONF='template_tests.urls') class CustomURLConfTest(SimpleTestCase): def test_custom_urlconf(self): response = self.client.get('/template_response_view/') self.assertContains(response, 'This is where you can find the snark: /snark/') @modify_settings( MIDDLEWARE={ 'append': [ 'django.middleware.cache.FetchFromCacheMiddleware', 'django.middleware.cache.UpdateCacheMiddleware', ], }, ) @override_settings(CACHE_MIDDLEWARE_SECONDS=2.0, ROOT_URLCONF='template_tests.alternate_urls') class CacheMiddlewareTest(SimpleTestCase): def test_middleware_caching(self): response = self.client.get('/template_response_view/') self.assertEqual(response.status_code, 200) time.sleep(1.0) response2 = self.client.get('/template_response_view/') self.assertEqual(response2.status_code, 200) self.assertEqual(response.content, response2.content) time.sleep(2.0) # Let the cache expire and test again response2 = self.client.get('/template_response_view/') self.assertEqual(response2.status_code, 200) self.assertNotEqual(response.content, response2.content)
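# --- Illustrative sketch (not part of Django's test suite) --------------------
# How the lazy rendering behaviour tested above is typically used in a view.
# The view function is hypothetical and for illustration only; it assumes the
# 'first/test.html' fixture template used by the tests is on the template path.
def _example_template_response_view(request):
    response = TemplateResponse(request, 'first/test.html', {'value': 123})
    # Nothing is rendered yet: middleware can still replace template_name or
    # mutate context_data. Accessing response.content before render() raises
    # ContentNotRenderedError, as asserted above.
    response.add_post_render_callback(lambda rendered: rendered)  # runs after render()
    # Django calls render() later in the response phase (or middleware may call
    # it explicitly), at which point the callbacks fire and the content is baked.
    return response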
dc573da178fe630edaa6b1fb9b0cab7ed52ad6dbeec2f6204e4cc537d631ae07
from django.db import models


class CurrentTranslation(models.ForeignObject):
    """
    Creates virtual relation to the translation with model cache enabled.
    """
    # Avoid validation
    requires_unique_target = False

    def __init__(self, to, on_delete, from_fields, to_fields, **kwargs):
        # Disable reverse relation
        kwargs['related_name'] = '+'
        # Set unique to enable model cache.
        kwargs['unique'] = True
        super().__init__(to, on_delete, from_fields, to_fields, **kwargs)


class ArticleTranslation(models.Model):
    article = models.ForeignKey('indexes.Article', models.CASCADE)
    article_no_constraint = models.ForeignKey(
        'indexes.Article', models.CASCADE, db_constraint=False, related_name='+'
    )
    language = models.CharField(max_length=10, unique=True)
    content = models.TextField()


class Article(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()
    published = models.BooleanField(default=False)

    # Add virtual relation to the ArticleTranslation model.
    translation = CurrentTranslation(ArticleTranslation, models.CASCADE, ['id'], ['article'])

    class Meta:
        index_together = [
            ["headline", "pub_date"],
        ]


# Model for index_together being used only with single list
class IndexTogetherSingleList(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    class Meta:
        index_together = ["headline", "pub_date"]


class IndexedArticle(models.Model):
    headline = models.CharField(max_length=100, db_index=True)
    body = models.TextField(db_index=True)
    slug = models.CharField(max_length=40, unique=True)

    class Meta:
        required_db_features = {'supports_index_on_text_field'}


class IndexedArticle2(models.Model):
    headline = models.CharField(max_length=100)
    body = models.TextField()
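# --- Illustrative sketch (not part of Django's test suite) --------------------
# The models above exercise ``db_index``, ``unique`` and ``Meta.index_together``.
# The hypothetical model below (added only for illustration, not used by any
# test) shows the equivalent composite index declared with ``Meta.indexes``.
class ExampleCompositeIndexArticle(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    class Meta:
        indexes = [
            # Same two-column index as Article.Meta.index_together above,
            # expressed with the Meta.indexes option.
            models.Index(fields=['headline', 'pub_date']),
        ]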
8faf023a714de838d8e264df37b71835178f49049680a498498b32af95895ee1
import datetime import re from datetime import date from decimal import Decimal from django import forms from django.core.exceptions import ImproperlyConfigured, ValidationError from django.db import models from django.forms.models import ( BaseModelFormSet, _get_foreign_key, inlineformset_factory, modelformset_factory, ) from django.http import QueryDict from django.test import TestCase, skipUnlessDBFeature from .models import ( AlternateBook, Author, AuthorMeeting, BetterAuthor, Book, BookWithCustomPK, BookWithOptionalAltEditor, ClassyMexicanRestaurant, CustomPrimaryKey, Location, Membership, MexicanRestaurant, Owner, OwnerProfile, Person, Place, Player, Poem, Poet, Post, Price, Product, Repository, Restaurant, Revision, Team, ) class DeletionTests(TestCase): def test_deletion(self): PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True) poet = Poet.objects.create(name='test') data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '1', 'form-MAX_NUM_FORMS': '0', 'form-0-id': str(poet.pk), 'form-0-name': 'test', 'form-0-DELETE': 'on', } formset = PoetFormSet(data, queryset=Poet.objects.all()) formset.save(commit=False) self.assertEqual(Poet.objects.count(), 1) formset.save() self.assertTrue(formset.is_valid()) self.assertEqual(Poet.objects.count(), 0) def test_add_form_deletion_when_invalid(self): """ Make sure that an add form that is filled out, but marked for deletion doesn't cause validation errors. """ PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True) poet = Poet.objects.create(name='test') # One existing untouched and two new unvalid forms data = { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '1', 'form-MAX_NUM_FORMS': '0', 'form-0-id': str(poet.id), 'form-0-name': 'test', 'form-1-id': '', 'form-1-name': 'x' * 1000, # Too long 'form-2-id': str(poet.id), # Violate unique constraint 'form-2-name': 'test2', } formset = PoetFormSet(data, queryset=Poet.objects.all()) # Make sure this form doesn't pass validation. self.assertIs(formset.is_valid(), False) self.assertEqual(Poet.objects.count(), 1) # Then make sure that it *does* pass validation and delete the object, # even though the data in new forms aren't actually valid. data['form-0-DELETE'] = 'on' data['form-1-DELETE'] = 'on' data['form-2-DELETE'] = 'on' formset = PoetFormSet(data, queryset=Poet.objects.all()) self.assertIs(formset.is_valid(), True) formset.save() self.assertEqual(Poet.objects.count(), 0) def test_change_form_deletion_when_invalid(self): """ Make sure that a change form that is filled out, but marked for deletion doesn't cause validation errors. """ PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True) poet = Poet.objects.create(name='test') data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '1', 'form-MAX_NUM_FORMS': '0', 'form-0-id': str(poet.id), 'form-0-name': 'x' * 1000, } formset = PoetFormSet(data, queryset=Poet.objects.all()) # Make sure this form doesn't pass validation. self.assertIs(formset.is_valid(), False) self.assertEqual(Poet.objects.count(), 1) # Then make sure that it *does* pass validation and delete the object, # even though the data isn't actually valid. 
data['form-0-DELETE'] = 'on' formset = PoetFormSet(data, queryset=Poet.objects.all()) self.assertIs(formset.is_valid(), True) formset.save() self.assertEqual(Poet.objects.count(), 0) def test_outdated_deletion(self): poet = Poet.objects.create(name='test') poem = Poem.objects.create(name='Brevity is the soul of wit', poet=poet) PoemFormSet = inlineformset_factory(Poet, Poem, fields="__all__", can_delete=True) # Simulate deletion of an object that doesn't exist in the database data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '2', 'form-0-id': str(poem.pk), 'form-0-name': 'foo', 'form-1-id': str(poem.pk + 1), # doesn't exist 'form-1-name': 'bar', 'form-1-DELETE': 'on', } formset = PoemFormSet(data, instance=poet, prefix="form") # The formset is valid even though poem.pk + 1 doesn't exist, # because it's marked for deletion anyway self.assertTrue(formset.is_valid()) formset.save() # Make sure the save went through correctly self.assertEqual(Poem.objects.get(pk=poem.pk).name, "foo") self.assertEqual(poet.poem_set.count(), 1) self.assertFalse(Poem.objects.filter(pk=poem.pk + 1).exists()) class ModelFormsetTest(TestCase): def test_modelformset_factory_without_fields(self): """ Regression for #19733 """ message = ( "Calling modelformset_factory without defining 'fields' or 'exclude' " "explicitly is prohibited." ) with self.assertRaisesMessage(ImproperlyConfigured, message): modelformset_factory(Author) def test_simple_save(self): qs = Author.objects.all() AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=3) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" maxlength="100">' '<input type="hidden" name="form-0-id" id="id_form-0-id"></p>' ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_form-1-name">Name:</label>' '<input id="id_form-1-name" type="text" name="form-1-name" maxlength="100">' '<input type="hidden" name="form-1-id" id="id_form-1-id"></p>' ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_form-2-name">Name:</label>' ' <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100">' '<input type="hidden" name="form-2-id" id="id_form-2-id"></p>' ) data = { 'form-TOTAL_FORMS': '3', # the number of forms rendered 'form-INITIAL_FORMS': '0', # the number of forms with initial data 'form-MAX_NUM_FORMS': '', # the max number of forms 'form-0-name': 'Charles Baudelaire', 'form-1-name': 'Arthur Rimbaud', 'form-2-name': '', } formset = AuthorFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 2) author1, author2 = saved self.assertEqual(author1, Author.objects.get(name='Charles Baudelaire')) self.assertEqual(author2, Author.objects.get(name='Arthur Rimbaud')) authors = list(Author.objects.order_by('name')) self.assertEqual(authors, [author2, author1]) # Gah! We forgot Paul Verlaine. Let's create a formset to edit the # existing authors with an extra form to add him. We *could* pass in a # queryset to restrict the Author objects we edit, but in this case # we'll use it to display them in alphabetical order by name. 
qs = Author.objects.order_by('name') AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=False) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100">' '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id"></p>' % author2.id ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_form-1-name">Name:</label>' '<input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100">' '<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id"></p>' % author1.id ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_form-2-name">Name:</label>' '<input id="id_form-2-name" type="text" name="form-2-name" maxlength="100">' '<input type="hidden" name="form-2-id" id="id_form-2-id"></p>' ) data = { 'form-TOTAL_FORMS': '3', # the number of forms rendered 'form-INITIAL_FORMS': '2', # the number of forms with initial data 'form-MAX_NUM_FORMS': '', # the max number of forms 'form-0-id': str(author2.id), 'form-0-name': 'Arthur Rimbaud', 'form-1-id': str(author1.id), 'form-1-name': 'Charles Baudelaire', 'form-2-name': 'Paul Verlaine', } formset = AuthorFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) # Only changed or new objects are returned from formset.save() saved = formset.save() self.assertEqual(len(saved), 1) author3 = saved[0] self.assertEqual(author3, Author.objects.get(name='Paul Verlaine')) authors = list(Author.objects.order_by('name')) self.assertEqual(authors, [author2, author1, author3]) # This probably shouldn't happen, but it will. If an add form was # marked for deletion, make sure we don't save that form. 
qs = Author.objects.order_by('name') AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=True) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 4) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" ' 'value="Arthur Rimbaud" maxlength="100"></p>' '<p><label for="id_form-0-DELETE">Delete:</label>' '<input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE">' '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id"></p>' % author2.id ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_form-1-name">Name:</label>' '<input id="id_form-1-name" type="text" name="form-1-name" ' 'value="Charles Baudelaire" maxlength="100"></p>' '<p><label for="id_form-1-DELETE">Delete:</label>' '<input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE">' '<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id"></p>' % author1.id ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_form-2-name">Name:</label>' '<input id="id_form-2-name" type="text" name="form-2-name" ' 'value="Paul Verlaine" maxlength="100"></p>' '<p><label for="id_form-2-DELETE">Delete:</label>' '<input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE">' '<input type="hidden" name="form-2-id" value="%d" id="id_form-2-id"></p>' % author3.id ) self.assertHTMLEqual( formset.forms[3].as_p(), '<p><label for="id_form-3-name">Name:</label>' '<input id="id_form-3-name" type="text" name="form-3-name" maxlength="100"></p>' '<p><label for="id_form-3-DELETE">Delete:</label>' '<input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE">' '<input type="hidden" name="form-3-id" id="id_form-3-id"></p>' ) data = { 'form-TOTAL_FORMS': '4', # the number of forms rendered 'form-INITIAL_FORMS': '3', # the number of forms with initial data 'form-MAX_NUM_FORMS': '', # the max number of forms 'form-0-id': str(author2.id), 'form-0-name': 'Arthur Rimbaud', 'form-1-id': str(author1.id), 'form-1-name': 'Charles Baudelaire', 'form-2-id': str(author3.id), 'form-2-name': 'Paul Verlaine', 'form-3-name': 'Walt Whitman', 'form-3-DELETE': 'on', } formset = AuthorFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) # No objects were changed or saved so nothing will come back. self.assertEqual(formset.save(), []) authors = list(Author.objects.order_by('name')) self.assertEqual(authors, [author2, author1, author3]) # Let's edit a record to ensure save only returns that one record. data = { 'form-TOTAL_FORMS': '4', # the number of forms rendered 'form-INITIAL_FORMS': '3', # the number of forms with initial data 'form-MAX_NUM_FORMS': '', # the max number of forms 'form-0-id': str(author2.id), 'form-0-name': 'Walt Whitman', 'form-1-id': str(author1.id), 'form-1-name': 'Charles Baudelaire', 'form-2-id': str(author3.id), 'form-2-name': 'Paul Verlaine', 'form-3-name': '', 'form-3-DELETE': '', } formset = AuthorFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) # One record has changed. 
saved = formset.save() self.assertEqual(len(saved), 1) self.assertEqual(saved[0], Author.objects.get(name='Walt Whitman')) def test_commit_false(self): # Test the behavior of commit=False and save_m2m author1 = Author.objects.create(name='Charles Baudelaire') author2 = Author.objects.create(name='Paul Verlaine') author3 = Author.objects.create(name='Walt Whitman') meeting = AuthorMeeting.objects.create(created=date.today()) meeting.authors.set(Author.objects.all()) # create an Author instance to add to the meeting. author4 = Author.objects.create(name='John Steinbeck') AuthorMeetingFormSet = modelformset_factory(AuthorMeeting, fields="__all__", extra=1, can_delete=True) data = { 'form-TOTAL_FORMS': '2', # the number of forms rendered 'form-INITIAL_FORMS': '1', # the number of forms with initial data 'form-MAX_NUM_FORMS': '', # the max number of forms 'form-0-id': str(meeting.id), 'form-0-name': '2nd Tuesday of the Week Meeting', 'form-0-authors': [author2.id, author1.id, author3.id, author4.id], 'form-1-name': '', 'form-1-authors': '', 'form-1-DELETE': '', } formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all()) self.assertTrue(formset.is_valid()) instances = formset.save(commit=False) for instance in instances: instance.created = date.today() instance.save() formset.save_m2m() self.assertQuerysetEqual(instances[0].authors.all(), [ '<Author: Charles Baudelaire>', '<Author: John Steinbeck>', '<Author: Paul Verlaine>', '<Author: Walt Whitman>', ]) def test_max_num(self): # Test the behavior of max_num with model formsets. It should allow # all existing related objects/inlines for a given object to be # displayed, but not allow the creation of new inlines beyond max_num. Author.objects.create(name='Charles Baudelaire') Author.objects.create(name='Paul Verlaine') Author.objects.create(name='Walt Whitman') qs = Author.objects.order_by('name') AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None, extra=3) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 6) self.assertEqual(len(formset.extra_forms), 3) AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4, extra=3) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 4) self.assertEqual(len(formset.extra_forms), 1) AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0, extra=3) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 3) self.assertEqual(len(formset.extra_forms), 0) AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None) formset = AuthorFormSet(queryset=qs) self.assertQuerysetEqual(formset.get_queryset(), [ '<Author: Charles Baudelaire>', '<Author: Paul Verlaine>', '<Author: Walt Whitman>', ]) AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0) formset = AuthorFormSet(queryset=qs) self.assertQuerysetEqual(formset.get_queryset(), [ '<Author: Charles Baudelaire>', '<Author: Paul Verlaine>', '<Author: Walt Whitman>', ]) AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4) formset = AuthorFormSet(queryset=qs) self.assertQuerysetEqual(formset.get_queryset(), [ '<Author: Charles Baudelaire>', '<Author: Paul Verlaine>', '<Author: Walt Whitman>', ]) def test_min_num(self): # Test the behavior of min_num with model formsets. It should be # added to extra. 
qs = Author.objects.none() AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 0) AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=0) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 1) AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=1) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 2) def test_min_num_with_existing(self): # Test the behavior of min_num with existing objects. Author.objects.create(name='Charles Baudelaire') qs = Author.objects.all() AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0, min_num=1) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 1) def test_custom_save_method(self): class PoetForm(forms.ModelForm): def save(self, commit=True): # change the name to "Vladimir Mayakovsky" just to be a jerk. author = super().save(commit=False) author.name = "Vladimir Mayakovsky" if commit: author.save() return author PoetFormSet = modelformset_factory(Poet, fields="__all__", form=PoetForm) data = { 'form-TOTAL_FORMS': '3', # the number of forms rendered 'form-INITIAL_FORMS': '0', # the number of forms with initial data 'form-MAX_NUM_FORMS': '', # the max number of forms 'form-0-name': 'Walt Whitman', 'form-1-name': 'Charles Baudelaire', 'form-2-name': '', } qs = Poet.objects.all() formset = PoetFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) poets = formset.save() self.assertEqual(len(poets), 2) poet1, poet2 = poets self.assertEqual(poet1.name, 'Vladimir Mayakovsky') self.assertEqual(poet2.name, 'Vladimir Mayakovsky') def test_custom_form(self): """ model_formset_factory() respects fields and exclude parameters of a custom form. """ class PostForm1(forms.ModelForm): class Meta: model = Post fields = ('title', 'posted') class PostForm2(forms.ModelForm): class Meta: model = Post exclude = ('subtitle',) PostFormSet = modelformset_factory(Post, form=PostForm1) formset = PostFormSet() self.assertNotIn("subtitle", formset.forms[0].fields) PostFormSet = modelformset_factory(Post, form=PostForm2) formset = PostFormSet() self.assertNotIn("subtitle", formset.forms[0].fields) def test_custom_queryset_init(self): """ A queryset can be overridden in the formset's __init__() method. 
""" Author.objects.create(name='Charles Baudelaire') Author.objects.create(name='Paul Verlaine') class BaseAuthorFormSet(BaseModelFormSet): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.queryset = Author.objects.filter(name__startswith='Charles') AuthorFormSet = modelformset_factory(Author, fields='__all__', formset=BaseAuthorFormSet) formset = AuthorFormSet() self.assertEqual(len(formset.get_queryset()), 1) def test_model_inheritance(self): BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields="__all__") formset = BetterAuthorFormSet() self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" maxlength="100"></p>' '<p><label for="id_form-0-write_speed">Write speed:</label>' '<input type="number" name="form-0-write_speed" id="id_form-0-write_speed">' '<input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr"></p>' ) data = { 'form-TOTAL_FORMS': '1', # the number of forms rendered 'form-INITIAL_FORMS': '0', # the number of forms with initial data 'form-MAX_NUM_FORMS': '', # the max number of forms 'form-0-author_ptr': '', 'form-0-name': 'Ernest Hemingway', 'form-0-write_speed': '10', } formset = BetterAuthorFormSet(data) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) author1, = saved self.assertEqual(author1, BetterAuthor.objects.get(name='Ernest Hemingway')) hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk formset = BetterAuthorFormSet() self.assertEqual(len(formset.forms), 2) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" value="Ernest Hemingway" maxlength="100"></p>' '<p><label for="id_form-0-write_speed">Write speed:</label>' '<input type="number" name="form-0-write_speed" value="10" id="id_form-0-write_speed">' '<input type="hidden" name="form-0-author_ptr" value="%d" id="id_form-0-author_ptr"></p>' % hemingway_id ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_form-1-name">Name:</label>' '<input id="id_form-1-name" type="text" name="form-1-name" maxlength="100"></p>' '<p><label for="id_form-1-write_speed">Write speed:</label>' '<input type="number" name="form-1-write_speed" id="id_form-1-write_speed">' '<input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr"></p>' ) data = { 'form-TOTAL_FORMS': '2', # the number of forms rendered 'form-INITIAL_FORMS': '1', # the number of forms with initial data 'form-MAX_NUM_FORMS': '', # the max number of forms 'form-0-author_ptr': hemingway_id, 'form-0-name': 'Ernest Hemingway', 'form-0-write_speed': '10', 'form-1-author_ptr': '', 'form-1-name': '', 'form-1-write_speed': '', } formset = BetterAuthorFormSet(data) self.assertTrue(formset.is_valid()) self.assertEqual(formset.save(), []) def test_inline_formsets(self): # We can also create a formset that is tied to a parent model. This is # how the admin system's edit inline functionality works. 
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=3, fields="__all__") author = Author.objects.create(name='Charles Baudelaire') formset = AuthorBooksFormSet(instance=author) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" ' 'name="book_set-0-title" maxlength="100"><input type="hidden" name="book_set-0-author" value="%d" ' 'id="id_book_set-0-author"><input type="hidden" name="book_set-0-id" id="id_book_set-0-id">' '</p>' % author.id ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_book_set-1-title">Title:</label>' '<input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100">' '<input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author">' '<input type="hidden" name="book_set-1-id" id="id_book_set-1-id"></p>' % author.id ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_book_set-2-title">Title:</label>' '<input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100">' '<input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author">' '<input type="hidden" name="book_set-2-id" id="id_book_set-2-id"></p>' % author.id ) data = { 'book_set-TOTAL_FORMS': '3', # the number of forms rendered 'book_set-INITIAL_FORMS': '0', # the number of forms with initial data 'book_set-MAX_NUM_FORMS': '', # the max number of forms 'book_set-0-title': 'Les Fleurs du Mal', 'book_set-1-title': '', 'book_set-2-title': '', } formset = AuthorBooksFormSet(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) book1, = saved self.assertEqual(book1, Book.objects.get(title='Les Fleurs du Mal')) self.assertQuerysetEqual(author.book_set.all(), ['<Book: Les Fleurs du Mal>']) # Now that we've added a book to Charles Baudelaire, let's try adding # another one. This time though, an edit form will be available for # every existing book. 
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__") author = Author.objects.get(name='Charles Baudelaire') formset = AuthorBooksFormSet(instance=author) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_book_set-0-title">Title:</label>' '<input id="id_book_set-0-title" type="text" name="book_set-0-title" ' 'value="Les Fleurs du Mal" maxlength="100">' '<input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author">' '<input type="hidden" name="book_set-0-id" value="%d" id="id_book_set-0-id"></p>' % ( author.id, book1.id, ) ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_book_set-1-title">Title:</label>' '<input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100">' '<input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author">' '<input type="hidden" name="book_set-1-id" id="id_book_set-1-id"></p>' % author.id ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_book_set-2-title">Title:</label>' '<input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100">' '<input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author">' '<input type="hidden" name="book_set-2-id" id="id_book_set-2-id"></p>' % author.id ) data = { 'book_set-TOTAL_FORMS': '3', # the number of forms rendered 'book_set-INITIAL_FORMS': '1', # the number of forms with initial data 'book_set-MAX_NUM_FORMS': '', # the max number of forms 'book_set-0-id': str(book1.id), 'book_set-0-title': 'Les Fleurs du Mal', 'book_set-1-title': 'Les Paradis Artificiels', 'book_set-2-title': '', } formset = AuthorBooksFormSet(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) book2, = saved self.assertEqual(book2, Book.objects.get(title='Les Paradis Artificiels')) # As you can see, 'Les Paradis Artificiels' is now a book belonging to # Charles Baudelaire. self.assertQuerysetEqual(author.book_set.order_by('title'), [ '<Book: Les Fleurs du Mal>', '<Book: Les Paradis Artificiels>', ]) def test_inline_formsets_save_as_new(self): # The save_as_new parameter lets you re-associate the data to a new # instance. This is used in the admin for save_as functionality. AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__") Author.objects.create(name='Charles Baudelaire') # An immutable QueryDict simulates request.POST. data = QueryDict(mutable=True) data.update({ 'book_set-TOTAL_FORMS': '3', # the number of forms rendered 'book_set-INITIAL_FORMS': '2', # the number of forms with initial data 'book_set-MAX_NUM_FORMS': '', # the max number of forms 'book_set-0-id': '1', 'book_set-0-title': 'Les Fleurs du Mal', 'book_set-1-id': '2', 'book_set-1-title': 'Les Paradis Artificiels', 'book_set-2-title': '', }) data._mutable = False formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True) self.assertTrue(formset.is_valid()) self.assertIs(data._mutable, False) new_author = Author.objects.create(name='Charles Baudelaire') formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True) saved = formset.save() self.assertEqual(len(saved), 2) book1, book2 = saved self.assertEqual(book1.title, 'Les Fleurs du Mal') self.assertEqual(book2.title, 'Les Paradis Artificiels') # Test using a custom prefix on an inline formset. 
formset = AuthorBooksFormSet(prefix="test") self.assertEqual(len(formset.forms), 2) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_test-0-title">Title:</label>' '<input id="id_test-0-title" type="text" name="test-0-title" maxlength="100">' '<input type="hidden" name="test-0-author" id="id_test-0-author">' '<input type="hidden" name="test-0-id" id="id_test-0-id"></p>' ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_test-1-title">Title:</label>' '<input id="id_test-1-title" type="text" name="test-1-title" maxlength="100">' '<input type="hidden" name="test-1-author" id="id_test-1-author">' '<input type="hidden" name="test-1-id" id="id_test-1-id"></p>' ) def test_inline_formsets_with_custom_pk(self): # Test inline formsets where the inline-edited object has a custom # primary key that is not the fk to the parent object. self.maxDiff = 1024 AuthorBooksFormSet2 = inlineformset_factory( Author, BookWithCustomPK, can_delete=False, extra=1, fields="__all__" ) author = Author.objects.create(pk=1, name='Charles Baudelaire') formset = AuthorBooksFormSet2(instance=author) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label>' '<input id="id_bookwithcustompk_set-0-my_pk" type="number" ' 'name="bookwithcustompk_set-0-my_pk" step="1"></p>' '<p><label for="id_bookwithcustompk_set-0-title">Title:</label>' '<input id="id_bookwithcustompk_set-0-title" type="text" ' 'name="bookwithcustompk_set-0-title" maxlength="100">' '<input type="hidden" name="bookwithcustompk_set-0-author" ' 'value="1" id="id_bookwithcustompk_set-0-author"></p>' ) data = { 'bookwithcustompk_set-TOTAL_FORMS': '1', # the number of forms rendered 'bookwithcustompk_set-INITIAL_FORMS': '0', # the number of forms with initial data 'bookwithcustompk_set-MAX_NUM_FORMS': '', # the max number of forms 'bookwithcustompk_set-0-my_pk': '77777', 'bookwithcustompk_set-0-title': 'Les Fleurs du Mal', } formset = AuthorBooksFormSet2(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) book1, = saved self.assertEqual(book1.pk, 77777) book1 = author.bookwithcustompk_set.get() self.assertEqual(book1.title, 'Les Fleurs du Mal') def test_inline_formsets_with_multi_table_inheritance(self): # Test inline formsets where the inline-edited object uses multi-table # inheritance, thus has a non AutoField yet auto-created primary key. 
AuthorBooksFormSet3 = inlineformset_factory(Author, AlternateBook, can_delete=False, extra=1, fields="__all__") author = Author.objects.create(pk=1, name='Charles Baudelaire') formset = AuthorBooksFormSet3(instance=author) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_alternatebook_set-0-title">Title:</label>' '<input id="id_alternatebook_set-0-title" type="text" ' 'name="alternatebook_set-0-title" maxlength="100"></p>' '<p><label for="id_alternatebook_set-0-notes">Notes:</label>' '<input id="id_alternatebook_set-0-notes" type="text" ' 'name="alternatebook_set-0-notes" maxlength="100">' '<input type="hidden" name="alternatebook_set-0-author" value="1" ' 'id="id_alternatebook_set-0-author">' '<input type="hidden" name="alternatebook_set-0-book_ptr" ' 'id="id_alternatebook_set-0-book_ptr"></p>' ) data = { 'alternatebook_set-TOTAL_FORMS': '1', # the number of forms rendered 'alternatebook_set-INITIAL_FORMS': '0', # the number of forms with initial data 'alternatebook_set-MAX_NUM_FORMS': '', # the max number of forms 'alternatebook_set-0-title': 'Flowers of Evil', 'alternatebook_set-0-notes': 'English translation of Les Fleurs du Mal' } formset = AuthorBooksFormSet3(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) book1, = saved self.assertEqual(book1.title, 'Flowers of Evil') self.assertEqual(book1.notes, 'English translation of Les Fleurs du Mal') @skipUnlessDBFeature('supports_partially_nullable_unique_constraints') def test_inline_formsets_with_nullable_unique_together(self): # Test inline formsets where the inline-edited object has a # unique_together constraint with a nullable member AuthorBooksFormSet4 = inlineformset_factory( Author, BookWithOptionalAltEditor, can_delete=False, extra=2, fields="__all__" ) author = Author.objects.create(pk=1, name='Charles Baudelaire') data = { 'bookwithoptionalalteditor_set-TOTAL_FORMS': '2', # the number of forms rendered 'bookwithoptionalalteditor_set-INITIAL_FORMS': '0', # the number of forms with initial data 'bookwithoptionalalteditor_set-MAX_NUM_FORMS': '', # the max number of forms 'bookwithoptionalalteditor_set-0-author': '1', 'bookwithoptionalalteditor_set-0-title': 'Les Fleurs du Mal', 'bookwithoptionalalteditor_set-1-author': '1', 'bookwithoptionalalteditor_set-1-title': 'Les Fleurs du Mal', } formset = AuthorBooksFormSet4(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 2) book1, book2 = saved self.assertEqual(book1.author_id, 1) self.assertEqual(book1.title, 'Les Fleurs du Mal') self.assertEqual(book2.author_id, 1) self.assertEqual(book2.title, 'Les Fleurs du Mal') def test_inline_formsets_with_custom_save_method(self): AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__") author = Author.objects.create(pk=1, name='Charles Baudelaire') book1 = Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels') book2 = Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal') book3 = Book.objects.create(pk=3, author=author, title='Flowers of Evil') class PoemForm(forms.ModelForm): def save(self, commit=True): # change the name to "Brooklyn Bridge" just to be a jerk. 
poem = super().save(commit=False) poem.name = "Brooklyn Bridge" if commit: poem.save() return poem PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm, fields="__all__") data = { 'poem_set-TOTAL_FORMS': '3', # the number of forms rendered 'poem_set-INITIAL_FORMS': '0', # the number of forms with initial data 'poem_set-MAX_NUM_FORMS': '', # the max number of forms 'poem_set-0-name': 'The Cloud in Trousers', 'poem_set-1-name': 'I', 'poem_set-2-name': '', } poet = Poet.objects.create(name='Vladimir Mayakovsky') formset = PoemFormSet(data=data, instance=poet) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 2) poem1, poem2 = saved self.assertEqual(poem1.name, 'Brooklyn Bridge') self.assertEqual(poem2.name, 'Brooklyn Bridge') # We can provide a custom queryset to our InlineFormSet: custom_qs = Book.objects.order_by('-title') formset = AuthorBooksFormSet(instance=author, queryset=custom_qs) self.assertEqual(len(formset.forms), 5) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_book_set-0-title">Title:</label>' '<input id="id_book_set-0-title" type="text" name="book_set-0-title" ' 'value="Les Paradis Artificiels" maxlength="100">' '<input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author">' '<input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id"></p>' ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_book_set-1-title">Title:</label>' '<input id="id_book_set-1-title" type="text" name="book_set-1-title" ' 'value="Les Fleurs du Mal" maxlength="100">' '<input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author">' '<input type="hidden" name="book_set-1-id" value="2" id="id_book_set-1-id"></p>' ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_book_set-2-title">Title:</label>' '<input id="id_book_set-2-title" type="text" name="book_set-2-title" ' 'value="Flowers of Evil" maxlength="100">' '<input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author">' '<input type="hidden" name="book_set-2-id" value="3" id="id_book_set-2-id"></p>' ) self.assertHTMLEqual( formset.forms[3].as_p(), '<p><label for="id_book_set-3-title">Title:</label>' '<input id="id_book_set-3-title" type="text" name="book_set-3-title" maxlength="100">' '<input type="hidden" name="book_set-3-author" value="1" id="id_book_set-3-author">' '<input type="hidden" name="book_set-3-id" id="id_book_set-3-id"></p>' ) self.assertHTMLEqual( formset.forms[4].as_p(), '<p><label for="id_book_set-4-title">Title:</label>' '<input id="id_book_set-4-title" type="text" name="book_set-4-title" maxlength="100">' '<input type="hidden" name="book_set-4-author" value="1" id="id_book_set-4-author">' '<input type="hidden" name="book_set-4-id" id="id_book_set-4-id"></p>' ) data = { 'book_set-TOTAL_FORMS': '5', # the number of forms rendered 'book_set-INITIAL_FORMS': '3', # the number of forms with initial data 'book_set-MAX_NUM_FORMS': '', # the max number of forms 'book_set-0-id': str(book1.id), 'book_set-0-title': 'Les Paradis Artificiels', 'book_set-1-id': str(book2.id), 'book_set-1-title': 'Les Fleurs du Mal', 'book_set-2-id': str(book3.id), 'book_set-2-title': 'Flowers of Evil', 'book_set-3-title': 'Revue des deux mondes', 'book_set-4-title': '', } formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs) self.assertTrue(formset.is_valid()) custom_qs = Book.objects.filter(title__startswith='F') formset = AuthorBooksFormSet(instance=author, queryset=custom_qs) 
self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_book_set-0-title">Title:</label>' '<input id="id_book_set-0-title" type="text" name="book_set-0-title" ' 'value="Flowers of Evil" maxlength="100">' '<input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author">' '<input type="hidden" name="book_set-0-id" value="3" id="id_book_set-0-id"></p>' ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_book_set-1-title">Title:</label>' '<input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100">' '<input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author">' '<input type="hidden" name="book_set-1-id" id="id_book_set-1-id"></p>' ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_book_set-2-title">Title:</label>' '<input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100">' '<input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author">' '<input type="hidden" name="book_set-2-id" id="id_book_set-2-id"></p>' ) data = { 'book_set-TOTAL_FORMS': '3', # the number of forms rendered 'book_set-INITIAL_FORMS': '1', # the number of forms with initial data 'book_set-MAX_NUM_FORMS': '', # the max number of forms 'book_set-0-id': str(book3.id), 'book_set-0-title': 'Flowers of Evil', 'book_set-1-title': 'Revue des deux mondes', 'book_set-2-title': '', } formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs) self.assertTrue(formset.is_valid()) def test_inline_formsets_with_custom_save_method_related_instance(self): """ The ModelForm.save() method should be able to access the related object if it exists in the database (#24395). """ class PoemForm2(forms.ModelForm): def save(self, commit=True): poem = super().save(commit=False) poem.name = "%s by %s" % (poem.name, poem.poet.name) if commit: poem.save() return poem PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm2, fields="__all__") data = { 'poem_set-TOTAL_FORMS': '1', 'poem_set-INITIAL_FORMS': '0', 'poem_set-MAX_NUM_FORMS': '', 'poem_set-0-name': 'Le Lac', } poet = Poet() formset = PoemFormSet(data=data, instance=poet) self.assertTrue(formset.is_valid()) # The Poet instance is saved after the formset instantiation. This # happens in admin's changeform_view() when adding a new object and # some inlines in the same request. poet.name = 'Lamartine' poet.save() poem = formset.save()[0] self.assertEqual(poem.name, 'Le Lac by Lamartine') def test_inline_formsets_with_wrong_fk_name(self): """ Regression for #23451 """ message = "fk_name 'title' is not a ForeignKey to 'model_formsets.Author'." 
with self.assertRaisesMessage(ValueError, message): inlineformset_factory(Author, Book, fields="__all__", fk_name='title') def test_custom_pk(self): # We need to ensure that it is displayed CustomPrimaryKeyFormSet = modelformset_factory(CustomPrimaryKey, fields="__all__") formset = CustomPrimaryKeyFormSet() self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-my_pk">My pk:</label> <input id="id_form-0-my_pk" type="text" ' 'name="form-0-my_pk" maxlength="10"></p>' '<p><label for="id_form-0-some_field">Some field:</label>' '<input id="id_form-0-some_field" type="text" name="form-0-some_field" maxlength="100"></p>' ) # Custom primary keys with ForeignKey, OneToOneField and AutoField ############ place = Place.objects.create(pk=1, name='Giordanos', city='Chicago') FormSet = inlineformset_factory(Place, Owner, extra=2, can_delete=False, fields="__all__") formset = FormSet(instance=place) self.assertEqual(len(formset.forms), 2) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_owner_set-0-name">Name:</label>' '<input id="id_owner_set-0-name" type="text" name="owner_set-0-name" maxlength="100">' '<input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place">' '<input type="hidden" name="owner_set-0-auto_id" id="id_owner_set-0-auto_id"></p>' ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_owner_set-1-name">Name:</label>' '<input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100">' '<input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place">' '<input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id"></p>' ) data = { 'owner_set-TOTAL_FORMS': '2', 'owner_set-INITIAL_FORMS': '0', 'owner_set-MAX_NUM_FORMS': '', 'owner_set-0-auto_id': '', 'owner_set-0-name': 'Joe Perry', 'owner_set-1-auto_id': '', 'owner_set-1-name': '', } formset = FormSet(data, instance=place) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) owner1, = saved self.assertEqual(owner1.name, 'Joe Perry') self.assertEqual(owner1.place.name, 'Giordanos') formset = FormSet(instance=place) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_owner_set-0-name">Name:</label>' '<input id="id_owner_set-0-name" type="text" name="owner_set-0-name" value="Joe Perry" maxlength="100">' '<input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place">' '<input type="hidden" name="owner_set-0-auto_id" value="%d" id="id_owner_set-0-auto_id"></p>' % owner1.auto_id ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_owner_set-1-name">Name:</label>' '<input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100">' '<input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place">' '<input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id"></p>' ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_owner_set-2-name">Name:</label>' '<input id="id_owner_set-2-name" type="text" name="owner_set-2-name" maxlength="100">' '<input type="hidden" name="owner_set-2-place" value="1" id="id_owner_set-2-place">' '<input type="hidden" name="owner_set-2-auto_id" id="id_owner_set-2-auto_id"></p>' ) data = { 'owner_set-TOTAL_FORMS': '3', 'owner_set-INITIAL_FORMS': '1', 'owner_set-MAX_NUM_FORMS': '', 'owner_set-0-auto_id': str(owner1.auto_id), 'owner_set-0-name': 'Joe Perry', 'owner_set-1-auto_id': 
'', 'owner_set-1-name': 'Jack Berry', 'owner_set-2-auto_id': '', 'owner_set-2-name': '', } formset = FormSet(data, instance=place) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) owner2, = saved self.assertEqual(owner2.name, 'Jack Berry') self.assertEqual(owner2.place.name, 'Giordanos') # Ensure a custom primary key that is a ForeignKey or OneToOneField get rendered for the user to choose. FormSet = modelformset_factory(OwnerProfile, fields="__all__") formset = FormSet() self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-owner">Owner:</label>' '<select name="form-0-owner" id="id_form-0-owner">' '<option value="" selected>---------</option>' '<option value="%d">Joe Perry at Giordanos</option>' '<option value="%d">Jack Berry at Giordanos</option>' '</select></p>' '<p><label for="id_form-0-age">Age:</label>' '<input type="number" name="form-0-age" id="id_form-0-age" min="0"></p>' % (owner1.auto_id, owner2.auto_id) ) owner1 = Owner.objects.get(name='Joe Perry') FormSet = inlineformset_factory(Owner, OwnerProfile, max_num=1, can_delete=False, fields="__all__") self.assertEqual(FormSet.max_num, 1) formset = FormSet(instance=owner1) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_ownerprofile-0-age">Age:</label>' '<input type="number" name="ownerprofile-0-age" id="id_ownerprofile-0-age" min="0">' '<input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner"></p>' % owner1.auto_id ) data = { 'ownerprofile-TOTAL_FORMS': '1', 'ownerprofile-INITIAL_FORMS': '0', 'ownerprofile-MAX_NUM_FORMS': '1', 'ownerprofile-0-owner': '', 'ownerprofile-0-age': '54', } formset = FormSet(data, instance=owner1) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) profile1, = saved self.assertEqual(profile1.owner, owner1) self.assertEqual(profile1.age, 54) formset = FormSet(instance=owner1) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_ownerprofile-0-age">Age:</label>' '<input type="number" name="ownerprofile-0-age" value="54" id="id_ownerprofile-0-age" min="0">' '<input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner"></p>' % owner1.auto_id ) data = { 'ownerprofile-TOTAL_FORMS': '1', 'ownerprofile-INITIAL_FORMS': '1', 'ownerprofile-MAX_NUM_FORMS': '1', 'ownerprofile-0-owner': str(owner1.auto_id), 'ownerprofile-0-age': '55', } formset = FormSet(data, instance=owner1) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) profile1, = saved self.assertEqual(profile1.owner, owner1) self.assertEqual(profile1.age, 55) def test_unique_true_enforces_max_num_one(self): # ForeignKey with unique=True should enforce max_num=1 place = Place.objects.create(pk=1, name='Giordanos', city='Chicago') FormSet = inlineformset_factory(Place, Location, can_delete=False, fields="__all__") self.assertEqual(FormSet.max_num, 1) formset = FormSet(instance=place) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_location_set-0-lat">Lat:</label>' '<input id="id_location_set-0-lat" type="text" name="location_set-0-lat" maxlength="100"></p>' '<p><label for="id_location_set-0-lon">Lon:</label> ' '<input id="id_location_set-0-lon" type="text" name="location_set-0-lon" maxlength="100">' '<input type="hidden" name="location_set-0-place" value="1" id="id_location_set-0-place">' '<input 
type="hidden" name="location_set-0-id" id="id_location_set-0-id"></p>' ) def test_foreign_keys_in_parents(self): self.assertEqual(type(_get_foreign_key(Restaurant, Owner)), models.ForeignKey) self.assertEqual(type(_get_foreign_key(MexicanRestaurant, Owner)), models.ForeignKey) def test_unique_validation(self): FormSet = modelformset_factory(Product, fields="__all__", extra=1) data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-slug': 'car-red', } formset = FormSet(data) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) product1, = saved self.assertEqual(product1.slug, 'car-red') data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-slug': 'car-red', } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual(formset.errors, [{'slug': ['Product with this Slug already exists.']}]) def test_modelformset_validate_max_flag(self): # If validate_max is set and max_num is less than TOTAL_FORMS in the # data, then throw an exception. MAX_NUM_FORMS in the data is # irrelevant here (it's output as a hint for the client but its # value in the returned data is not checked) data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '2', # should be ignored 'form-0-price': '12.00', 'form-0-quantity': '1', 'form-1-price': '24.00', 'form-1-quantity': '2', } FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1, validate_max=True) formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual(formset.non_form_errors(), ['Please submit at most 1 form.']) # Now test the same thing without the validate_max flag to ensure # default behavior is unchanged FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1) formset = FormSet(data) self.assertTrue(formset.is_valid()) def test_modelformset_min_num_equals_max_num_less_than(self): data = { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '2', 'form-0-slug': 'car-red', 'form-1-slug': 'car-blue', 'form-2-slug': 'car-black', } FormSet = modelformset_factory( Product, fields='__all__', extra=1, max_num=2, validate_max=True, min_num=2, validate_min=True, ) formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual(formset.non_form_errors(), ['Please submit at most 2 forms.']) def test_modelformset_min_num_equals_max_num_more_than(self): data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '2', 'form-0-slug': 'car-red', } FormSet = modelformset_factory( Product, fields='__all__', extra=1, max_num=2, validate_max=True, min_num=2, validate_min=True, ) formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual(formset.non_form_errors(), ['Please submit at least 2 forms.']) def test_unique_together_validation(self): FormSet = modelformset_factory(Price, fields="__all__", extra=1) data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-price': '12.00', 'form-0-quantity': '1', } formset = FormSet(data) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) price1, = saved self.assertEqual(price1.price, Decimal('12.00')) self.assertEqual(price1.quantity, 1) data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-price': '12.00', 'form-0-quantity': '1', } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual(formset.errors, 
[{'__all__': ['Price with this Price and Quantity already exists.']}]) def test_unique_together_with_inlineformset_factory(self): # Also see bug #8882. repository = Repository.objects.create(name='Test Repo') FormSet = inlineformset_factory(Repository, Revision, extra=1, fields="__all__") data = { 'revision_set-TOTAL_FORMS': '1', 'revision_set-INITIAL_FORMS': '0', 'revision_set-MAX_NUM_FORMS': '', 'revision_set-0-repository': repository.pk, 'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76', 'revision_set-0-DELETE': '', } formset = FormSet(data, instance=repository) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) revision1, = saved self.assertEqual(revision1.repository, repository) self.assertEqual(revision1.revision, '146239817507f148d448db38840db7c3cbf47c76') # attempt to save the same revision against the same repo. data = { 'revision_set-TOTAL_FORMS': '1', 'revision_set-INITIAL_FORMS': '0', 'revision_set-MAX_NUM_FORMS': '', 'revision_set-0-repository': repository.pk, 'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76', 'revision_set-0-DELETE': '', } formset = FormSet(data, instance=repository) self.assertFalse(formset.is_valid()) self.assertEqual(formset.errors, [{'__all__': ['Revision with this Repository and Revision already exists.']}]) # unique_together with inlineformset_factory with overridden form fields # Also see #9494 FormSet = inlineformset_factory(Repository, Revision, fields=('revision',), extra=1) data = { 'revision_set-TOTAL_FORMS': '1', 'revision_set-INITIAL_FORMS': '0', 'revision_set-MAX_NUM_FORMS': '', 'revision_set-0-repository': repository.pk, 'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76', 'revision_set-0-DELETE': '', } formset = FormSet(data, instance=repository) self.assertFalse(formset.is_valid()) def test_callable_defaults(self): # Use of callable defaults (see bug #7975). person = Person.objects.create(name='Ringo') FormSet = inlineformset_factory(Person, Membership, can_delete=False, extra=1, fields="__all__") formset = FormSet(instance=person) # Django will render a hidden field for model fields that have a callable # default. This is required to ensure the value is tested for change correctly # when determine what extra forms have changed to save. self.assertEqual(len(formset.forms), 1) # this formset only has one form form = formset.forms[0] now = form.fields['date_joined'].initial() result = form.as_p() result = re.sub(r'[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}(?:\.[0-9]+)?', '__DATETIME__', result) self.assertHTMLEqual( result, '<p><label for="id_membership_set-0-date_joined">Date joined:</label>' '<input type="text" name="membership_set-0-date_joined" ' 'value="__DATETIME__" id="id_membership_set-0-date_joined">' '<input type="hidden" name="initial-membership_set-0-date_joined" value="__DATETIME__" ' 'id="initial-membership_set-0-id_membership_set-0-date_joined"></p>' '<p><label for="id_membership_set-0-karma">Karma:</label>' '<input type="number" name="membership_set-0-karma" id="id_membership_set-0-karma">' '<input type="hidden" name="membership_set-0-person" value="%d" id="id_membership_set-0-person">' '<input type="hidden" name="membership_set-0-id" id="id_membership_set-0-id"></p>' % person.id) # test for validation with callable defaults. 
Validations rely on hidden fields data = { 'membership_set-TOTAL_FORMS': '1', 'membership_set-INITIAL_FORMS': '0', 'membership_set-MAX_NUM_FORMS': '', 'membership_set-0-date_joined': now.strftime('%Y-%m-%d %H:%M:%S'), 'initial-membership_set-0-date_joined': now.strftime('%Y-%m-%d %H:%M:%S'), 'membership_set-0-karma': '', } formset = FormSet(data, instance=person) self.assertTrue(formset.is_valid()) # now test for when the data changes one_day_later = now + datetime.timedelta(days=1) filled_data = { 'membership_set-TOTAL_FORMS': '1', 'membership_set-INITIAL_FORMS': '0', 'membership_set-MAX_NUM_FORMS': '', 'membership_set-0-date_joined': one_day_later.strftime('%Y-%m-%d %H:%M:%S'), 'initial-membership_set-0-date_joined': now.strftime('%Y-%m-%d %H:%M:%S'), 'membership_set-0-karma': '', } formset = FormSet(filled_data, instance=person) self.assertFalse(formset.is_valid()) # now test with split datetime fields class MembershipForm(forms.ModelForm): date_joined = forms.SplitDateTimeField(initial=now) class Meta: model = Membership fields = "__all__" def __init__(self, **kwargs): super().__init__(**kwargs) self.fields['date_joined'].widget = forms.SplitDateTimeWidget() FormSet = inlineformset_factory( Person, Membership, form=MembershipForm, can_delete=False, extra=1, fields="__all__", ) data = { 'membership_set-TOTAL_FORMS': '1', 'membership_set-INITIAL_FORMS': '0', 'membership_set-MAX_NUM_FORMS': '', 'membership_set-0-date_joined_0': now.strftime('%Y-%m-%d'), 'membership_set-0-date_joined_1': now.strftime('%H:%M:%S'), 'initial-membership_set-0-date_joined': now.strftime('%Y-%m-%d %H:%M:%S'), 'membership_set-0-karma': '', } formset = FormSet(data, instance=person) self.assertTrue(formset.is_valid()) def test_inlineformset_factory_with_null_fk(self): # inlineformset_factory tests with fk having null=True. see #9462. 
# create some data that will exhibit the issue team = Team.objects.create(name="Red Vipers") Player(name="Timmy").save() Player(name="Bobby", team=team).save() PlayerInlineFormSet = inlineformset_factory(Team, Player, fields="__all__") formset = PlayerInlineFormSet() self.assertQuerysetEqual(formset.get_queryset(), []) formset = PlayerInlineFormSet(instance=team) players = formset.get_queryset() self.assertEqual(len(players), 1) player1, = players self.assertEqual(player1.team, team) self.assertEqual(player1.name, 'Bobby') def test_inlineformset_with_arrayfield(self): class SimpleArrayField(forms.CharField): """A proxy for django.contrib.postgres.forms.SimpleArrayField.""" def to_python(self, value): value = super().to_python(value) return value.split(',') if value else [] class BookForm(forms.ModelForm): title = SimpleArrayField() class Meta: model = Book fields = ('title',) BookFormSet = inlineformset_factory(Author, Book, form=BookForm) data = { 'book_set-TOTAL_FORMS': '3', 'book_set-INITIAL_FORMS': '0', 'book_set-MAX_NUM_FORMS': '', 'book_set-0-title': 'test1,test2', 'book_set-1-title': 'test1,test2', 'book_set-2-title': 'test3,test4', } author = Author.objects.create(name='test') formset = BookFormSet(data, instance=author) self.assertEqual(formset.errors, [{}, {'__all__': ['Please correct the duplicate values below.']}, {}]) def test_model_formset_with_custom_pk(self): # a formset for a Model that has a custom primary key that still needs to be # added to the formset automatically FormSet = modelformset_factory(ClassyMexicanRestaurant, fields=["tacos_are_yummy"]) self.assertEqual(sorted(FormSet().forms[0].fields), ['tacos_are_yummy', 'the_restaurant']) def test_model_formset_with_initial_model_instance(self): # has_changed should compare model instance and primary key # see #18898 FormSet = modelformset_factory(Poem, fields='__all__') john_milton = Poet(name="John Milton") john_milton.save() data = { 'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 0, 'form-MAX_NUM_FORMS': '', 'form-0-name': '', 'form-0-poet': str(john_milton.id), } formset = FormSet(initial=[{'poet': john_milton}], data=data) self.assertFalse(formset.extra_forms[0].has_changed()) def test_model_formset_with_initial_queryset(self): # has_changed should work with queryset and list of pk's # see #18898 FormSet = modelformset_factory(AuthorMeeting, fields='__all__') Author.objects.create(pk=1, name='Charles Baudelaire') data = { 'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 0, 'form-MAX_NUM_FORMS': '', 'form-0-name': '', 'form-0-created': '', 'form-0-authors': list(Author.objects.values_list('id', flat=True)), } formset = FormSet(initial=[{'authors': Author.objects.all()}], data=data) self.assertFalse(formset.extra_forms[0].has_changed()) def test_prevent_duplicates_from_with_the_same_formset(self): FormSet = modelformset_factory(Product, fields="__all__", extra=2) data = { 'form-TOTAL_FORMS': 2, 'form-INITIAL_FORMS': 0, 'form-MAX_NUM_FORMS': '', 'form-0-slug': 'red_car', 'form-1-slug': 'red_car', } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual(formset._non_form_errors, ['Please correct the duplicate data for slug.']) FormSet = modelformset_factory(Price, fields="__all__", extra=2) data = { 'form-TOTAL_FORMS': 2, 'form-INITIAL_FORMS': 0, 'form-MAX_NUM_FORMS': '', 'form-0-price': '25', 'form-0-quantity': '7', 'form-1-price': '25', 'form-1-quantity': '7', } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, ['Please correct the duplicate data 
for price and quantity, which must be unique.'] ) # Only the price field is specified, this should skip any unique checks since # the unique_together is not fulfilled. This will fail with a KeyError if broken. FormSet = modelformset_factory(Price, fields=("price",), extra=2) data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-price': '24', 'form-1-price': '24', } formset = FormSet(data) self.assertTrue(formset.is_valid()) FormSet = inlineformset_factory(Author, Book, extra=0, fields="__all__") author = Author.objects.create(pk=1, name='Charles Baudelaire') Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels') Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal') Book.objects.create(pk=3, author=author, title='Flowers of Evil') book_ids = author.book_set.order_by('id').values_list('id', flat=True) data = { 'book_set-TOTAL_FORMS': '2', 'book_set-INITIAL_FORMS': '2', 'book_set-MAX_NUM_FORMS': '', 'book_set-0-title': 'The 2008 Election', 'book_set-0-author': str(author.id), 'book_set-0-id': str(book_ids[0]), 'book_set-1-title': 'The 2008 Election', 'book_set-1-author': str(author.id), 'book_set-1-id': str(book_ids[1]), } formset = FormSet(data=data, instance=author) self.assertFalse(formset.is_valid()) self.assertEqual(formset._non_form_errors, ['Please correct the duplicate data for title.']) self.assertEqual(formset.errors, [{}, {'__all__': ['Please correct the duplicate values below.']}]) FormSet = modelformset_factory(Post, fields="__all__", extra=2) data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'blah', 'form-0-slug': 'Morning', 'form-0-subtitle': 'foo', 'form-0-posted': '2009-01-01', 'form-1-title': 'blah', 'form-1-slug': 'Morning in Prague', 'form-1-subtitle': 'rawr', 'form-1-posted': '2009-01-01' } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, ['Please correct the duplicate data for title which must be unique for the date in posted.'] ) self.assertEqual( formset.errors, [{}, {'__all__': ['Please correct the duplicate values below.']}] ) data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo', 'form-0-slug': 'Morning in Prague', 'form-0-subtitle': 'foo', 'form-0-posted': '2009-01-01', 'form-1-title': 'blah', 'form-1-slug': 'Morning in Prague', 'form-1-subtitle': 'rawr', 'form-1-posted': '2009-08-02' } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, ['Please correct the duplicate data for slug which must be unique for the year in posted.'] ) data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo', 'form-0-slug': 'Morning in Prague', 'form-0-subtitle': 'rawr', 'form-0-posted': '2008-08-01', 'form-1-title': 'blah', 'form-1-slug': 'Prague', 'form-1-subtitle': 'rawr', 'form-1-posted': '2009-08-02' } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, ['Please correct the duplicate data for subtitle which must be unique for the month in posted.'] ) def test_prevent_change_outer_model_and_create_invalid_data(self): author = Author.objects.create(name='Charles') other_author = Author.objects.create(name='Walt') AuthorFormSet = modelformset_factory(Author, fields='__all__') data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '2', 'form-MAX_NUM_FORMS': '', 'form-0-id': str(author.id), 
'form-0-name': 'Charles', 'form-1-id': str(other_author.id), # A model not in the formset's queryset. 'form-1-name': 'Changed name', } # This formset is only for Walt Whitman and shouldn't accept data for # other_author. formset = AuthorFormSet(data=data, queryset=Author.objects.filter(id__in=(author.id,))) self.assertTrue(formset.is_valid()) formset.save() # The name of other_author shouldn't be changed and new models aren't # created. self.assertQuerysetEqual(Author.objects.all(), ['<Author: Charles>', '<Author: Walt>']) def test_validation_without_id(self): AuthorFormSet = modelformset_factory(Author, fields='__all__') data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '1', 'form-MAX_NUM_FORMS': '', 'form-0-name': 'Charles', } formset = AuthorFormSet(data) self.assertEqual( formset.errors, [{'id': ['This field is required.']}], ) def test_validation_with_child_model_without_id(self): BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields='__all__') data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '1', 'form-MAX_NUM_FORMS': '', 'form-0-name': 'Charles', 'form-0-write_speed': '10', } formset = BetterAuthorFormSet(data) self.assertEqual( formset.errors, [{'author_ptr': ['This field is required.']}], ) def test_validation_with_invalid_id(self): AuthorFormSet = modelformset_factory(Author, fields='__all__') data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '1', 'form-MAX_NUM_FORMS': '', 'form-0-id': 'abc', 'form-0-name': 'Charles', } formset = AuthorFormSet(data) self.assertEqual( formset.errors, [{'id': ['Select a valid choice. That choice is not one of the available choices.']}], ) def test_validation_with_nonexistent_id(self): AuthorFormSet = modelformset_factory(Author, fields='__all__') data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '1', 'form-MAX_NUM_FORMS': '', 'form-0-id': '12345', 'form-0-name': 'Charles', } formset = AuthorFormSet(data) self.assertEqual( formset.errors, [{'id': ['Select a valid choice. 
That choice is not one of the available choices.']}], ) def test_initial_form_count_empty_data_raises_validation_error(self): AuthorFormSet = modelformset_factory(Author, fields='__all__') msg = 'ManagementForm data is missing or has been tampered with' with self.assertRaisesMessage(ValidationError, msg): AuthorFormSet({}).initial_form_count() class TestModelFormsetOverridesTroughFormMeta(TestCase): def test_modelformset_factory_widgets(self): widgets = { 'name': forms.TextInput(attrs={'class': 'poet'}) } PoetFormSet = modelformset_factory(Poet, fields="__all__", widgets=widgets) form = PoetFormSet.form() self.assertHTMLEqual( str(form['name']), '<input id="id_name" maxlength="100" type="text" class="poet" name="name" required>' ) def test_inlineformset_factory_widgets(self): widgets = { 'title': forms.TextInput(attrs={'class': 'book'}) } BookFormSet = inlineformset_factory(Author, Book, widgets=widgets, fields="__all__") form = BookFormSet.form() self.assertHTMLEqual( str(form['title']), '<input class="book" id="id_title" maxlength="100" name="title" type="text" required>' ) def test_modelformset_factory_labels_overrides(self): BookFormSet = modelformset_factory(Book, fields="__all__", labels={ 'title': 'Name' }) form = BookFormSet.form() self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>') def test_inlineformset_factory_labels_overrides(self): BookFormSet = inlineformset_factory(Author, Book, fields="__all__", labels={ 'title': 'Name' }) form = BookFormSet.form() self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>') def test_modelformset_factory_help_text_overrides(self): BookFormSet = modelformset_factory(Book, fields="__all__", help_texts={ 'title': 'Choose carefully.' }) form = BookFormSet.form() self.assertEqual(form['title'].help_text, 'Choose carefully.') def test_inlineformset_factory_help_text_overrides(self): BookFormSet = inlineformset_factory(Author, Book, fields="__all__", help_texts={ 'title': 'Choose carefully.' }) form = BookFormSet.form() self.assertEqual(form['title'].help_text, 'Choose carefully.') def test_modelformset_factory_error_messages_overrides(self): author = Author.objects.create(pk=1, name='Charles Baudelaire') BookFormSet = modelformset_factory(Book, fields="__all__", error_messages={ 'title': { 'max_length': 'Title too long!!' } }) form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id}) form.full_clean() self.assertEqual(form.errors, {'title': ['Title too long!!']}) def test_inlineformset_factory_error_messages_overrides(self): author = Author.objects.create(pk=1, name='Charles Baudelaire') BookFormSet = inlineformset_factory(Author, Book, fields="__all__", error_messages={ 'title': { 'max_length': 'Title too long!!' 
} }) form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id}) form.full_clean() self.assertEqual(form.errors, {'title': ['Title too long!!']}) def test_modelformset_factory_field_class_overrides(self): author = Author.objects.create(pk=1, name='Charles Baudelaire') BookFormSet = modelformset_factory(Book, fields="__all__", field_classes={ 'title': forms.SlugField, }) form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id}) self.assertIs(Book._meta.get_field('title').__class__, models.CharField) self.assertIsInstance(form.fields['title'], forms.SlugField) def test_inlineformset_factory_field_class_overrides(self): author = Author.objects.create(pk=1, name='Charles Baudelaire') BookFormSet = inlineformset_factory(Author, Book, fields="__all__", field_classes={ 'title': forms.SlugField, }) form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id}) self.assertIs(Book._meta.get_field('title').__class__, models.CharField) self.assertIsInstance(form.fields['title'], forms.SlugField) def test_modelformset_factory_absolute_max(self): AuthorFormSet = modelformset_factory(Author, fields='__all__', absolute_max=1500) data = { 'form-TOTAL_FORMS': '1501', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '0', } formset = AuthorFormSet(data=data) self.assertIs(formset.is_valid(), False) self.assertEqual(len(formset.forms), 1500) self.assertEqual( formset.non_form_errors(), ['Please submit at most 1000 forms.'], ) def test_modelformset_factory_absolute_max_with_max_num(self): AuthorFormSet = modelformset_factory( Author, fields='__all__', max_num=20, absolute_max=100, ) data = { 'form-TOTAL_FORMS': '101', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '0', } formset = AuthorFormSet(data=data) self.assertIs(formset.is_valid(), False) self.assertEqual(len(formset.forms), 100) self.assertEqual( formset.non_form_errors(), ['Please submit at most 20 forms.'], ) def test_inlineformset_factory_absolute_max(self): author = Author.objects.create(name='Charles Baudelaire') BookFormSet = inlineformset_factory( Author, Book, fields='__all__', absolute_max=1500, ) data = { 'book_set-TOTAL_FORMS': '1501', 'book_set-INITIAL_FORMS': '0', 'book_set-MAX_NUM_FORMS': '0', } formset = BookFormSet(data, instance=author) self.assertIs(formset.is_valid(), False) self.assertEqual(len(formset.forms), 1500) self.assertEqual( formset.non_form_errors(), ['Please submit at most 1000 forms.'], ) def test_inlineformset_factory_absolute_max_with_max_num(self): author = Author.objects.create(name='Charles Baudelaire') BookFormSet = inlineformset_factory( Author, Book, fields='__all__', max_num=20, absolute_max=100, ) data = { 'book_set-TOTAL_FORMS': '101', 'book_set-INITIAL_FORMS': '0', 'book_set-MAX_NUM_FORMS': '0', } formset = BookFormSet(data, instance=author) self.assertIs(formset.is_valid(), False) self.assertEqual(len(formset.forms), 100) self.assertEqual( formset.non_form_errors(), ['Please submit at most 20 forms.'], ) def test_modelformset_factory_can_delete_extra(self): AuthorFormSet = modelformset_factory( Author, fields='__all__', can_delete=True, can_delete_extra=True, extra=2, ) formset = AuthorFormSet() self.assertEqual(len(formset), 2) self.assertIn('DELETE', formset.forms[0].fields) self.assertIn('DELETE', formset.forms[1].fields) def test_modelformset_factory_disable_delete_extra(self): AuthorFormSet = modelformset_factory( Author, fields='__all__', can_delete=True, can_delete_extra=False, extra=2, ) formset = AuthorFormSet() self.assertEqual(len(formset), 2) 
self.assertNotIn('DELETE', formset.forms[0].fields) self.assertNotIn('DELETE', formset.forms[1].fields) def test_inlineformset_factory_can_delete_extra(self): BookFormSet = inlineformset_factory( Author, Book, fields='__all__', can_delete=True, can_delete_extra=True, extra=2, ) formset = BookFormSet() self.assertEqual(len(formset), 2) self.assertIn('DELETE', formset.forms[0].fields) self.assertIn('DELETE', formset.forms[1].fields) def test_inlineformset_factory_can_not_delete_extra(self): BookFormSet = inlineformset_factory( Author, Book, fields='__all__', can_delete=True, can_delete_extra=False, extra=2, ) formset = BookFormSet() self.assertEqual(len(formset), 2) self.assertNotIn('DELETE', formset.forms[0].fields) self.assertNotIn('DELETE', formset.forms[1].fields)
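# ---------------------------------------------------------------------------
# A minimal, standalone sketch (not part of Django's test suite above) of the
# max_num / validate_max behaviour exercised by the formset tests. It uses a
# plain formset_factory() so it can run without a database; the SlugForm form,
# the _demo_validate_max() helper, and the inline settings.configure() call
# are assumptions made only to keep the snippet self-contained.
# ---------------------------------------------------------------------------

def _demo_validate_max():
    import django
    from django.conf import settings

    if not settings.configured:
        # Minimal settings so form validation can run outside a project.
        settings.configure(USE_I18N=False)
        django.setup()

    from django import forms
    from django.forms import formset_factory

    class SlugForm(forms.Form):
        slug = forms.SlugField(max_length=100)

    # max_num=2 with validate_max=True rejects submissions containing more
    # forms, regardless of the MAX_NUM_FORMS value the client sends back.
    SlugFormSet = formset_factory(SlugForm, extra=1, max_num=2, validate_max=True)
    data = {
        'form-TOTAL_FORMS': '3',
        'form-INITIAL_FORMS': '0',
        'form-MAX_NUM_FORMS': '2',  # a hint for the client; not trusted server-side
        'form-0-slug': 'car-red',
        'form-1-slug': 'car-blue',
        'form-2-slug': 'car-black',
    }
    formset = SlugFormSet(data)
    assert not formset.is_valid()
    # Expected: ['Please submit at most 2 forms.']
    print(formset.non_form_errors())


if __name__ == '__main__':
    _demo_validate_max()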
import base64 import os import shutil import string import tempfile import unittest from datetime import timedelta from http import cookies from pathlib import Path from django.conf import settings from django.contrib.sessions.backends.base import UpdateError from django.contrib.sessions.backends.cache import SessionStore as CacheSession from django.contrib.sessions.backends.cached_db import ( SessionStore as CacheDBSession, ) from django.contrib.sessions.backends.db import SessionStore as DatabaseSession from django.contrib.sessions.backends.file import SessionStore as FileSession from django.contrib.sessions.backends.signed_cookies import ( SessionStore as CookieSession, ) from django.contrib.sessions.exceptions import ( InvalidSessionKey, SessionInterrupted, ) from django.contrib.sessions.middleware import SessionMiddleware from django.contrib.sessions.models import Session from django.contrib.sessions.serializers import ( JSONSerializer, PickleSerializer, ) from django.core import management from django.core.cache import caches from django.core.cache.backends.base import InvalidCacheBackendError from django.core.exceptions import ImproperlyConfigured from django.http import HttpResponse from django.test import ( RequestFactory, SimpleTestCase, TestCase, ignore_warnings, override_settings, ) from django.utils import timezone from django.utils.deprecation import RemovedInDjango40Warning from .models import SessionStore as CustomDatabaseSession class SessionTestsMixin: # This does not inherit from TestCase to avoid any tests being run with this # class, which wouldn't work, and to allow different TestCase subclasses to # be used. backend = None # subclasses must specify def setUp(self): self.session = self.backend() def tearDown(self): # NB: be careful to delete any sessions created; stale sessions fill up # the /tmp (with some backends) and eventually overwhelm it after lots # of runs (think buildbots) self.session.delete() def test_new_session(self): self.assertIs(self.session.modified, False) self.assertIs(self.session.accessed, False) def test_get_empty(self): self.assertIsNone(self.session.get('cat')) def test_store(self): self.session['cat'] = "dog" self.assertIs(self.session.modified, True) self.assertEqual(self.session.pop('cat'), 'dog') def test_pop(self): self.session['some key'] = 'exists' # Need to reset these to pretend we haven't accessed it: self.accessed = False self.modified = False self.assertEqual(self.session.pop('some key'), 'exists') self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, True) self.assertIsNone(self.session.get('some key')) def test_pop_default(self): self.assertEqual(self.session.pop('some key', 'does not exist'), 'does not exist') self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, False) def test_pop_default_named_argument(self): self.assertEqual(self.session.pop('some key', default='does not exist'), 'does not exist') self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, False) def test_pop_no_default_keyerror_raised(self): with self.assertRaises(KeyError): self.session.pop('some key') def test_setdefault(self): self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar') self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar') self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, True) def test_update(self): self.session.update({'update key': 1}) self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, True) 
self.assertEqual(self.session.get('update key', None), 1) def test_has_key(self): self.session['some key'] = 1 self.session.modified = False self.session.accessed = False self.assertIn('some key', self.session) self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, False) def test_values(self): self.assertEqual(list(self.session.values()), []) self.assertIs(self.session.accessed, True) self.session['some key'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.values()), [1]) self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, False) def test_keys(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.keys()), ['x']) self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, False) def test_items(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.items()), [('x', 1)]) self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, False) def test_clear(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.items()), [('x', 1)]) self.session.clear() self.assertEqual(list(self.session.items()), []) self.assertIs(self.session.accessed, True) self.assertIs(self.session.modified, True) def test_save(self): self.session.save() self.assertIs(self.session.exists(self.session.session_key), True) def test_delete(self): self.session.save() self.session.delete(self.session.session_key) self.assertIs(self.session.exists(self.session.session_key), False) def test_flush(self): self.session['foo'] = 'bar' self.session.save() prev_key = self.session.session_key self.session.flush() self.assertIs(self.session.exists(prev_key), False) self.assertNotEqual(self.session.session_key, prev_key) self.assertIsNone(self.session.session_key) self.assertIs(self.session.modified, True) self.assertIs(self.session.accessed, True) def test_cycle(self): self.session['a'], self.session['b'] = 'c', 'd' self.session.save() prev_key = self.session.session_key prev_data = list(self.session.items()) self.session.cycle_key() self.assertIs(self.session.exists(prev_key), False) self.assertNotEqual(self.session.session_key, prev_key) self.assertEqual(list(self.session.items()), prev_data) def test_cycle_with_no_session_cache(self): self.session['a'], self.session['b'] = 'c', 'd' self.session.save() prev_data = self.session.items() self.session = self.backend(self.session.session_key) self.assertIs(hasattr(self.session, '_session_cache'), False) self.session.cycle_key() self.assertCountEqual(self.session.items(), prev_data) def test_save_doesnt_clear_data(self): self.session['a'] = 'b' self.session.save() self.assertEqual(self.session['a'], 'b') def test_invalid_key(self): # Submitting an invalid session key (either by guessing, or if the db has # removed the key) results in a new key being generated. 
try: session = self.backend('1') session.save() self.assertNotEqual(session.session_key, '1') self.assertIsNone(session.get('cat')) session.delete() finally: # Some backends leave a stale cache entry for the invalid # session key; make sure that entry is manually deleted session.delete('1') def test_session_key_empty_string_invalid(self): """Falsey values (Such as an empty string) are rejected.""" self.session._session_key = '' self.assertIsNone(self.session.session_key) def test_session_key_too_short_invalid(self): """Strings shorter than 8 characters are rejected.""" self.session._session_key = '1234567' self.assertIsNone(self.session.session_key) def test_session_key_valid_string_saved(self): """Strings of length 8 and up are accepted and stored.""" self.session._session_key = '12345678' self.assertEqual(self.session.session_key, '12345678') def test_session_key_is_read_only(self): def set_session_key(session): session.session_key = session._get_new_session_key() with self.assertRaises(AttributeError): set_session_key(self.session) # Custom session expiry def test_default_expiry(self): # A normal session has a max age equal to settings self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) # So does a custom session with an idle expiration time of 0 (but it'll # expire at browser close) self.session.set_expiry(0) self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_custom_expiry_seconds(self): modification = timezone.now() self.session.set_expiry(10) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_timedelta(self): modification = timezone.now() # Mock timezone.now, because set_expiry calls it on this code path. 
original_now = timezone.now try: timezone.now = lambda: modification self.session.set_expiry(timedelta(seconds=10)) finally: timezone.now = original_now date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_datetime(self): modification = timezone.now() self.session.set_expiry(modification + timedelta(seconds=10)) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_reset(self): self.session.set_expiry(None) self.session.set_expiry(10) self.session.set_expiry(None) self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_get_expire_at_browser_close(self): # Tests get_expire_at_browser_close with different settings and different # set_expiry calls with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False): self.session.set_expiry(10) self.assertIs(self.session.get_expire_at_browser_close(), False) self.session.set_expiry(0) self.assertIs(self.session.get_expire_at_browser_close(), True) self.session.set_expiry(None) self.assertIs(self.session.get_expire_at_browser_close(), False) with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True): self.session.set_expiry(10) self.assertIs(self.session.get_expire_at_browser_close(), False) self.session.set_expiry(0) self.assertIs(self.session.get_expire_at_browser_close(), True) self.session.set_expiry(None) self.assertIs(self.session.get_expire_at_browser_close(), True) def test_decode(self): # Ensure we can decode what we encode data = {'a test key': 'a test value'} encoded = self.session.encode(data) self.assertEqual(self.session.decode(encoded), data) @override_settings(SECRET_KEY='django_tests_secret_key') def test_decode_legacy(self): # RemovedInDjango40Warning: pre-Django 3.1 sessions will be invalid. legacy_encoded = ( 'OWUzNTNmNWQxNTBjOWExZmM4MmQ3NzNhMDRmMjU4NmYwNDUyNGI2NDp7ImEgdGVzd' 'CBrZXkiOiJhIHRlc3QgdmFsdWUifQ==' ) self.assertEqual( self.session.decode(legacy_encoded), {'a test key': 'a test value'}, ) @ignore_warnings(category=RemovedInDjango40Warning) def test_default_hashing_algorith_legacy_decode(self): with self.settings(DEFAULT_HASHING_ALGORITHM='sha1'): data = {'a test key': 'a test value'} encoded = self.session.encode(data) self.assertEqual(self.session._legacy_decode(encoded), data) def test_decode_failure_logged_to_security(self): tests = [ base64.b64encode(b'flaskdj:alkdjf').decode('ascii'), 'bad:encoded:value', ] for encoded in tests: with self.subTest(encoded=encoded): with self.assertLogs('django.security.SuspiciousSession', 'WARNING') as cm: self.assertEqual(self.session.decode(encoded), {}) # The failed decode is logged. self.assertIn('Session data corrupted', cm.output[0]) def test_actual_expiry(self): # this doesn't work with JSONSerializer (serializing timedelta) with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'): self.session = self.backend() # reinitialize after overriding settings # Regression test for #19200 old_session_key = None new_session_key = None try: self.session['foo'] = 'bar' self.session.set_expiry(-timedelta(seconds=10)) self.session.save() old_session_key = self.session.session_key # With an expiry date in the past, the session expires instantly. 
new_session = self.backend(self.session.session_key) new_session_key = new_session.session_key self.assertNotIn('foo', new_session) finally: self.session.delete(old_session_key) self.session.delete(new_session_key) def test_session_load_does_not_create_record(self): """ Loading an unknown session key does not create a session record. Creating session records on load is a DOS vulnerability. """ session = self.backend('someunknownkey') session.load() self.assertIsNone(session.session_key) self.assertIs(session.exists(session.session_key), False) # provided unknown key was cycled, not reused self.assertNotEqual(session.session_key, 'someunknownkey') def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self): """ Sessions shouldn't be resurrected by a concurrent request. """ # Create new session. s1 = self.backend() s1['test_data'] = 'value1' s1.save(must_create=True) # Logout in another context. s2 = self.backend(s1.session_key) s2.delete() # Modify session in first context. s1['test_data'] = 'value2' with self.assertRaises(UpdateError): # This should throw an exception as the session is deleted, not # resurrect the session. s1.save() self.assertEqual(s1.load(), {}) class DatabaseSessionTests(SessionTestsMixin, TestCase): backend = DatabaseSession session_engine = 'django.contrib.sessions.backends.db' @property def model(self): return self.backend.get_model_class() def test_session_str(self): "Session repr should be the session key." self.session['x'] = 1 self.session.save() session_key = self.session.session_key s = self.model.objects.get(session_key=session_key) self.assertEqual(str(s), session_key) def test_session_get_decoded(self): """ Test we can use Session.get_decoded to retrieve data stored in normal way """ self.session['x'] = 1 self.session.save() s = self.model.objects.get(session_key=self.session.session_key) self.assertEqual(s.get_decoded(), {'x': 1}) def test_sessionmanager_save(self): """ Test SessionManager.save method """ # Create a session self.session['y'] = 1 self.session.save() s = self.model.objects.get(session_key=self.session.session_key) # Change it self.model.objects.save(s.session_key, {'y': 2}, s.expire_date) # Clear cache, so that it will be retrieved from DB del self.session._session_cache self.assertEqual(self.session['y'], 2) def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. """ self.assertEqual(0, self.model.objects.count()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # Two sessions are in the database before clearsessions... self.assertEqual(2, self.model.objects.count()) with override_settings(SESSION_ENGINE=self.session_engine): management.call_command('clearsessions') # ... and one is deleted. self.assertEqual(1, self.model.objects.count()) @override_settings(USE_TZ=True) class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests): pass class CustomDatabaseSessionTests(DatabaseSessionTests): backend = CustomDatabaseSession session_engine = 'sessions_tests.models' custom_session_cookie_age = 60 * 60 * 24 # One day. def test_extra_session_field(self): # Set the account ID to be picked up by a custom session storage # and saved to a custom session model database column. 
self.session['_auth_user_id'] = 42 self.session.save() # Make sure that the customized create_model_instance() was called. s = self.model.objects.get(session_key=self.session.session_key) self.assertEqual(s.account_id, 42) # Make the session "anonymous". self.session.pop('_auth_user_id') self.session.save() # Make sure that save() on an existing session did the right job. s = self.model.objects.get(session_key=self.session.session_key) self.assertIsNone(s.account_id) def test_custom_expiry_reset(self): self.session.set_expiry(None) self.session.set_expiry(10) self.session.set_expiry(None) self.assertEqual(self.session.get_expiry_age(), self.custom_session_cookie_age) def test_default_expiry(self): self.assertEqual(self.session.get_expiry_age(), self.custom_session_cookie_age) self.session.set_expiry(0) self.assertEqual(self.session.get_expiry_age(), self.custom_session_cookie_age) class CacheDBSessionTests(SessionTestsMixin, TestCase): backend = CacheDBSession def test_exists_searches_cache_first(self): self.session.save() with self.assertNumQueries(0): self.assertIs(self.session.exists(self.session.session_key), True) # Some backends might issue a warning @ignore_warnings(module="django.core.cache.backends.base") def test_load_overlong_key(self): self.session._session_key = (string.ascii_letters + string.digits) * 20 self.assertEqual(self.session.load(), {}) @override_settings(SESSION_CACHE_ALIAS='sessions') def test_non_default_cache(self): # 21000 - CacheDB backend should respect SESSION_CACHE_ALIAS. with self.assertRaises(InvalidCacheBackendError): self.backend() @override_settings(USE_TZ=True) class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests): pass class FileSessionTests(SessionTestsMixin, SimpleTestCase): backend = FileSession def setUp(self): # Do file session tests in an isolated directory, and kill it after we're done. self.original_session_file_path = settings.SESSION_FILE_PATH self.temp_session_store = settings.SESSION_FILE_PATH = self.mkdtemp() # Reset the file session backend's internal caches if hasattr(self.backend, '_storage_path'): del self.backend._storage_path super().setUp() def tearDown(self): super().tearDown() settings.SESSION_FILE_PATH = self.original_session_file_path shutil.rmtree(self.temp_session_store) def mkdtemp(self): return tempfile.mkdtemp() @override_settings( SESSION_FILE_PATH='/if/this/directory/exists/you/have/a/weird/computer', ) def test_configuration_check(self): del self.backend._storage_path # Make sure the file backend checks for a good storage dir with self.assertRaises(ImproperlyConfigured): self.backend() def test_invalid_key_backslash(self): # Ensure we don't allow directory-traversal. # This is tested directly on _key_to_file, as load() will swallow # a SuspiciousOperation in the same way as an OSError - by creating # a new session, making it unclear whether the slashes were detected. with self.assertRaises(InvalidSessionKey): self.backend()._key_to_file("a\\b\\c") def test_invalid_key_forwardslash(self): # Ensure we don't allow directory-traversal with self.assertRaises(InvalidSessionKey): self.backend()._key_to_file("a/b/c") @override_settings( SESSION_ENGINE="django.contrib.sessions.backends.file", SESSION_COOKIE_AGE=0, ) def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. 
""" storage_path = self.backend._get_storage_path() file_prefix = settings.SESSION_COOKIE_NAME def count_sessions(): return len([ session_file for session_file in os.listdir(storage_path) if session_file.startswith(file_prefix) ]) self.assertEqual(0, count_sessions()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # One object in the present without an expiry (should be deleted since # its modification time + SESSION_COOKIE_AGE will be in the past when # clearsessions runs). other_session2 = self.backend() other_session2['foo'] = 'bar' other_session2.save() # Three sessions are in the filesystem before clearsessions... self.assertEqual(3, count_sessions()) management.call_command('clearsessions') # ... and two are deleted. self.assertEqual(1, count_sessions()) class FileSessionPathLibTests(FileSessionTests): def mkdtemp(self): tmp_dir = super().mkdtemp() return Path(tmp_dir) class CacheSessionTests(SessionTestsMixin, SimpleTestCase): backend = CacheSession # Some backends might issue a warning @ignore_warnings(module="django.core.cache.backends.base") def test_load_overlong_key(self): self.session._session_key = (string.ascii_letters + string.digits) * 20 self.assertEqual(self.session.load(), {}) def test_default_cache(self): self.session.save() self.assertIsNotNone(caches['default'].get(self.session.cache_key)) @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, 'sessions': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'session', }, }, SESSION_CACHE_ALIAS='sessions') def test_non_default_cache(self): # Re-initialize the session backend to make use of overridden settings. 
self.session = self.backend()
        self.session.save()
        self.assertIsNone(caches['default'].get(self.session.cache_key))
        self.assertIsNotNone(caches['sessions'].get(self.session.cache_key))

    def test_create_and_save(self):
        self.session = self.backend()
        self.session.create()
        self.session.save()
        self.assertIsNotNone(caches['default'].get(self.session.cache_key))


class SessionMiddlewareTests(TestCase):
    request_factory = RequestFactory()

    @staticmethod
    def get_response_touching_session(request):
        request.session['hello'] = 'world'
        return HttpResponse('Session test')

    @override_settings(SESSION_COOKIE_SECURE=True)
    def test_secure_session_cookie(self):
        request = self.request_factory.get('/')
        middleware = SessionMiddleware(self.get_response_touching_session)

        # Handle the response through the middleware
        response = middleware(request)
        self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]['secure'], True)

    @override_settings(SESSION_COOKIE_HTTPONLY=True)
    def test_httponly_session_cookie(self):
        request = self.request_factory.get('/')
        middleware = SessionMiddleware(self.get_response_touching_session)

        # Handle the response through the middleware
        response = middleware(request)
        self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'], True)
        self.assertIn(
            cookies.Morsel._reserved['httponly'],
            str(response.cookies[settings.SESSION_COOKIE_NAME])
        )

    @override_settings(SESSION_COOKIE_SAMESITE='Strict')
    def test_samesite_session_cookie(self):
        request = self.request_factory.get('/')
        middleware = SessionMiddleware(self.get_response_touching_session)
        response = middleware(request)
        self.assertEqual(response.cookies[settings.SESSION_COOKIE_NAME]['samesite'], 'Strict')

    @override_settings(SESSION_COOKIE_HTTPONLY=False)
    def test_no_httponly_session_cookie(self):
        request = self.request_factory.get('/')
        middleware = SessionMiddleware(self.get_response_touching_session)
        response = middleware(request)
        self.assertEqual(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'], '')
        self.assertNotIn(
            cookies.Morsel._reserved['httponly'],
            str(response.cookies[settings.SESSION_COOKIE_NAME])
        )

    def test_session_save_on_500(self):
        def response_500(request):
            response = HttpResponse('Horrible error')
            response.status_code = 500
            request.session['hello'] = 'world'
            return response

        request = self.request_factory.get('/')
        SessionMiddleware(response_500)(request)

        # The value wasn't saved above.
        self.assertNotIn('hello', request.session.load())

    def test_session_update_error_redirect(self):
        def response_delete_session(request):
            request.session = DatabaseSession()
            request.session.save(must_create=True)
            request.session.delete()
            return HttpResponse()

        request = self.request_factory.get('/foo/')
        middleware = SessionMiddleware(response_delete_session)

        msg = (
            "The request's session was deleted before the request completed. "
            "The user may have logged out in a concurrent request, for example."
        )
        with self.assertRaisesMessage(SessionInterrupted, msg):
            # Handle the response through the middleware. It will try to save
            # the deleted session which will cause an UpdateError that's caught
            # and raised as a SessionInterrupted.
middleware(request) def test_session_delete_on_end(self): def response_ending_session(request): request.session.flush() return HttpResponse('Session test') request = self.request_factory.get('/') middleware = SessionMiddleware(response_ending_session) # Before deleting, there has to be an existing cookie request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc' # Handle the response through the middleware response = middleware(request) # The cookie was deleted, not recreated. # A deleted cookie header looks like: # Set-Cookie: sessionid=; expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/ self.assertEqual( 'Set-Cookie: {}=""; expires=Thu, 01 Jan 1970 00:00:00 GMT; ' 'Max-Age=0; Path=/; SameSite={}'.format( settings.SESSION_COOKIE_NAME, settings.SESSION_COOKIE_SAMESITE, ), str(response.cookies[settings.SESSION_COOKIE_NAME]) ) # SessionMiddleware sets 'Vary: Cookie' to prevent the 'Set-Cookie' # from being cached. self.assertEqual(response.headers['Vary'], 'Cookie') @override_settings(SESSION_COOKIE_DOMAIN='.example.local', SESSION_COOKIE_PATH='/example/') def test_session_delete_on_end_with_custom_domain_and_path(self): def response_ending_session(request): request.session.flush() return HttpResponse('Session test') request = self.request_factory.get('/') middleware = SessionMiddleware(response_ending_session) # Before deleting, there has to be an existing cookie request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc' # Handle the response through the middleware response = middleware(request) # The cookie was deleted, not recreated. # A deleted cookie header with a custom domain and path looks like: # Set-Cookie: sessionid=; Domain=.example.local; # expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0; # Path=/example/ self.assertEqual( 'Set-Cookie: {}=""; Domain=.example.local; expires=Thu, ' '01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/example/; SameSite={}'.format( settings.SESSION_COOKIE_NAME, settings.SESSION_COOKIE_SAMESITE, ), str(response.cookies[settings.SESSION_COOKIE_NAME]) ) def test_flush_empty_without_session_cookie_doesnt_set_cookie(self): def response_ending_session(request): request.session.flush() return HttpResponse('Session test') request = self.request_factory.get('/') middleware = SessionMiddleware(response_ending_session) # Handle the response through the middleware response = middleware(request) # A cookie should not be set. self.assertEqual(response.cookies, {}) # The session is accessed so "Vary: Cookie" should be set. self.assertEqual(response.headers['Vary'], 'Cookie') def test_empty_session_saved(self): """ If a session is emptied of data but still has a key, it should still be updated. """ def response_set_session(request): # Set a session key and some data. request.session['foo'] = 'bar' return HttpResponse('Session test') request = self.request_factory.get('/') middleware = SessionMiddleware(response_set_session) # Handle the response through the middleware. response = middleware(request) self.assertEqual(tuple(request.session.items()), (('foo', 'bar'),)) # A cookie should be set, along with Vary: Cookie. self.assertIn( 'Set-Cookie: sessionid=%s' % request.session.session_key, str(response.cookies) ) self.assertEqual(response.headers['Vary'], 'Cookie') # Empty the session data. del request.session['foo'] # Handle the response through the middleware. 
response = HttpResponse('Session test') response = middleware.process_response(request, response) self.assertEqual(dict(request.session.values()), {}) session = Session.objects.get(session_key=request.session.session_key) self.assertEqual(session.get_decoded(), {}) # While the session is empty, it hasn't been flushed so a cookie should # still be set, along with Vary: Cookie. self.assertGreater(len(request.session.session_key), 8) self.assertIn( 'Set-Cookie: sessionid=%s' % request.session.session_key, str(response.cookies) ) self.assertEqual(response.headers['Vary'], 'Cookie') class CookieSessionTests(SessionTestsMixin, SimpleTestCase): backend = CookieSession def test_save(self): """ This test tested exists() in the other session backends, but that doesn't make sense for us. """ pass def test_cycle(self): """ This test tested cycle_key() which would create a new session key for the same session data. But we can't invalidate previously signed cookies (other than letting them expire naturally) so testing for this behavior is meaningless. """ pass @unittest.expectedFailure def test_actual_expiry(self): # The cookie backend doesn't handle non-default expiry dates, see #19201 super().test_actual_expiry() def test_unpickling_exception(self): # signed_cookies backend should handle unpickle exceptions gracefully # by creating a new session self.assertEqual(self.session.serializer, JSONSerializer) self.session.save() self.session.serializer = PickleSerializer self.session.load() @unittest.skip("Cookie backend doesn't have an external store to create records in.") def test_session_load_does_not_create_record(self): pass @unittest.skip("CookieSession is stored in the client and there is no way to query it.") def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self): pass
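

# Illustrative sketch, not part of Django's test suite: the core SessionStore
# API that the backend tests above exercise (dict-style access, save(), load(),
# cycle_key() and flush()). It assumes the database backend; the function is
# never called here and only documents the expected call pattern.
def _session_store_usage_sketch():
    from django.contrib.sessions.backends.db import SessionStore

    store = SessionStore()              # no key yet; one is generated on save()
    store['foo'] = 'bar'                # sessions expose a dict-like interface
    store.save()                        # persists the data and sets session_key
    key = store.session_key

    restored = SessionStore(session_key=key)
    assert restored.load() == {'foo': 'bar'}

    restored.cycle_key()                # new key, same data (avoids fixation)
    restored.flush()                    # deletes the record and clears the key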
a72daae0e8fd4d4772a2b598fe7de6cbf9025bb2a8af6f62fe3ae3cb478356c9
from datetime import datetime from django.contrib.sitemaps import GenericSitemap from django.test import override_settings from .base import SitemapTestsBase from .models import TestModel @override_settings(ABSOLUTE_URL_OVERRIDES={}) class GenericViewsSitemapTests(SitemapTestsBase): def test_generic_sitemap_attributes(self): datetime_value = datetime.now() queryset = TestModel.objects.all() generic_sitemap = GenericSitemap( info_dict={ 'queryset': queryset, 'date_field': datetime_value, }, priority=0.6, changefreq='monthly', protocol='https', ) attr_values = ( ('date_field', datetime_value), ('priority', 0.6), ('changefreq', 'monthly'), ('protocol', 'https'), ) for attr_name, expected_value in attr_values: with self.subTest(attr_name=attr_name): self.assertEqual(getattr(generic_sitemap, attr_name), expected_value) self.assertCountEqual(generic_sitemap.queryset, queryset) def test_generic_sitemap(self): "A minimal generic sitemap can be rendered" response = self.client.get('/generic/sitemap.xml') expected = '' for pk in TestModel.objects.values_list("id", flat=True): expected += "<url><loc>%s/testmodel/%s/</loc></url>" % (self.base_url, pk) expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"> %s </urlset> """ % expected self.assertXMLEqual(response.content.decode(), expected_content) def test_generic_sitemap_lastmod(self): test_model = TestModel.objects.first() TestModel.objects.update(lastmod=datetime(2013, 3, 13, 10, 0, 0)) response = self.client.get('/generic-lastmod/sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"> <url><loc>%s/testmodel/%s/</loc><lastmod>2013-03-13</lastmod></url> </urlset> """ % (self.base_url, test_model.pk) self.assertXMLEqual(response.content.decode(), expected_content) self.assertEqual(response.headers['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT')
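

# Illustrative sketch, not part of Django's test suite: roughly how a
# '/generic/sitemap.xml' route like the one requested above is wired into a
# URLconf. The 'lastmod' date_field and the exact URL are assumptions of this
# sketch rather than a description of the test project's urls module; the
# function is never called and only shows the expected shape.
def _generic_sitemap_urlconf_sketch():
    from django.contrib.sitemaps.views import sitemap
    from django.urls import path

    info_dict = {'queryset': TestModel.objects.all(), 'date_field': 'lastmod'}
    return [
        path(
            'sitemap.xml',
            sitemap,
            {'sitemaps': {'generic': GenericSitemap(info_dict, priority=0.6)}},
        ),
    ]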
c5c83523e7c7d6675cda76237ea5c4716aff2f6e0e46095282dc8707a174e778
import os from datetime import date from django.contrib.sitemaps import Sitemap from django.contrib.sites.models import Site from django.core.exceptions import ImproperlyConfigured from django.test import modify_settings, override_settings from django.utils import translation from django.utils.formats import localize from .base import SitemapTestsBase from .models import TestModel class HTTPSitemapTests(SitemapTestsBase): use_sitemap_err_msg = ( 'To use sitemaps, either enable the sites framework or pass a ' 'Site/RequestSite object in your view.' ) def test_simple_sitemap_index(self): "A simple sitemap index can be rendered" response = self.client.get('/simple/index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode(), expected_content) def test_sitemap_not_callable(self): """A sitemap may not be callable.""" response = self.client.get('/simple-not-callable/index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode(), expected_content) def test_paged_sitemap(self): """A sitemap may have multiple pages.""" response = self.client.get('/simple-paged/index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>{0}/simple/sitemap-simple.xml</loc></sitemap><sitemap><loc>{0}/simple/sitemap-simple.xml?p=2</loc></sitemap> </sitemapindex> """.format(self.base_url) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')], }]) def test_simple_sitemap_custom_index(self): "A simple sitemap index can be rendered with a custom template" response = self.client.get('/simple/custom-index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode(), expected_content) def test_simple_sitemap_section(self): "A simple sitemap section can be rendered" response = self.client.get('/simple/sitemap-simple.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"> <url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % (self.base_url, date.today()) self.assertXMLEqual(response.content.decode(), expected_content) def test_no_section(self): response = self.client.get('/simple/sitemap-simple2.xml') self.assertEqual(str(response.context['exception']), "No sitemap available for section: 'simple2'") self.assertEqual(response.status_code, 404) def test_empty_page(self): response = self.client.get('/simple/sitemap-simple.xml?p=0') self.assertEqual(str(response.context['exception']), 'Page 0 empty') self.assertEqual(response.status_code, 404) def test_page_not_int(self): response = 
self.client.get('/simple/sitemap-simple.xml?p=test')
        self.assertEqual(str(response.context['exception']), "No page 'test'")
        self.assertEqual(response.status_code, 404)

    def test_simple_sitemap(self):
        "A simple sitemap can be rendered"
        response = self.client.get('/simple/sitemap.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
        self.assertXMLEqual(response.content.decode(), expected_content)

    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
    }])
    def test_simple_custom_sitemap(self):
        "A simple sitemap can be rendered with a custom template"
        response = self.client.get('/simple/custom-sitemap.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
        self.assertXMLEqual(response.content.decode(), expected_content)

    def test_sitemap_last_modified(self):
        "Last-Modified header is set correctly"
        response = self.client.get('/lastmod/sitemap.xml')
        self.assertEqual(response.headers['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT')

    def test_sitemap_last_modified_date(self):
        """
        The Last-Modified header should support dates (without time).
        """
        response = self.client.get('/lastmod/date-sitemap.xml')
        self.assertEqual(response.headers['Last-Modified'], 'Wed, 13 Mar 2013 00:00:00 GMT')

    def test_sitemap_last_modified_tz(self):
        """
        The Last-Modified header should be converted from timezone aware dates
        to GMT.
        """
        response = self.client.get('/lastmod/tz-sitemap.xml')
        self.assertEqual(response.headers['Last-Modified'], 'Wed, 13 Mar 2013 15:00:00 GMT')

    def test_sitemap_last_modified_missing(self):
        "Last-Modified header is missing when sitemap has no lastmod"
        response = self.client.get('/generic/sitemap.xml')
        self.assertFalse(response.has_header('Last-Modified'))

    def test_sitemap_last_modified_mixed(self):
        "Last-Modified header is omitted when lastmod not on all items"
        response = self.client.get('/lastmod-mixed/sitemap.xml')
        self.assertFalse(response.has_header('Last-Modified'))

    def test_sitemaps_lastmod_mixed_ascending_last_modified_missing(self):
        """
        The Last-Modified header is omitted when lastmod isn't found in all
        sitemaps. Test sitemaps are sorted by lastmod in ascending order.
        """
        response = self.client.get('/lastmod-sitemaps/mixed-ascending.xml')
        self.assertFalse(response.has_header('Last-Modified'))

    def test_sitemaps_lastmod_mixed_descending_last_modified_missing(self):
        """
        The Last-Modified header is omitted when lastmod isn't found in all
        sitemaps. Test sitemaps are sorted by lastmod in descending order.
        """
        response = self.client.get('/lastmod-sitemaps/mixed-descending.xml')
        self.assertFalse(response.has_header('Last-Modified'))

    def test_sitemaps_lastmod_ascending(self):
        """
        The Last-Modified header is set to the most recent sitemap lastmod.
        Test sitemaps are sorted by lastmod in ascending order.
""" response = self.client.get('/lastmod-sitemaps/ascending.xml') self.assertEqual(response.headers['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT') def test_sitemaps_lastmod_descending(self): """ The Last-Modified header is set to the most recent sitemap lastmod. Test sitemaps are sorted by lastmod in descending order. """ response = self.client.get('/lastmod-sitemaps/descending.xml') self.assertEqual(response.headers['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT') @override_settings(USE_I18N=True, USE_L10N=True) def test_localized_priority(self): """The priority value should not be localized.""" with translation.override('fr'): self.assertEqual('0,3', localize(0.3)) # Priorities aren't rendered in localized format. response = self.client.get('/simple/sitemap.xml') self.assertContains(response, '<priority>0.5</priority>') self.assertContains(response, '<lastmod>%s</lastmod>' % date.today()) @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}) def test_requestsite_sitemap(self): # Hitting the flatpages sitemap without the sites framework installed # doesn't raise an exception. response = self.client.get('/simple/sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"> <url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % date.today() self.assertXMLEqual(response.content.decode(), expected_content) def test_sitemap_get_urls_no_site_1(self): """ Check we get ImproperlyConfigured if we don't pass a site object to Sitemap.get_urls and no Site objects exist """ Site.objects.all().delete() with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg): Sitemap().get_urls() @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}) def test_sitemap_get_urls_no_site_2(self): """ Check we get ImproperlyConfigured when we don't pass a site object to Sitemap.get_urls if Site objects exists, but the sites framework is not actually installed. """ with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg): Sitemap().get_urls() def test_sitemap_item(self): """ Check to make sure that the raw item is included with each Sitemap.get_url() url result. """ test_sitemap = Sitemap() test_sitemap.items = TestModel.objects.order_by('pk').all def is_testmodel(url): return isinstance(url['item'], TestModel) item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls())) self.assertTrue(item_in_url_info) def test_cached_sitemap_index(self): """ A cached sitemap index can be rendered (#2713). 
""" response = self.client.get('/cached/index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode(), expected_content) def test_x_robots_sitemap(self): response = self.client.get('/simple/index.xml') self.assertEqual(response.headers['X-Robots-Tag'], 'noindex, noodp, noarchive') response = self.client.get('/simple/sitemap.xml') self.assertEqual(response.headers['X-Robots-Tag'], 'noindex, noodp, noarchive') def test_empty_sitemap(self): response = self.client.get('/empty/sitemap.xml') self.assertEqual(response.status_code, 200) @override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese'))) def test_simple_i18n_sitemap_index(self): """ A simple i18n sitemap index can be rendered. """ response = self.client.get('/simple/i18n.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"> <url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """.format(self.base_url, self.i18n_model.pk) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese'))) def test_alternate_i18n_sitemap_index(self): """ A i18n sitemap with alternate/hreflang links can be rendered. """ response = self.client.get('/alternates/i18n.xml') url, pk = self.base_url, self.i18n_model.pk expected_urls = f""" <url><loc>{url}/en/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> </url> <url><loc>{url}/pt/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> </url> """.replace('\n', '') expected_content = f"""<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"> {expected_urls} </urlset> """ self.assertXMLEqual(response.content.decode(), expected_content) @override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese'), ('es', 'Spanish'))) def test_alternate_i18n_sitemap_limited(self): """ A i18n sitemap index with limited languages can be rendered. 
""" response = self.client.get('/limited/i18n.xml') url, pk = self.base_url, self.i18n_model.pk expected_urls = f""" <url><loc>{url}/en/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="es" href="{url}/es/i18n/testmodel/{pk}/"/> </url> <url><loc>{url}/es/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="es" href="{url}/es/i18n/testmodel/{pk}/"/> </url> """.replace('\n', '') expected_content = f"""<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"> {expected_urls} </urlset> """ self.assertXMLEqual(response.content.decode(), expected_content) @override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese'))) def test_alternate_i18n_sitemap_xdefault(self): """ A i18n sitemap index with x-default can be rendered. """ response = self.client.get('/x-default/i18n.xml') url, pk = self.base_url, self.i18n_model.pk expected_urls = f""" <url><loc>{url}/en/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="x-default" href="{url}/i18n/testmodel/{pk}/"/> </url> <url><loc>{url}/pt/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="x-default" href="{url}/i18n/testmodel/{pk}/"/> </url> """.replace('\n', '') expected_content = f"""<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"> {expected_urls} </urlset> """ self.assertXMLEqual(response.content.decode(), expected_content) def test_sitemap_without_entries(self): response = self.client.get('/sitemap-without-entries/sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml"> </urlset>""" self.assertXMLEqual(response.content.decode(), expected_content)
1fa0ed75576aa81a0b09698ac22e9eafad8ae61fa5fc139dcdf2d7e32bea0b96
""" Testing using the Test Client The test client is a class that can act like a simple browser for testing purposes. It allows the user to compose GET and POST requests, and obtain the response that the server gave to those requests. The server Response objects are annotated with the details of the contexts and templates that were rendered during the process of serving the request. ``Client`` objects are stateful - they will retain cookie (and thus session) details for the lifetime of the ``Client`` instance. This is not intended as a replacement for Twill, Selenium, or other browser automation frameworks - it is here to allow testing against the contexts and templates produced by a view, rather than the HTML rendered to the end-user. """ import itertools import tempfile from unittest import mock from django.contrib.auth.models import User from django.core import mail from django.http import HttpResponse, HttpResponseNotAllowed from django.test import ( AsyncRequestFactory, Client, RequestFactory, SimpleTestCase, TestCase, override_settings, ) from django.urls import reverse_lazy from .views import TwoArgException, get_view, post_view, trace_view @override_settings(ROOT_URLCONF='test_client.urls') class ClientTest(TestCase): @classmethod def setUpTestData(cls): cls.u1 = User.objects.create_user(username='testclient', password='password') cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False) def test_get_view(self): "GET a view" # The data is ignored, but let's check it doesn't crash the system # anyway. data = {'var': '\xf2'} response = self.client.get('/get_view/', data) # Check some response details self.assertContains(response, 'This is a test') self.assertEqual(response.context['var'], '\xf2') self.assertEqual(response.templates[0].name, 'GET Template') def test_query_string_encoding(self): # WSGI requires latin-1 encoded strings. response = self.client.get('/get_view/?var=1\ufffd') self.assertEqual(response.context['var'], '1\ufffd') def test_get_data_none(self): msg = ( "Cannot encode None for key 'value' in a query string. Did you " "mean to pass an empty string or omit the value?" ) with self.assertRaisesMessage(TypeError, msg): self.client.get('/get_view/', {'value': None}) def test_get_post_view(self): "GET a view that normally expects POSTs" response = self.client.get('/post_view/', {}) # Check some response details self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'Empty GET Template') self.assertTemplateUsed(response, 'Empty GET Template') self.assertTemplateNotUsed(response, 'Empty POST Template') def test_empty_post(self): "POST an empty dictionary to a view" response = self.client.post('/post_view/', {}) # Check some response details self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'Empty POST Template') self.assertTemplateNotUsed(response, 'Empty GET Template') self.assertTemplateUsed(response, 'Empty POST Template') def test_post(self): "POST some data to a view" post_data = { 'value': 37 } response = self.client.post('/post_view/', post_data) # Check some response details self.assertEqual(response.status_code, 200) self.assertEqual(response.context['data'], '37') self.assertEqual(response.templates[0].name, 'POST Template') self.assertContains(response, 'Data received') def test_post_data_none(self): msg = ( "Cannot encode None for key 'value' as POST data. Did you mean " "to pass an empty string or omit the value?" 
) with self.assertRaisesMessage(TypeError, msg): self.client.post('/post_view/', {'value': None}) def test_json_serialization(self): """The test client serializes JSON data.""" methods = ('post', 'put', 'patch', 'delete') tests = ( ({'value': 37}, {'value': 37}), ([37, True], [37, True]), ((37, False), [37, False]), ) for method in methods: with self.subTest(method=method): for data, expected in tests: with self.subTest(data): client_method = getattr(self.client, method) method_name = method.upper() response = client_method('/json_view/', data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['data'], expected) self.assertContains(response, 'Viewing %s page.' % method_name) def test_json_encoder_argument(self): """The test Client accepts a json_encoder.""" mock_encoder = mock.MagicMock() mock_encoding = mock.MagicMock() mock_encoder.return_value = mock_encoding mock_encoding.encode.return_value = '{"value": 37}' client = self.client_class(json_encoder=mock_encoder) # Vendored tree JSON content types are accepted. client.post('/json_view/', {'value': 37}, content_type='application/vnd.api+json') self.assertTrue(mock_encoder.called) self.assertTrue(mock_encoding.encode.called) def test_put(self): response = self.client.put('/put_view/', {'foo': 'bar'}) self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'PUT Template') self.assertEqual(response.context['data'], "{'foo': 'bar'}") self.assertEqual(response.context['Content-Length'], '14') def test_trace(self): """TRACE a view""" response = self.client.trace('/trace_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['method'], 'TRACE') self.assertEqual(response.templates[0].name, 'TRACE Template') def test_response_headers(self): "Check the value of HTTP headers returned in a response" response = self.client.get("/header_view/") self.assertEqual(response.headers['X-DJANGO-TEST'], 'Slartibartfast') def test_response_attached_request(self): """ The returned response has a ``request`` attribute with the originating environ dict and a ``wsgi_request`` with the originating WSGIRequest. """ response = self.client.get("/header_view/") self.assertTrue(hasattr(response, 'request')) self.assertTrue(hasattr(response, 'wsgi_request')) for key, value in response.request.items(): self.assertIn(key, response.wsgi_request.environ) self.assertEqual(response.wsgi_request.environ[key], value) def test_response_resolver_match(self): """ The response contains a ResolverMatch instance. """ response = self.client.get('/header_view/') self.assertTrue(hasattr(response, 'resolver_match')) def test_response_resolver_match_redirect_follow(self): """ The response ResolverMatch instance contains the correct information when following redirects. """ response = self.client.get('/redirect_view/', follow=True) self.assertEqual(response.resolver_match.url_name, 'get_view') def test_response_resolver_match_regular_view(self): """ The response ResolverMatch instance contains the correct information when accessing a regular view. 
""" response = self.client.get('/get_view/') self.assertEqual(response.resolver_match.url_name, 'get_view') def test_raw_post(self): "POST raw data (with a content type) to a view" test_doc = """<?xml version="1.0" encoding="utf-8"?> <library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library> """ response = self.client.post('/raw_post_view/', test_doc, content_type='text/xml') self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, "Book template") self.assertEqual(response.content, b"Blink - Malcolm Gladwell") def test_insecure(self): "GET a URL through http" response = self.client.get('/secure_view/', secure=False) self.assertFalse(response.test_was_secure_request) self.assertEqual(response.test_server_port, '80') def test_secure(self): "GET a URL through https" response = self.client.get('/secure_view/', secure=True) self.assertTrue(response.test_was_secure_request) self.assertEqual(response.test_server_port, '443') def test_redirect(self): "GET a URL that redirects elsewhere" response = self.client.get('/redirect_view/') self.assertRedirects(response, '/get_view/') def test_redirect_with_query(self): "GET a URL that redirects with given GET parameters" response = self.client.get('/redirect_view/', {'var': 'value'}) self.assertRedirects(response, '/get_view/?var=value') def test_redirect_with_query_ordering(self): """assertRedirects() ignores the order of query string parameters.""" response = self.client.get('/redirect_view/', {'var': 'value', 'foo': 'bar'}) self.assertRedirects(response, '/get_view/?var=value&foo=bar') self.assertRedirects(response, '/get_view/?foo=bar&var=value') def test_permanent_redirect(self): "GET a URL that redirects permanently elsewhere" response = self.client.get('/permanent_redirect_view/') self.assertRedirects(response, '/get_view/', status_code=301) def test_temporary_redirect(self): "GET a URL that does a non-permanent redirect" response = self.client.get('/temporary_redirect_view/') self.assertRedirects(response, '/get_view/', status_code=302) def test_redirect_to_strange_location(self): "GET a URL that redirects to a non-200 page" response = self.client.get('/double_redirect_view/') # The response was a 302, and that the attempt to get the redirection # location returned 301 when retrieved self.assertRedirects(response, '/permanent_redirect_view/', target_status_code=301) def test_follow_redirect(self): "A URL that redirects can be followed to termination." response = self.client.get('/double_redirect_view/', follow=True) self.assertRedirects(response, '/get_view/', status_code=302, target_status_code=200) self.assertEqual(len(response.redirect_chain), 2) def test_follow_relative_redirect(self): "A URL with a relative redirect can be followed." response = self.client.get('/accounts/', follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(response.request['PATH_INFO'], '/accounts/login/') def test_follow_relative_redirect_no_trailing_slash(self): "A URL with a relative redirect with no trailing slash can be followed." response = self.client.get('/accounts/no_trailing_slash', follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(response.request['PATH_INFO'], '/accounts/login/') def test_follow_307_and_308_redirect(self): """ A 307 or 308 redirect preserves the request method after the redirect. 
""" methods = ('get', 'post', 'head', 'options', 'put', 'patch', 'delete', 'trace') codes = (307, 308) for method, code in itertools.product(methods, codes): with self.subTest(method=method, code=code): req_method = getattr(self.client, method) response = req_method('/redirect_view_%s/' % code, data={'value': 'test'}, follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(response.request['PATH_INFO'], '/post_view/') self.assertEqual(response.request['REQUEST_METHOD'], method.upper()) def test_follow_307_and_308_preserves_query_string(self): methods = ('post', 'options', 'put', 'patch', 'delete', 'trace') codes = (307, 308) for method, code in itertools.product(methods, codes): with self.subTest(method=method, code=code): req_method = getattr(self.client, method) response = req_method( '/redirect_view_%s_query_string/' % code, data={'value': 'test'}, follow=True, ) self.assertRedirects(response, '/post_view/?hello=world', status_code=code) self.assertEqual(response.request['QUERY_STRING'], 'hello=world') def test_follow_307_and_308_get_head_query_string(self): methods = ('get', 'head') codes = (307, 308) for method, code in itertools.product(methods, codes): with self.subTest(method=method, code=code): req_method = getattr(self.client, method) response = req_method( '/redirect_view_%s_query_string/' % code, data={'value': 'test'}, follow=True, ) self.assertRedirects(response, '/post_view/?hello=world', status_code=code) self.assertEqual(response.request['QUERY_STRING'], 'value=test') def test_follow_307_and_308_preserves_post_data(self): for code in (307, 308): with self.subTest(code=code): response = self.client.post('/redirect_view_%s/' % code, data={'value': 'test'}, follow=True) self.assertContains(response, 'test is the value') def test_follow_307_and_308_preserves_put_body(self): for code in (307, 308): with self.subTest(code=code): response = self.client.put('/redirect_view_%s/?to=/put_view/' % code, data='a=b', follow=True) self.assertContains(response, 'a=b is the body') def test_follow_307_and_308_preserves_get_params(self): data = {'var': 30, 'to': '/get_view/'} for code in (307, 308): with self.subTest(code=code): response = self.client.get('/redirect_view_%s/' % code, data=data, follow=True) self.assertContains(response, '30 is the value') def test_redirect_http(self): "GET a URL that redirects to an http URI" response = self.client.get('/http_redirect_view/', follow=True) self.assertFalse(response.test_was_secure_request) def test_redirect_https(self): "GET a URL that redirects to an https URI" response = self.client.get('/https_redirect_view/', follow=True) self.assertTrue(response.test_was_secure_request) def test_notfound_response(self): "GET a URL that responds as '404:Not Found'" response = self.client.get('/bad_view/') self.assertContains(response, 'MAGIC', status_code=404) def test_valid_form(self): "POST valid data to a form" post_data = { 'text': 'Hello World', 'email': '[email protected]', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Valid POST Template") def test_valid_form_with_hints(self): "GET a form, providing hints in the GET data" hints = { 'text': 'Hello World', 'multi': ('b', 'c', 'e') } response = self.client.get('/form_view/', data=hints) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Form GET Template") # The multi-value data has been rolled out ok 
self.assertContains(response, 'Select a valid choice.', 0) def test_incomplete_data_form(self): "POST incomplete data to a form" post_data = { 'text': 'Hello World', 'value': 37 } response = self.client.post('/form_view/', post_data) self.assertContains(response, 'This field is required.', 3) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") self.assertFormError(response, 'form', 'email', 'This field is required.') self.assertFormError(response, 'form', 'single', 'This field is required.') self.assertFormError(response, 'form', 'multi', 'This field is required.') def test_form_error(self): "POST erroneous data to a form" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") self.assertFormError(response, 'form', 'email', 'Enter a valid email address.') def test_valid_form_with_template(self): "POST valid data to a form using multiple templates" post_data = { 'text': 'Hello World', 'email': '[email protected]', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view_with_template/', post_data) self.assertContains(response, 'POST data OK') self.assertTemplateUsed(response, "form_view.html") self.assertTemplateUsed(response, 'base.html') self.assertTemplateNotUsed(response, "Valid POST Template") def test_incomplete_data_form_with_template(self): "POST incomplete data to a form using multiple templates" post_data = { 'text': 'Hello World', 'value': 37 } response = self.client.post('/form_view_with_template/', post_data) self.assertContains(response, 'POST data has errors') self.assertTemplateUsed(response, 'form_view.html') self.assertTemplateUsed(response, 'base.html') self.assertTemplateNotUsed(response, "Invalid POST Template") self.assertFormError(response, 'form', 'email', 'This field is required.') self.assertFormError(response, 'form', 'single', 'This field is required.') self.assertFormError(response, 'form', 'multi', 'This field is required.') def test_form_error_with_template(self): "POST erroneous data to a form using multiple templates" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view_with_template/', post_data) self.assertContains(response, 'POST data has errors') self.assertTemplateUsed(response, "form_view.html") self.assertTemplateUsed(response, 'base.html') self.assertTemplateNotUsed(response, "Invalid POST Template") self.assertFormError(response, 'form', 'email', 'Enter a valid email address.') def test_unknown_page(self): "GET an invalid URL" response = self.client.get('/unknown_view/') # The response was a 404 self.assertEqual(response.status_code, 404) def test_url_parameters(self): "Make sure that URL ;-parameters are not stripped." response = self.client.get('/unknown_view/;some-parameter') # The path in the response includes it (ignore that it's a 404) self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter') def test_view_with_login(self): "Request a page that is protected with @login_required" # Get the page without logging in. Should result in 302. 
response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') @override_settings( INSTALLED_APPS=['django.contrib.auth'], SESSION_ENGINE='django.contrib.sessions.backends.file', ) def test_view_with_login_when_sessions_app_is_not_installed(self): self.test_view_with_login() def test_view_with_force_login(self): "Request a page that is protected with @login_required" # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') # Log in self.client.force_login(self.u1) # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_method_login(self): "Request a page that is protected with a @login_required method" # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_method_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Request a page that requires a login response = self.client.get('/login_protected_method_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_method_force_login(self): "Request a page that is protected with a @login_required method" # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_method_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/') # Log in self.client.force_login(self.u1) # Request a page that requires a login response = self.client.get('/login_protected_method_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_login_and_custom_redirect(self): "Request a page that is protected with @login_required(redirect_field_name='redirect_to')" # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_view_custom_redirect/') self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Request a page that requires a login response = self.client.get('/login_protected_view_custom_redirect/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_force_login_and_custom_redirect(self): """ Request a page that is protected with @login_required(redirect_field_name='redirect_to') """ # Get the page without logging in. Should result in 302. 
response = self.client.get('/login_protected_view_custom_redirect/') self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/') # Log in self.client.force_login(self.u1) # Request a page that requires a login response = self.client.get('/login_protected_view_custom_redirect/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_bad_login(self): "Request a page that is protected with @login, but use bad credentials" login = self.client.login(username='otheruser', password='nopassword') self.assertFalse(login) def test_view_with_inactive_login(self): """ An inactive user may login if the authenticate backend allows it. """ credentials = {'username': 'inactive', 'password': 'password'} self.assertFalse(self.client.login(**credentials)) with self.settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend']): self.assertTrue(self.client.login(**credentials)) @override_settings( AUTHENTICATION_BACKENDS=[ 'django.contrib.auth.backends.ModelBackend', 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] ) def test_view_with_inactive_force_login(self): "Request a page that is protected with @login, but use an inactive login" # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') # Log in self.client.force_login(self.u2, backend='django.contrib.auth.backends.AllowAllUsersModelBackend') # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'inactive') def test_logout(self): "Request a logout after logging in" # Log in self.client.login(username='testclient', password='password') # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') # Log out self.client.logout() # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') def test_logout_with_force_login(self): "Request a logout after logging in" # Log in self.client.force_login(self.u1) # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') # Log out self.client.logout() # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') @override_settings( AUTHENTICATION_BACKENDS=[ 'django.contrib.auth.backends.ModelBackend', 'test_client.auth_backends.TestClientBackend', ], ) def test_force_login_with_backend(self): """ Request a page that is protected with @login_required when using force_login() and passing a backend. """ # Get the page without logging in. Should result in 302. 
response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') # Log in self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend') self.assertEqual(self.u1.backend, 'test_client.auth_backends.TestClientBackend') # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') @override_settings( AUTHENTICATION_BACKENDS=[ 'django.contrib.auth.backends.ModelBackend', 'test_client.auth_backends.TestClientBackend', ], ) def test_force_login_without_backend(self): """ force_login() without passing a backend and with multiple backends configured should automatically use the first backend. """ self.client.force_login(self.u1) response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') self.assertEqual(self.u1.backend, 'django.contrib.auth.backends.ModelBackend') @override_settings(AUTHENTICATION_BACKENDS=[ 'test_client.auth_backends.BackendWithoutGetUserMethod', 'django.contrib.auth.backends.ModelBackend', ]) def test_force_login_with_backend_missing_get_user(self): """ force_login() skips auth backends without a get_user() method. """ self.client.force_login(self.u1) self.assertEqual(self.u1.backend, 'django.contrib.auth.backends.ModelBackend') @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies") def test_logout_cookie_sessions(self): self.test_logout() def test_view_with_permissions(self): "Request a page that is protected with @permission_required" # Get the page without logging in. Should result in 302. response = self.client.get('/permission_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Log in with wrong permissions. Should result in 302. response = self.client.get('/permission_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/') # TODO: Log in with right permissions and request the page again def test_view_with_permissions_exception(self): "Request a page that is protected with @permission_required but raises an exception" # Get the page without logging in. Should result in 403. response = self.client.get('/permission_protected_view_exception/') self.assertEqual(response.status_code, 403) # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Log in with wrong permissions. Should result in 403. response = self.client.get('/permission_protected_view_exception/') self.assertEqual(response.status_code, 403) def test_view_with_method_permissions(self): "Request a page that is protected with a @permission_required method" # Get the page without logging in. Should result in 302. response = self.client.get('/permission_protected_method_view/') self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Log in with wrong permissions. Should result in 302. 
response = self.client.get('/permission_protected_method_view/') self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/') # TODO: Log in with right permissions and request the page again def test_external_redirect(self): response = self.client.get('/django_project_redirect/') self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False) def test_external_redirect_with_fetch_error_msg(self): """ assertRedirects without fetch_redirect_response=False raises a relevant ValueError rather than a non-descript AssertionError. """ response = self.client.get('/django_project_redirect/') msg = ( "The test client is unable to fetch remote URLs (got " "https://www.djangoproject.com/). If the host is served by Django, " "add 'www.djangoproject.com' to ALLOWED_HOSTS. " "Otherwise, use assertRedirects(..., fetch_redirect_response=False)." ) with self.assertRaisesMessage(ValueError, msg): self.assertRedirects(response, 'https://www.djangoproject.com/') def test_session_modifying_view(self): "Request a page that modifies the session" # Session value isn't set initially with self.assertRaises(KeyError): self.client.session['tobacconist'] self.client.post('/session_view/') # The session was modified self.assertEqual(self.client.session['tobacconist'], 'hovercraft') @override_settings( INSTALLED_APPS=[], SESSION_ENGINE='django.contrib.sessions.backends.file', ) def test_sessions_app_is_not_installed(self): self.test_session_modifying_view() @override_settings( INSTALLED_APPS=[], SESSION_ENGINE='django.contrib.sessions.backends.nonexistent', ) def test_session_engine_is_invalid(self): with self.assertRaisesMessage(ImportError, 'nonexistent'): self.test_session_modifying_view() def test_view_with_exception(self): "Request a page that is known to throw an error" with self.assertRaises(KeyError): self.client.get("/broken_view/") def test_exc_info(self): client = Client(raise_request_exception=False) response = client.get("/broken_view/") self.assertEqual(response.status_code, 500) exc_type, exc_value, exc_traceback = response.exc_info self.assertIs(exc_type, KeyError) self.assertIsInstance(exc_value, KeyError) self.assertEqual(str(exc_value), "'Oops! 
Looks like you wrote some bad code.'") self.assertIsNotNone(exc_traceback) def test_exc_info_none(self): response = self.client.get("/get_view/") self.assertIsNone(response.exc_info) def test_mail_sending(self): "Mail is redirected to a dummy outbox during test setup" response = self.client.get('/mail_sending_view/') self.assertEqual(response.status_code, 200) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, 'Test message') self.assertEqual(mail.outbox[0].body, 'This is a test email') self.assertEqual(mail.outbox[0].from_email, '[email protected]') self.assertEqual(mail.outbox[0].to[0], '[email protected]') self.assertEqual(mail.outbox[0].to[1], '[email protected]') def test_reverse_lazy_decodes(self): "reverse_lazy() works in the test client" data = {'var': 'data'} response = self.client.get(reverse_lazy('get_view'), data) # Check some response details self.assertContains(response, 'This is a test') def test_relative_redirect(self): response = self.client.get('/accounts/') self.assertRedirects(response, '/accounts/login/') def test_relative_redirect_no_trailing_slash(self): response = self.client.get('/accounts/no_trailing_slash') self.assertRedirects(response, '/accounts/login/') def test_mass_mail_sending(self): "Mass mail is redirected to a dummy outbox during test setup" response = self.client.get('/mass_mail_sending_view/') self.assertEqual(response.status_code, 200) self.assertEqual(len(mail.outbox), 2) self.assertEqual(mail.outbox[0].subject, 'First Test message') self.assertEqual(mail.outbox[0].body, 'This is the first test email') self.assertEqual(mail.outbox[0].from_email, '[email protected]') self.assertEqual(mail.outbox[0].to[0], '[email protected]') self.assertEqual(mail.outbox[0].to[1], '[email protected]') self.assertEqual(mail.outbox[1].subject, 'Second Test message') self.assertEqual(mail.outbox[1].body, 'This is the second test email') self.assertEqual(mail.outbox[1].from_email, '[email protected]') self.assertEqual(mail.outbox[1].to[0], '[email protected]') self.assertEqual(mail.outbox[1].to[1], '[email protected]') def test_exception_following_nested_client_request(self): """ A nested test client request shouldn't clobber exception signals from the outer client request. 
""" with self.assertRaisesMessage(Exception, 'exception message'): self.client.get('/nesting_exception_view/') def test_response_raises_multi_arg_exception(self): """A request may raise an exception with more than one required arg.""" with self.assertRaises(TwoArgException) as cm: self.client.get('/two_arg_exception/') self.assertEqual(cm.exception.args, ('one', 'two')) def test_uploading_temp_file(self): with tempfile.TemporaryFile() as test_file: response = self.client.post('/upload_view/', data={'temp_file': test_file}) self.assertEqual(response.content, b'temp_file') def test_uploading_named_temp_file(self): test_file = tempfile.NamedTemporaryFile() response = self.client.post('/upload_view/', data={'named_temp_file': test_file}) self.assertEqual(response.content, b'named_temp_file') @override_settings( MIDDLEWARE=['django.middleware.csrf.CsrfViewMiddleware'], ROOT_URLCONF='test_client.urls', ) class CSRFEnabledClientTests(SimpleTestCase): def test_csrf_enabled_client(self): "A client can be instantiated with CSRF checks enabled" csrf_client = Client(enforce_csrf_checks=True) # The normal client allows the post response = self.client.post('/post_view/', {}) self.assertEqual(response.status_code, 200) # The CSRF-enabled client rejects it response = csrf_client.post('/post_view/', {}) self.assertEqual(response.status_code, 403) class CustomTestClient(Client): i_am_customized = "Yes" class CustomTestClientTest(SimpleTestCase): client_class = CustomTestClient def test_custom_test_client(self): """A test case can specify a custom class for self.client.""" self.assertIs(hasattr(self.client, "i_am_customized"), True) def _generic_view(request): return HttpResponse(status=200) @override_settings(ROOT_URLCONF='test_client.urls') class RequestFactoryTest(SimpleTestCase): """Tests for the request factory.""" # A mapping between names of HTTP/1.1 methods and their test views. http_methods_and_views = ( ('get', get_view), ('post', post_view), ('put', _generic_view), ('patch', _generic_view), ('delete', _generic_view), ('head', _generic_view), ('options', _generic_view), ('trace', trace_view), ) request_factory = RequestFactory() def test_request_factory(self): """The request factory implements all the HTTP/1.1 methods.""" for method_name, view in self.http_methods_and_views: method = getattr(self.request_factory, method_name) request = method('/somewhere/') response = view(request) self.assertEqual(response.status_code, 200) def test_get_request_from_factory(self): """ The request factory returns a templated response for a GET request. """ request = self.request_factory.get('/somewhere/') response = get_view(request) self.assertContains(response, 'This is a test') def test_trace_request_from_factory(self): """The request factory returns an echo response for a TRACE request.""" url_path = '/somewhere/' request = self.request_factory.trace(url_path) response = trace_view(request) protocol = request.META["SERVER_PROTOCOL"] echoed_request_line = "TRACE {} {}".format(url_path, protocol) self.assertContains(response, echoed_request_line) @override_settings(ROOT_URLCONF='test_client.urls') class AsyncClientTest(TestCase): async def test_response_resolver_match(self): response = await self.async_client.get('/async_get_view/') self.assertTrue(hasattr(response, 'resolver_match')) self.assertEqual(response.resolver_match.url_name, 'async_get_view') async def test_follow_parameter_not_implemented(self): msg = 'AsyncClient request methods do not accept the follow parameter.' 
        tests = (
            'get',
            'post',
            'put',
            'patch',
            'delete',
            'head',
            'options',
            'trace',
        )
        for method_name in tests:
            with self.subTest(method=method_name):
                method = getattr(self.async_client, method_name)
                with self.assertRaisesMessage(NotImplementedError, msg):
                    await method('/redirect_view/', follow=True)


@override_settings(ROOT_URLCONF='test_client.urls')
class AsyncRequestFactoryTest(SimpleTestCase):
    request_factory = AsyncRequestFactory()

    async def test_request_factory(self):
        tests = (
            'get',
            'post',
            'put',
            'patch',
            'delete',
            'head',
            'options',
            'trace',
        )
        for method_name in tests:
            with self.subTest(method=method_name):
                async def async_generic_view(request):
                    if request.method.lower() != method_name:
                        return HttpResponseNotAllowed(method_name)
                    return HttpResponse(status=200)

                method = getattr(self.request_factory, method_name)
                request = method('/somewhere/')
                response = await async_generic_view(request)
                self.assertEqual(response.status_code, 200)
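
# --- Illustrative sketch, not part of the original suite ---
# test_follow_parameter_not_implemented above shows that the asynchronous
# client rejects follow=True. For contrast, this is roughly how the
# synchronous client handles the same parameter: it resolves the redirect
# chain and records each hop in response.redirect_chain. The class name is an
# illustrative addition; it relies on imports already present in this module
# (SimpleTestCase, override_settings).
@override_settings(ROOT_URLCONF='test_client.urls')
class SyncClientFollowSketch(SimpleTestCase):
    def test_follow_redirect(self):
        # /redirect_view/ issues a 302 to /get_view/; follow=True returns the
        # final response plus the list of (url, status_code) hops.
        response = self.client.get('/redirect_view/', follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.redirect_chain, [('/get_view/', 302)])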
f3ed1cbcae87ec8cb8fa04de18a5b233d5cb56a1aef799cea3ae47c053621c38
import json from urllib.parse import urlencode from xml.dom.minidom import parseString from django.contrib.auth.decorators import login_required, permission_required from django.core import mail from django.core.exceptions import ValidationError from django.forms import fields from django.forms.forms import Form from django.forms.formsets import BaseFormSet, formset_factory from django.http import ( HttpResponse, HttpResponseBadRequest, HttpResponseNotAllowed, HttpResponseNotFound, HttpResponseRedirect, ) from django.shortcuts import render from django.template import Context, Template from django.test import Client from django.utils.decorators import method_decorator def get_view(request): "A simple view that expects a GET request, and returns a rendered template" t = Template('This is a test. {{ var }} is the value.', name='GET Template') c = Context({'var': request.GET.get('var', 42)}) return HttpResponse(t.render(c)) async def async_get_view(request): return HttpResponse(b'GET content.') def trace_view(request): """ A simple view that expects a TRACE request and echoes its status line. TRACE requests should not have an entity; the view will return a 400 status response if it is present. """ if request.method.upper() != "TRACE": return HttpResponseNotAllowed("TRACE") elif request.body: return HttpResponseBadRequest("TRACE requests MUST NOT include an entity") else: protocol = request.META["SERVER_PROTOCOL"] t = Template( '{{ method }} {{ uri }} {{ version }}', name="TRACE Template", ) c = Context({ 'method': request.method, 'uri': request.path, 'version': protocol, }) return HttpResponse(t.render(c)) def put_view(request): if request.method == 'PUT': t = Template('Data received: {{ data }} is the body.', name='PUT Template') c = Context({ 'Content-Length': request.META['CONTENT_LENGTH'], 'data': request.body.decode(), }) else: t = Template('Viewing GET page.', name='Empty GET Template') c = Context() return HttpResponse(t.render(c)) def post_view(request): """A view that expects a POST, and returns a different template depending on whether any POST data is available """ if request.method == 'POST': if request.POST: t = Template('Data received: {{ data }} is the value.', name='POST Template') c = Context({'data': request.POST['value']}) else: t = Template('Viewing POST page.', name='Empty POST Template') c = Context() else: t = Template('Viewing GET page.', name='Empty GET Template') c = Context() return HttpResponse(t.render(c)) def json_view(request): """ A view that expects a request with the header 'application/json' and JSON data, which is deserialized and included in the context. """ if request.META.get('CONTENT_TYPE') != 'application/json': return HttpResponse() t = Template('Viewing {} page. 
With data {{ data }}.'.format(request.method)) data = json.loads(request.body.decode('utf-8')) c = Context({'data': data}) return HttpResponse(t.render(c)) def view_with_header(request): "A view that has a custom header" response = HttpResponse() response.headers['X-DJANGO-TEST'] = 'Slartibartfast' return response def raw_post_view(request): """A view which expects raw XML to be posted and returns content extracted from the XML""" if request.method == 'POST': root = parseString(request.body) first_book = root.firstChild.firstChild title, author = [n.firstChild.nodeValue for n in first_book.childNodes] t = Template("{{ title }} - {{ author }}", name="Book template") c = Context({"title": title, "author": author}) else: t = Template("GET request.", name="Book GET template") c = Context() return HttpResponse(t.render(c)) def redirect_view(request): "A view that redirects all requests to the GET view" if request.GET: query = '?' + urlencode(request.GET, True) else: query = '' return HttpResponseRedirect('/get_view/' + query) def method_saving_307_redirect_query_string_view(request): return HttpResponseRedirect('/post_view/?hello=world', status=307) def method_saving_308_redirect_query_string_view(request): return HttpResponseRedirect('/post_view/?hello=world', status=308) def _post_view_redirect(request, status_code): """Redirect to /post_view/ using the status code.""" redirect_to = request.GET.get('to', '/post_view/') return HttpResponseRedirect(redirect_to, status=status_code) def method_saving_307_redirect_view(request): return _post_view_redirect(request, 307) def method_saving_308_redirect_view(request): return _post_view_redirect(request, 308) def view_with_secure(request): "A view that indicates if the request was secure" response = HttpResponse() response.test_was_secure_request = request.is_secure() response.test_server_port = request.META.get('SERVER_PORT', 80) return response def double_redirect_view(request): "A view that redirects all requests to a redirection view" return HttpResponseRedirect('/permanent_redirect_view/') def bad_view(request): "A view that returns a 404 with some error content" return HttpResponseNotFound('Not found!. This page contains some MAGIC content') TestChoices = ( ('a', 'First Choice'), ('b', 'Second Choice'), ('c', 'Third Choice'), ('d', 'Fourth Choice'), ('e', 'Fifth Choice') ) class TestForm(Form): text = fields.CharField() email = fields.EmailField() value = fields.IntegerField() single = fields.ChoiceField(choices=TestChoices) multi = fields.MultipleChoiceField(choices=TestChoices) def clean(self): cleaned_data = self.cleaned_data if cleaned_data.get("text") == "Raise non-field error": raise ValidationError("Non-field error.") return cleaned_data def form_view(request): "A view that tests a simple form" if request.method == 'POST': form = TestForm(request.POST) if form.is_valid(): t = Template('Valid POST data.', name='Valid POST Template') c = Context() else: t = Template('Invalid POST data. {{ form.errors }}', name='Invalid POST Template') c = Context({'form': form}) else: form = TestForm(request.GET) t = Template('Viewing base form. 
{{ form }}.', name='Form GET Template') c = Context({'form': form}) return HttpResponse(t.render(c)) def form_view_with_template(request): "A view that tests a simple form" if request.method == 'POST': form = TestForm(request.POST) if form.is_valid(): message = 'POST data OK' else: message = 'POST data has errors' else: form = TestForm() message = 'GET form page' return render(request, 'form_view.html', { 'form': form, 'message': message, }) class BaseTestFormSet(BaseFormSet): def clean(self): """No two email addresses are the same.""" if any(self.errors): # Don't bother validating the formset unless each form is valid return emails = [] for form in self.forms: email = form.cleaned_data['email'] if email in emails: raise ValidationError( "Forms in a set must have distinct email addresses." ) emails.append(email) TestFormSet = formset_factory(TestForm, BaseTestFormSet) def formset_view(request): "A view that tests a simple formset" if request.method == 'POST': formset = TestFormSet(request.POST) if formset.is_valid(): t = Template('Valid POST data.', name='Valid POST Template') c = Context() else: t = Template('Invalid POST data. {{ my_formset.errors }}', name='Invalid POST Template') c = Context({'my_formset': formset}) else: formset = TestForm(request.GET) t = Template('Viewing base formset. {{ my_formset }}.', name='Formset GET Template') c = Context({'my_formset': formset}) return HttpResponse(t.render(c)) @login_required def login_protected_view(request): "A simple view that is login protected." t = Template('This is a login protected test. Username is {{ user.username }}.', name='Login Template') c = Context({'user': request.user}) return HttpResponse(t.render(c)) @login_required(redirect_field_name='redirect_to') def login_protected_view_changed_redirect(request): "A simple view that is login protected with a custom redirect field set" t = Template('This is a login protected test. Username is {{ user.username }}.', name='Login Template') c = Context({'user': request.user}) return HttpResponse(t.render(c)) def _permission_protected_view(request): "A simple view that is permission protected." t = Template('This is a permission protected test. ' 'Username is {{ user.username }}. ' 'Permissions are {{ user.get_all_permissions }}.', name='Permissions Template') c = Context({'user': request.user}) return HttpResponse(t.render(c)) permission_protected_view = permission_required('permission_not_granted')(_permission_protected_view) permission_protected_view_exception = ( permission_required('permission_not_granted', raise_exception=True)(_permission_protected_view) ) class _ViewManager: @method_decorator(login_required) def login_protected_view(self, request): t = Template('This is a login protected test using a method. ' 'Username is {{ user.username }}.', name='Login Method Template') c = Context({'user': request.user}) return HttpResponse(t.render(c)) @method_decorator(permission_required('permission_not_granted')) def permission_protected_view(self, request): t = Template('This is a permission protected test using a method. ' 'Username is {{ user.username }}. 
' 'Permissions are {{ user.get_all_permissions }}.', name='Permissions Template') c = Context({'user': request.user}) return HttpResponse(t.render(c)) _view_manager = _ViewManager() login_protected_method_view = _view_manager.login_protected_view permission_protected_method_view = _view_manager.permission_protected_view def session_view(request): "A view that modifies the session" request.session['tobacconist'] = 'hovercraft' t = Template('This is a view that modifies the session.', name='Session Modifying View Template') c = Context() return HttpResponse(t.render(c)) def broken_view(request): """A view which just raises an exception, simulating a broken view.""" raise KeyError("Oops! Looks like you wrote some bad code.") def mail_sending_view(request): mail.EmailMessage( "Test message", "This is a test email", "[email protected]", ['[email protected]', '[email protected]']).send() return HttpResponse("Mail sent") def mass_mail_sending_view(request): m1 = mail.EmailMessage( 'First Test message', 'This is the first test email', '[email protected]', ['[email protected]', '[email protected]']) m2 = mail.EmailMessage( 'Second Test message', 'This is the second test email', '[email protected]', ['[email protected]', '[email protected]']) c = mail.get_connection() c.send_messages([m1, m2]) return HttpResponse("Mail sent") def nesting_exception_view(request): """ A view that uses a nested client to call another view and then raises an exception. """ client = Client() client.get('/get_view/') raise Exception('exception message') def django_project_redirect(request): return HttpResponseRedirect('https://www.djangoproject.com/') def upload_view(request): """Prints keys of request.FILES to the response.""" return HttpResponse(', '.join(request.FILES)) class TwoArgException(Exception): def __init__(self, one, two): pass def two_arg_exception(request): raise TwoArgException('one', 'two')
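
# --- Illustrative addition, not part of the original test views ---
# json_view above renders its template only when CONTENT_TYPE is
# 'application/json' and deserializes request.body itself. A minimal sketch
# of the same idea using JsonResponse, which serializes the echoed payload;
# this view is not wired into the test URLconf, and its name is illustrative.
from django.http import JsonResponse


def json_echo_view(request):
    """Echo a JSON request body back as JSON (illustrative sketch only)."""
    if request.META.get('CONTENT_TYPE') != 'application/json':
        return HttpResponseBadRequest('Expected application/json')
    data = json.loads(request.body.decode('utf-8'))
    return JsonResponse({'received': data})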
b280d38814102733c4174097d58801c73d6e306fe2155b40a287c9ae45d79a02
from django.core.exceptions import ImproperlyConfigured from django.core.handlers.wsgi import WSGIHandler, WSGIRequest, get_script_name from django.core.signals import request_finished, request_started from django.db import close_old_connections, connection from django.test import ( RequestFactory, SimpleTestCase, TransactionTestCase, override_settings, ) from django.utils.version import PY37 class HandlerTests(SimpleTestCase): request_factory = RequestFactory() def setUp(self): request_started.disconnect(close_old_connections) def tearDown(self): request_started.connect(close_old_connections) def test_middleware_initialized(self): handler = WSGIHandler() self.assertIsNotNone(handler._middleware_chain) def test_bad_path_info(self): """ A non-UTF-8 path populates PATH_INFO with an URL-encoded path and produces a 404. """ environ = self.request_factory.get('/').environ environ['PATH_INFO'] = '\xed' handler = WSGIHandler() response = handler(environ, lambda *a, **k: None) # The path of the request will be encoded to '/%ED'. self.assertEqual(response.status_code, 404) def test_non_ascii_query_string(self): """ Non-ASCII query strings are properly decoded (#20530, #22996). """ environ = self.request_factory.get('/').environ raw_query_strings = [ b'want=caf%C3%A9', # This is the proper way to encode 'café' b'want=caf\xc3\xa9', # UA forgot to quote bytes b'want=caf%E9', # UA quoted, but not in UTF-8 b'want=caf\xe9', # UA forgot to convert Latin-1 to UTF-8 and to quote (typical of MSIE) ] got = [] for raw_query_string in raw_query_strings: # Simulate http.server.BaseHTTPRequestHandler.parse_request handling of raw request environ['QUERY_STRING'] = str(raw_query_string, 'iso-8859-1') request = WSGIRequest(environ) got.append(request.GET['want']) # %E9 is converted to the Unicode replacement character by parse_qsl self.assertEqual(got, ['café', 'café', 'caf\ufffd', 'café']) def test_non_ascii_cookie(self): """Non-ASCII cookies set in JavaScript are properly decoded (#20557).""" environ = self.request_factory.get('/').environ raw_cookie = 'want="café"'.encode('utf-8').decode('iso-8859-1') environ['HTTP_COOKIE'] = raw_cookie request = WSGIRequest(environ) self.assertEqual(request.COOKIES['want'], "café") def test_invalid_unicode_cookie(self): """ Invalid cookie content should result in an absent cookie, but not in a crash while trying to decode it (#23638). """ environ = self.request_factory.get('/').environ environ['HTTP_COOKIE'] = 'x=W\x03c(h]\x8e' request = WSGIRequest(environ) # We don't test COOKIES content, as the result might differ between # Python version because parsing invalid content became stricter in # latest versions. self.assertIsInstance(request.COOKIES, dict) @override_settings(ROOT_URLCONF='handlers.urls') def test_invalid_multipart_boundary(self): """ Invalid boundary string should produce a "Bad Request" response, not a server error (#23887). 
""" environ = self.request_factory.post('/malformed_post/').environ environ['CONTENT_TYPE'] = 'multipart/form-data; boundary=WRONG\x07' handler = WSGIHandler() response = handler(environ, lambda *a, **k: None) # Expect "bad request" response self.assertEqual(response.status_code, 400) @override_settings(ROOT_URLCONF='handlers.urls', MIDDLEWARE=[]) class TransactionsPerRequestTests(TransactionTestCase): available_apps = [] def test_no_transaction(self): response = self.client.get('/in_transaction/') self.assertContains(response, 'False') def test_auto_transaction(self): old_atomic_requests = connection.settings_dict['ATOMIC_REQUESTS'] try: connection.settings_dict['ATOMIC_REQUESTS'] = True response = self.client.get('/in_transaction/') finally: connection.settings_dict['ATOMIC_REQUESTS'] = old_atomic_requests self.assertContains(response, 'True') async def test_auto_transaction_async_view(self): old_atomic_requests = connection.settings_dict['ATOMIC_REQUESTS'] try: connection.settings_dict['ATOMIC_REQUESTS'] = True msg = 'You cannot use ATOMIC_REQUESTS with async views.' with self.assertRaisesMessage(RuntimeError, msg): await self.async_client.get('/async_regular/') finally: connection.settings_dict['ATOMIC_REQUESTS'] = old_atomic_requests def test_no_auto_transaction(self): old_atomic_requests = connection.settings_dict['ATOMIC_REQUESTS'] try: connection.settings_dict['ATOMIC_REQUESTS'] = True response = self.client.get('/not_in_transaction/') finally: connection.settings_dict['ATOMIC_REQUESTS'] = old_atomic_requests self.assertContains(response, 'False') @override_settings(ROOT_URLCONF='handlers.urls') class SignalsTests(SimpleTestCase): def setUp(self): self.signals = [] self.signaled_environ = None request_started.connect(self.register_started) request_finished.connect(self.register_finished) def tearDown(self): request_started.disconnect(self.register_started) request_finished.disconnect(self.register_finished) def register_started(self, **kwargs): self.signals.append('started') self.signaled_environ = kwargs.get('environ') def register_finished(self, **kwargs): self.signals.append('finished') def test_request_signals(self): response = self.client.get('/regular/') self.assertEqual(self.signals, ['started', 'finished']) self.assertEqual(response.content, b"regular content") self.assertEqual(self.signaled_environ, response.wsgi_request.environ) def test_request_signals_streaming_response(self): response = self.client.get('/streaming/') self.assertEqual(self.signals, ['started']) self.assertEqual(b''.join(response.streaming_content), b"streaming content") self.assertEqual(self.signals, ['started', 'finished']) def empty_middleware(get_response): pass @override_settings(ROOT_URLCONF='handlers.urls') class HandlerRequestTests(SimpleTestCase): request_factory = RequestFactory() def test_async_view(self): """Calling an async view down the normal synchronous path.""" response = self.client.get('/async_regular/') self.assertEqual(response.status_code, 200) def test_suspiciousop_in_view_returns_400(self): response = self.client.get('/suspicious/') self.assertEqual(response.status_code, 400) def test_bad_request_in_view_returns_400(self): response = self.client.get('/bad_request/') self.assertEqual(response.status_code, 400) def test_invalid_urls(self): response = self.client.get('~%A9helloworld') self.assertEqual(response.status_code, 404) self.assertEqual(response.context['request_path'], '/~%25A9helloworld' if PY37 else '/%7E%25A9helloworld') response = 
self.client.get('d%aao%aaw%aan%aal%aao%aaa%aad%aa/') self.assertEqual(response.context['request_path'], '/d%25AAo%25AAw%25AAn%25AAl%25AAo%25AAa%25AAd%25AA') response = self.client.get('/%E2%99%E2%99%A5/') self.assertEqual(response.context['request_path'], '/%25E2%2599%E2%99%A5/') response = self.client.get('/%E2%98%8E%E2%A9%E2%99%A5/') self.assertEqual(response.context['request_path'], '/%E2%98%8E%25E2%25A9%E2%99%A5/') def test_environ_path_info_type(self): environ = self.request_factory.get('/%E2%A8%87%87%A5%E2%A8%A0').environ self.assertIsInstance(environ['PATH_INFO'], str) def test_handle_accepts_httpstatus_enum_value(self): def start_response(status, headers): start_response.status = status environ = self.request_factory.get('/httpstatus_enum/').environ WSGIHandler()(environ, start_response) self.assertEqual(start_response.status, '200 OK') @override_settings(MIDDLEWARE=['handlers.tests.empty_middleware']) def test_middleware_returns_none(self): msg = 'Middleware factory handlers.tests.empty_middleware returned None.' with self.assertRaisesMessage(ImproperlyConfigured, msg): self.client.get('/') def test_no_response(self): msg = "The view %s didn't return an HttpResponse object. It returned None instead." tests = ( ('/no_response_fbv/', 'handlers.views.no_response'), ('/no_response_cbv/', 'handlers.views.NoResponse.__call__'), ) for url, view in tests: with self.subTest(url=url), self.assertRaisesMessage(ValueError, msg % view): self.client.get(url) class ScriptNameTests(SimpleTestCase): def test_get_script_name(self): # Regression test for #23173 # Test first without PATH_INFO script_name = get_script_name({'SCRIPT_URL': '/foobar/'}) self.assertEqual(script_name, '/foobar/') script_name = get_script_name({'SCRIPT_URL': '/foobar/', 'PATH_INFO': '/'}) self.assertEqual(script_name, '/foobar') def test_get_script_name_double_slashes(self): """ WSGI squashes multiple successive slashes in PATH_INFO, get_script_name should take that into account when forming SCRIPT_NAME (#17133). """ script_name = get_script_name({ 'SCRIPT_URL': '/mst/milestones//accounts/login//help', 'PATH_INFO': '/milestones/accounts/login/help', }) self.assertEqual(script_name, '/mst') @override_settings(ROOT_URLCONF='handlers.urls') class AsyncHandlerRequestTests(SimpleTestCase): """Async variants of the normal handler request tests.""" async def test_sync_view(self): """Calling a sync view down the asynchronous path.""" response = await self.async_client.get('/regular/') self.assertEqual(response.status_code, 200) async def test_async_view(self): """Calling an async view down the asynchronous path.""" response = await self.async_client.get('/async_regular/') self.assertEqual(response.status_code, 200) async def test_suspiciousop_in_view_returns_400(self): response = await self.async_client.get('/suspicious/') self.assertEqual(response.status_code, 400) async def test_bad_request_in_view_returns_400(self): response = await self.async_client.get('/bad_request/') self.assertEqual(response.status_code, 400) async def test_no_response(self): msg = ( "The view handlers.views.no_response didn't return an " "HttpResponse object. It returned None instead." ) with self.assertRaisesMessage(ValueError, msg): await self.async_client.get('/no_response_fbv/') async def test_unawaited_response(self): msg = ( "The view handlers.views.CoroutineClearingView.__call__ didn't" " return an HttpResponse object. It returned an unawaited" " coroutine instead. You may need to add an 'await'" " into your view." 
        )
        with self.assertRaisesMessage(ValueError, msg):
            await self.async_client.get('/unawaited/')
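
# --- Illustrative sketch, not part of the original handler tests ---
# empty_middleware above is a middleware factory that returns None, which is
# exactly what makes test_middleware_returns_none expect ImproperlyConfigured.
# For contrast, a minimal well-formed factory returns a callable that takes a
# request and returns a response (the name is an illustrative addition):
def passthrough_middleware(get_response):
    """A no-op middleware factory of the shape Django expects (illustrative)."""
    def middleware(request):
        # Delegate straight to the next layer; a real middleware would add
        # behavior before and/or after this call.
        return get_response(request)
    return middleware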
635316d77b554f4689a43bdb2b5d0d58963e2e4d9e7234834fb62e39852ad639
from django.urls import path

from . import views

urlpatterns = [
    path('regular/', views.regular),
    path('async_regular/', views.async_regular),
    path('no_response_fbv/', views.no_response),
    path('no_response_cbv/', views.NoResponse()),
    path('streaming/', views.streaming),
    path('in_transaction/', views.in_transaction),
    path('not_in_transaction/', views.not_in_transaction),
    path('bad_request/', views.bad_request),
    path('suspicious/', views.suspicious),
    path('malformed_post/', views.malformed_post),
    path('httpstatus_enum/', views.httpstatus_enum),
    path('unawaited/', views.async_unawaited),
]
17bda7516799561c6b5c007970b33453e7e0873370c412947488cdc0c139a90d
import asyncio
from http import HTTPStatus

from django.core.exceptions import BadRequest, SuspiciousOperation
from django.db import connection, transaction
from django.http import HttpResponse, StreamingHttpResponse
from django.views.decorators.csrf import csrf_exempt


def regular(request):
    return HttpResponse(b"regular content")


def no_response(request):
    pass


class NoResponse:
    def __call__(self, request):
        pass


def streaming(request):
    return StreamingHttpResponse([b"streaming", b" ", b"content"])


def in_transaction(request):
    return HttpResponse(str(connection.in_atomic_block))


@transaction.non_atomic_requests
def not_in_transaction(request):
    return HttpResponse(str(connection.in_atomic_block))


def bad_request(request):
    raise BadRequest()


def suspicious(request):
    raise SuspiciousOperation('dubious')


@csrf_exempt
def malformed_post(request):
    request.POST
    return HttpResponse()


def httpstatus_enum(request):
    return HttpResponse(status=HTTPStatus.OK)


async def async_regular(request):
    return HttpResponse(b'regular content')


class CoroutineClearingView:
    def __call__(self, request):
        """Return an unawaited coroutine (common error for async views)."""
        # Store coroutine to suppress 'unawaited' warning message
        self._unawaited_coroutine = asyncio.sleep(0)
        return self._unawaited_coroutine

    def __del__(self):
        self._unawaited_coroutine.close()


async_unawaited = CoroutineClearingView()
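
# --- Illustrative sketch, not part of the original handler views ---
# CoroutineClearingView above deliberately returns an unawaited coroutine so
# that handlers/tests.py can assert the "returned an unawaited coroutine"
# error. A minimal corrected counterpart is a plain coroutine function view,
# which the handler recognizes as asynchronous and awaits before returning
# the response (the name and response body are illustrative):
async def awaited(request):
    # Yield control once, then hand back a real HttpResponse.
    await asyncio.sleep(0)
    return HttpResponse(b'awaited content')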
aeeb86e8deb7ae22df55b8aff21fc0bf4a8c2cb5d3fb5e76c2f9f41f2af1980f
import datetime import itertools import unittest from copy import copy from unittest import mock from django.core.management.color import no_style from django.db import ( DatabaseError, DataError, IntegrityError, OperationalError, connection, ) from django.db.models import ( CASCADE, PROTECT, AutoField, BigAutoField, BigIntegerField, BinaryField, BooleanField, CharField, CheckConstraint, DateField, DateTimeField, ForeignKey, ForeignObject, Index, IntegerField, ManyToManyField, Model, OneToOneField, PositiveIntegerField, Q, SlugField, SmallAutoField, SmallIntegerField, TextField, TimeField, UniqueConstraint, UUIDField, ) from django.db.transaction import TransactionManagementError, atomic from django.test import ( TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import CaptureQueriesContext, isolate_apps from django.utils import timezone from .fields import ( CustomManyToManyField, InheritedManyToManyField, MediumBlobField, ) from .models import ( Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, AuthorWithIndexedName, AuthorWithIndexedNameAndBirthday, AuthorWithUniqueName, AuthorWithUniqueNameAndBirthday, Book, BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps, ) class SchemaTests(TransactionTestCase): """ Tests for the schema-alteration code. Be aware that these tests are more liable than most to false results, as sometimes the code to check if a test has worked is almost as complex as the code it is testing. """ available_apps = [] models = [ Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Node, Note, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, ] # Utility functions def setUp(self): # local_models should contain test dependent model classes that will be # automatically removed from the app cache on test tear down. self.local_models = [] # isolated_local_models contains models that are in test methods # decorated with @isolate_apps. 
self.isolated_local_models = [] def tearDown(self): # Delete any tables made for our models self.delete_tables() new_apps.clear_cache() for model in new_apps.get_models(): model._meta._expire_cache() if 'schema' in new_apps.all_models: for model in self.local_models: for many_to_many in model._meta.many_to_many: through = many_to_many.remote_field.through if through and through._meta.auto_created: del new_apps.all_models['schema'][through._meta.model_name] del new_apps.all_models['schema'][model._meta.model_name] if self.isolated_local_models: with connection.schema_editor() as editor: for model in self.isolated_local_models: editor.delete_model(model) def delete_tables(self): "Deletes all model tables for our models for a clean test environment" converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: connection.disable_constraint_checking() table_names = connection.introspection.table_names() if connection.features.ignores_table_name_case: table_names = [table_name.lower() for table_name in table_names] for model in itertools.chain(SchemaTests.models, self.local_models): tbl = converter(model._meta.db_table) if connection.features.ignores_table_name_case: tbl = tbl.lower() if tbl in table_names: editor.delete_model(model) table_names.remove(tbl) connection.enable_constraint_checking() def column_classes(self, model): with connection.cursor() as cursor: columns = { d[0]: (connection.introspection.get_field_type(d[1], d), d) for d in connection.introspection.get_table_description( cursor, model._meta.db_table, ) } # SQLite has a different format for field_type for name, (type, desc) in columns.items(): if isinstance(type, tuple): columns[name] = (type[0], desc) # SQLite also doesn't error properly if not columns: raise DatabaseError("Table does not exist (empty pragma)") return columns def get_primary_key(self, table): with connection.cursor() as cursor: return connection.introspection.get_primary_key_column(cursor, table) def get_indexes(self, table): """ Get the indexes on the table using a new cursor. """ with connection.cursor() as cursor: return [ c['columns'][0] for c in connection.introspection.get_constraints(cursor, table).values() if c['index'] and len(c['columns']) == 1 ] def get_uniques(self, table): with connection.cursor() as cursor: return [ c['columns'][0] for c in connection.introspection.get_constraints(cursor, table).values() if c['unique'] and len(c['columns']) == 1 ] def get_constraints(self, table): """ Get the constraints on a table using a new cursor. 
""" with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def get_constraints_for_column(self, model, column_name): constraints = self.get_constraints(model._meta.db_table) constraints_for_column = [] for name, details in constraints.items(): if details['columns'] == [column_name]: constraints_for_column.append(name) return sorted(constraints_for_column) def check_added_field_default(self, schema_editor, model, field, field_name, expected_default, cast_function=None): with connection.cursor() as cursor: schema_editor.add_field(model, field) cursor.execute("SELECT {} FROM {};".format(field_name, model._meta.db_table)) database_default = cursor.fetchall()[0][0] if cast_function and not type(database_default) == type(expected_default): database_default = cast_function(database_default) self.assertEqual(database_default, expected_default) def get_constraints_count(self, table, column, fk_to): """ Return a dict with keys 'fks', 'uniques, and 'indexes' indicating the number of foreign keys, unique constraints, and indexes on `table`.`column`. The `fk_to` argument is a 2-tuple specifying the expected foreign key relationship's (table, column). """ with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, table) counts = {'fks': 0, 'uniques': 0, 'indexes': 0} for c in constraints.values(): if c['columns'] == [column]: if c['foreign_key'] == fk_to: counts['fks'] += 1 if c['unique']: counts['uniques'] += 1 elif c['index']: counts['indexes'] += 1 return counts def get_column_collation(self, table, column): with connection.cursor() as cursor: return next( f.collation for f in connection.introspection.get_table_description(cursor, table) if f.name == column ) def assertIndexOrder(self, table, index, order): constraints = self.get_constraints(table) self.assertIn(index, constraints) index_orders = constraints[index]['orders'] self.assertTrue(all(val == expected for val, expected in zip(index_orders, order))) def assertForeignKeyExists(self, model, column, expected_fk_table, field='id'): """ Fail if the FK constraint on `model.Meta.db_table`.`column` to `expected_fk_table`.id doesn't exist. """ constraints = self.get_constraints(model._meta.db_table) constraint_fk = None for details in constraints.values(): if details['columns'] == [column] and details['foreign_key']: constraint_fk = details['foreign_key'] break self.assertEqual(constraint_fk, (expected_fk_table, field)) def assertForeignKeyNotExists(self, model, column, expected_fk_table): with self.assertRaises(AssertionError): self.assertForeignKeyExists(model, column, expected_fk_table) # Tests def test_creation_deletion(self): """ Tries creating a model's table, and then deleting it. """ with connection.schema_editor() as editor: # Create the table editor.create_model(Author) # The table is there list(Author.objects.all()) # Clean up that table editor.delete_model(Author) # No deferred SQL should be left over. 
self.assertEqual(editor.deferred_sql, []) # The table is gone with self.assertRaises(DatabaseError): list(Author.objects.all()) @skipUnlessDBFeature('supports_foreign_keys') def test_fk(self): "Creating tables out of FK order, then repointing, works" # Create the table with connection.schema_editor() as editor: editor.create_model(Book) editor.create_model(Author) editor.create_model(Tag) # Initial tables are there list(Author.objects.all()) list(Book.objects.all()) # Make sure the FK constraint is present with self.assertRaises(IntegrityError): Book.objects.create( author_id=1, title="Much Ado About Foreign Keys", pub_date=datetime.datetime.now(), ) # Repoint the FK constraint old_field = Book._meta.get_field("author") new_field = ForeignKey(Tag, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) self.assertForeignKeyExists(Book, 'author_id', 'schema_tag') @skipUnlessDBFeature('can_create_inline_fk') def test_inline_fk(self): # Create some tables. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) editor.create_model(Note) self.assertForeignKeyNotExists(Note, 'book_id', 'schema_book') # Add a foreign key from one to the other. with connection.schema_editor() as editor: new_field = ForeignKey(Book, CASCADE) new_field.set_attributes_from_name('book') editor.add_field(Note, new_field) self.assertForeignKeyExists(Note, 'book_id', 'schema_book') # Creating a FK field with a constraint uses a single statement without # a deferred ALTER TABLE. self.assertFalse([ sql for sql in (str(statement) for statement in editor.deferred_sql) if sql.startswith('ALTER TABLE') and 'ADD CONSTRAINT' in sql ]) @skipUnlessDBFeature('can_create_inline_fk') def test_add_inline_fk_update_data(self): with connection.schema_editor() as editor: editor.create_model(Node) # Add an inline foreign key and update data in the same transaction. new_field = ForeignKey(Node, CASCADE, related_name='new_fk', null=True) new_field.set_attributes_from_name('new_parent_fk') parent = Node.objects.create() with connection.schema_editor() as editor: editor.add_field(Node, new_field) editor.execute('UPDATE schema_node SET new_parent_fk_id = %s;', [parent.pk]) assertIndex = ( self.assertIn if connection.features.indexes_foreign_keys else self.assertNotIn ) assertIndex('new_parent_fk_id', self.get_indexes(Node._meta.db_table)) @skipUnlessDBFeature( 'can_create_inline_fk', 'allows_multiple_constraints_on_same_fields', ) @isolate_apps('schema') def test_add_inline_fk_index_update_data(self): class Node(Model): class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Node) # Add an inline foreign key, update data, and an index in the same # transaction. 
new_field = ForeignKey(Node, CASCADE, related_name='new_fk', null=True) new_field.set_attributes_from_name('new_parent_fk') parent = Node.objects.create() with connection.schema_editor() as editor: editor.add_field(Node, new_field) Node._meta.add_field(new_field) editor.execute('UPDATE schema_node SET new_parent_fk_id = %s;', [parent.pk]) editor.add_index(Node, Index(fields=['new_parent_fk'], name='new_parent_inline_fk_idx')) assertIndex = ( self.assertIn if connection.features.indexes_foreign_keys else self.assertNotIn ) assertIndex('new_parent_fk_id', self.get_indexes(Node._meta.db_table)) @skipUnlessDBFeature('supports_foreign_keys') def test_char_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorCharFieldWithIndex) # Change CharField to FK old_field = AuthorCharFieldWithIndex._meta.get_field('char_field') new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name('char_field') with connection.schema_editor() as editor: editor.alter_field(AuthorCharFieldWithIndex, old_field, new_field, strict=True) self.assertForeignKeyExists(AuthorCharFieldWithIndex, 'char_field_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') @skipUnlessDBFeature('supports_index_on_text_field') def test_text_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorTextFieldWithIndex) # Change TextField to FK old_field = AuthorTextFieldWithIndex._meta.get_field('text_field') new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name('text_field') with connection.schema_editor() as editor: editor.alter_field(AuthorTextFieldWithIndex, old_field, new_field, strict=True) self.assertForeignKeyExists(AuthorTextFieldWithIndex, 'text_field_id', 'schema_author') @isolate_apps('schema') def test_char_field_pk_to_auto_field(self): class Foo(Model): id = CharField(max_length=255, primary_key=True) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field('id') new_field = AutoField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Foo with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) @skipUnlessDBFeature('supports_foreign_keys') def test_fk_to_proxy(self): "Creating a FK to a proxy model creates database constraints." 
class AuthorProxy(Author): class Meta: app_label = 'schema' apps = new_apps proxy = True class AuthorRef(Model): author = ForeignKey(AuthorProxy, on_delete=CASCADE) class Meta: app_label = 'schema' apps = new_apps self.local_models = [AuthorProxy, AuthorRef] # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorRef) self.assertForeignKeyExists(AuthorRef, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_fk_db_constraint(self): "The db_constraint parameter is respected" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Author) editor.create_model(BookWeak) # Initial tables are there list(Author.objects.all()) list(Tag.objects.all()) list(BookWeak.objects.all()) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') # Make a db_constraint=False FK new_field = ForeignKey(Tag, CASCADE, db_constraint=False) new_field.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.add_field(Author, new_field) self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag') # Alter to one with a constraint new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) self.assertForeignKeyExists(Author, 'tag_id', 'schema_tag') # Alter to one without a constraint again new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field2, new_field, strict=True) self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag') @isolate_apps('schema') def test_no_db_constraint_added_during_primary_key_change(self): """ When a primary key that's pointed to by a ForeignKey with db_constraint=False is altered, a foreign key constraint isn't added. """ class Author(Model): class Meta: app_label = 'schema' class BookWeak(Model): author = ForeignKey(Author, CASCADE, db_constraint=False) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWeak) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') old_field = Author._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.model = Author new_field.set_attributes_from_name('id') # @isolate_apps() and inner models are needed to have the model # relations populated, otherwise this doesn't act as a regression test. 
self.assertEqual(len(new_field.model._meta.related_objects), 1) with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') def _test_m2m_db_constraint(self, M2MFieldClass): class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(LocalAuthorWithM2M) # Initial tables are there list(LocalAuthorWithM2M.objects.all()) list(Tag.objects.all()) # Make a db_constraint=False FK new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False) new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) self.assertForeignKeyNotExists(new_field.remote_field.through, 'tag_id', 'schema_tag') @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint(self): self._test_m2m_db_constraint(ManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_custom(self): self._test_m2m_db_constraint(CustomManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_inherited(self): self._test_m2m_db_constraint(InheritedManyToManyField) def test_add_field(self): """ Tests adding fields to models """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add the new field new_field = IntegerField(null=True) new_field.set_attributes_from_name("age") with CaptureQueriesContext(connection) as ctx, connection.schema_editor() as editor: editor.add_field(Author, new_field) drop_default_sql = editor.sql_alter_column_no_default % { 'column': editor.quote_name(new_field.name), } self.assertFalse(any(drop_default_sql in query['sql'] for query in ctx.captured_queries)) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['age'][0], connection.features.introspected_field_types['IntegerField']) self.assertTrue(columns['age'][1][6]) def test_add_field_remove_field(self): """ Adding a field and removing it removes all deferred sql referring to it. """ with connection.schema_editor() as editor: # Create a table with a unique constraint on the slug field. editor.create_model(Tag) # Remove the slug column. 
editor.remove_field(Tag, Tag._meta.get_field('slug')) self.assertEqual(editor.deferred_sql, []) def test_add_field_temp_default(self): """ Tests adding fields to models with a temporary default """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = CharField(max_length=30, default="Godwin") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['surname'][0], connection.features.introspected_field_types['CharField']) self.assertEqual(columns['surname'][1][6], connection.features.interprets_empty_strings_as_nulls) def test_add_field_temp_default_boolean(self): """ Tests adding fields to models with a temporary default where the default is False. (#21783) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = BooleanField(default=False) new_field.set_attributes_from_name("awesome") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) # BooleanField are stored as TINYINT(1) on MySQL. field_type = columns['awesome'][0] self.assertEqual(field_type, connection.features.introspected_field_types['BooleanField']) def test_add_field_default_transform(self): """ Tests adding fields to models with a default that is not directly valid in the database (#22581) """ class TestTransformField(IntegerField): # Weird field that saves the count of items in its value def get_default(self): return self.default def get_prep_value(self, value): if value is None: return 0 return len(value) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add the field with a default it needs to cast (to string in this case) new_field = TestTransformField(default={1: 2}) new_field.set_attributes_from_name("thing") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is there columns = self.column_classes(Author) field_type, field_info = columns['thing'] self.assertEqual(field_type, connection.features.introspected_field_types['IntegerField']) # Make sure the values were transformed correctly self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2) def test_add_field_binary(self): """ Tests binary fields get a sane default (#22851) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field new_field = BinaryField(blank=True) new_field.set_attributes_from_name("bits") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) # MySQL annoyingly uses the same backend, so it'll come back as one of # these two types. 
self.assertIn(columns['bits'][0], ("BinaryField", "TextField")) @unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific") def test_add_binaryfield_mediumblob(self): """ Test adding a custom-sized binary field on MySQL (#24846). """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field with default new_field = MediumBlobField(blank=True, default=b'123') new_field.set_attributes_from_name('bits') with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # Introspection treats BLOBs as TextFields self.assertEqual(columns['bits'][0], "TextField") def test_alter(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], connection.features.introspected_field_types['CharField']) self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) # Alter the name field to a TextField old_field = Author._meta.get_field("name") new_field = TextField(null=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertTrue(columns['name'][1][6]) # Change nullability again new_field2 = TextField(null=False) new_field2.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) def test_alter_auto_field_to_integer_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to IntegerField old_field = Author._meta.get_field('id') new_field = IntegerField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Now that ID is an IntegerField, the database raises an error if it # isn't provided. 
if not connection.features.supports_unspecified_pk: with self.assertRaises(DatabaseError): Author.objects.create() def test_alter_auto_field_to_char_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to CharField old_field = Author._meta.get_field('id') new_field = CharField(primary_key=True, max_length=50) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @isolate_apps('schema') def test_alter_auto_field_quoted_db_column(self): class Foo(Model): id = AutoField(primary_key=True, db_column='"quoted_id"') class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.model = Foo new_field.db_column = '"quoted_id"' new_field.set_attributes_from_name('id') with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) Foo.objects.create() def test_alter_not_unique_field_to_primary_key(self): if ( connection.vendor == 'mysql' and connection.mysql_is_mariadb and (10, 4, 3) < connection.mysql_version < (10, 5, 2) ): self.skipTest('https://jira.mariadb.org/browse/MDEV-19598') # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) # Change UUIDField to primary key. old_field = Author._meta.get_field('uuid') new_field = UUIDField(primary_key=True) new_field.set_attributes_from_name('uuid') new_field.model = Author with connection.schema_editor() as editor: editor.remove_field(Author, Author._meta.get_field('id')) editor.alter_field(Author, old_field, new_field, strict=True) @isolate_apps('schema') def test_alter_primary_key_quoted_db_table(self): class Foo(Model): class Meta: app_label = 'schema' db_table = '"foo"' with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.model = Foo new_field.set_attributes_from_name('id') with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) Foo.objects.create() def test_alter_text_field(self): # Regression for "BLOB/TEXT column 'info' can't have a default value") # on MySQL. # Create the table with connection.schema_editor() as editor: editor.create_model(Note) old_field = Note._meta.get_field("info") new_field = TextField(blank=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) @skipUnlessDBFeature('can_defer_constraint_checks', 'can_rollback_ddl') def test_alter_fk_checks_deferred_constraints(self): """ #25492 - Altering a foreign key's structure and data in the same transaction. """ with connection.schema_editor() as editor: editor.create_model(Node) old_field = Node._meta.get_field('parent') new_field = ForeignKey(Node, CASCADE) new_field.set_attributes_from_name('parent') parent = Node.objects.create() with connection.schema_editor() as editor: # Update the parent FK to create a deferred constraint check. Node.objects.update(parent=parent) editor.alter_field(Node, old_field, new_field, strict=True) def test_alter_text_field_to_date_field(self): """ #25002 - Test conversion of text field to date field. 
""" with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='1988-05-05') old_field = Note._meta.get_field('info') new_field = DateField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) def test_alter_text_field_to_datetime_field(self): """ #25002 - Test conversion of text field to datetime field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='1988-05-05 3:16:17.4567') old_field = Note._meta.get_field('info') new_field = DateTimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) def test_alter_text_field_to_time_field(self): """ #25002 - Test conversion of text field to time field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='3:16:17.4567') old_field = Note._meta.get_field('info') new_field = TimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_alter_textual_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = CharField(max_length=50) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): Note.objects.create(info=None) def test_alter_numeric_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(UniqueTest) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='aaa') old_field = UniqueTest._meta.get_field("year") new_field = BigIntegerField() new_field.set_attributes_from_name("year") with connection.schema_editor() as editor: editor.alter_field(UniqueTest, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='bbb') def test_alter_null_to_not_null(self): """ #23609 - Tests handling of default values when altering from NULL to NOT NULL. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertTrue(columns['height'][1][6]) # Create some test data Author.objects.create(name='Not null author', height=12) Author.objects.create(name='Null author') # Verify null value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertIsNone(Author.objects.get(name='Null author').height) # Alter the height field to NOT NULL with default old_field = Author._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertFalse(columns['height'][1][6]) # Verify default value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertEqual(Author.objects.get(name='Null author').height, 42) def test_alter_charfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a CharField to null. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change the CharField to null old_field = Author._meta.get_field('name') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific') def test_alter_char_field_decrease_length(self): # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) Author.objects.create(name='x' * 255) # Change max_length of CharField. 
old_field = Author._meta.get_field('name') new_field = CharField(max_length=254) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: msg = 'value too long for type character varying(254)' with self.assertRaisesMessage(DataError, msg): editor.alter_field(Author, old_field, new_field, strict=True) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific') def test_alter_field_with_custom_db_type(self): from django.contrib.postgres.fields import ArrayField class Foo(Model): field = ArrayField(CharField(max_length=255)) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field('field') new_field = ArrayField(CharField(max_length=16)) new_field.set_attributes_from_name('field') new_field.model = Foo with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) @isolate_apps('schema') @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific') def test_alter_array_field_decrease_base_field_length(self): from django.contrib.postgres.fields import ArrayField class ArrayModel(Model): field = ArrayField(CharField(max_length=16)) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(ArrayModel) self.isolated_local_models = [ArrayModel] ArrayModel.objects.create(field=['x' * 16]) old_field = ArrayModel._meta.get_field('field') new_field = ArrayField(CharField(max_length=15)) new_field.set_attributes_from_name('field') new_field.model = ArrayModel with connection.schema_editor() as editor: msg = 'value too long for type character varying(15)' with self.assertRaisesMessage(DataError, msg): editor.alter_field(ArrayModel, old_field, new_field, strict=True) @isolate_apps('schema') @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific') def test_alter_array_field_decrease_nested_base_field_length(self): from django.contrib.postgres.fields import ArrayField class ArrayModel(Model): field = ArrayField(ArrayField(CharField(max_length=16))) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(ArrayModel) self.isolated_local_models = [ArrayModel] ArrayModel.objects.create(field=[['x' * 16]]) old_field = ArrayModel._meta.get_field('field') new_field = ArrayField(ArrayField(CharField(max_length=15))) new_field.set_attributes_from_name('field') new_field.model = ArrayModel with connection.schema_editor() as editor: msg = 'value too long for type character varying(15)' with self.assertRaisesMessage(DataError, msg): editor.alter_field(ArrayModel, old_field, new_field, strict=True) def test_alter_textfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a TextField to null. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Note) # Change the TextField to null old_field = Note._meta.get_field('info') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) @skipUnlessDBFeature('supports_combined_alters') def test_alter_null_to_not_null_keeping_default(self): """ #23738 - Can change a nullable field with default to non-nullable with the same default. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) # Ensure the field is right to begin with columns = self.column_classes(AuthorWithDefaultHeight) self.assertTrue(columns['height'][1][6]) # Alter the height field to NOT NULL keeping the previous default old_field = AuthorWithDefaultHeight._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(AuthorWithDefaultHeight) self.assertFalse(columns['height'][1][6]) @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk(self): """ Tests altering of FKs """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], connection.features.introspected_field_types['IntegerField']) self.assertForeignKeyExists(Book, 'author_id', 'schema_author') # Alter the FK old_field = Book._meta.get_field("author") new_field = ForeignKey(Author, CASCADE, editable=False) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], connection.features.introspected_field_types['IntegerField']) self.assertForeignKeyExists(Book, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_alter_to_fk(self): """ #24447 - Tests adding a FK constraint for an existing column """ class LocalBook(Model): author = IntegerField() title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBook] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBook) # Ensure no FK constraint exists constraints = self.get_constraints(LocalBook._meta.db_table) for details in constraints.values(): if details['foreign_key']: self.fail('Found an unexpected FK constraint to %s' % details['columns']) old_field = LocalBook._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(LocalBook, old_field, new_field, strict=True) self.assertForeignKeyExists(LocalBook, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_alter_o2o_to_fk(self): """ #24163 - Tests altering of OneToOneField to ForeignKey """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) # Ensure the field is right to begin with columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], connection.features.introspected_field_types['IntegerField']) # Ensure the field is unique author = Author.objects.create(name="Joe") BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) BookWithO2O.objects.all().delete() self.assertForeignKeyExists(BookWithO2O, 'author_id', 
'schema_author') # Alter the OneToOneField to ForeignKey old_field = BookWithO2O._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], connection.features.introspected_field_types['IntegerField']) # Ensure the field is not unique anymore Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) self.assertForeignKeyExists(Book, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk_to_o2o(self): """ #24163 - Tests altering of ForeignKey to OneToOneField """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], connection.features.introspected_field_types['IntegerField']) # Ensure the field is not unique author = Author.objects.create(name="Joe") Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) Book.objects.all().delete() self.assertForeignKeyExists(Book, 'author_id', 'schema_author') # Alter the ForeignKey to OneToOneField old_field = Book._meta.get_field("author") new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], connection.features.introspected_field_types['IntegerField']) # Ensure the field is unique now BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author') def test_alter_field_fk_to_o2o(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = 1 if connection.features.supports_foreign_keys else 0 expected_indexes = 1 if connection.features.indexes_foreign_keys else 0 # Check the index is right to begin with. counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual( counts, {'fks': expected_fks, 'uniques': 0, 'indexes': expected_indexes}, ) old_field = Book._meta.get_field('author') new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The index on ForeignKey is replaced with a unique constraint for OneToOneField. 
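# OneToOneField implies unique=True, so the plain foreign key index is superseded by a unique constraint.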
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) def test_alter_field_fk_keeps_index(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = 1 if connection.features.supports_foreign_keys else 0 expected_indexes = 1 if connection.features.indexes_foreign_keys else 0 # Check the index is right to begin with. counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual( counts, {'fks': expected_fks, 'uniques': 0, 'indexes': expected_indexes}, ) old_field = Book._meta.get_field('author') # on_delete changed from CASCADE. new_field = ForeignKey(Author, PROTECT) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The index remains. self.assertEqual( counts, {'fks': expected_fks, 'uniques': 0, 'indexes': expected_indexes}, ) def test_alter_field_o2o_to_fk(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the unique constraint is right to begin with. counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) old_field = BookWithO2O._meta.get_field('author') new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The unique constraint on OneToOneField is replaced with an index for ForeignKey. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) def test_alter_field_o2o_keeps_unique(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the unique constraint is right to begin with. counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) old_field = BookWithO2O._meta.get_field('author') # on_delete changed from CASCADE. new_field = OneToOneField(Author, PROTECT) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The unique constraint remains. 
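# Only on_delete changed, and Django enforces on_delete in Python rather than in the database, so no constraint changes are expected.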
self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) @skipUnlessDBFeature('ignores_table_name_case') def test_alter_db_table_case(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Alter the case of the table old_table_name = Author._meta.db_table with connection.schema_editor() as editor: editor.alter_db_table(Author, old_table_name, old_table_name.upper()) def test_alter_implicit_id_to_explicit(self): """ Should be able to convert an implicit "id" field to an explicit "id" primary key field. """ with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("id") new_field = AutoField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # This will fail if DROP DEFAULT is inadvertently executed on this # field which drops the id sequence, at least on PostgreSQL. Author.objects.create(name='Foo') Author.objects.create(name='Bar') def test_alter_autofield_pk_to_bigautofield_pk_sequence_owner(self): """ Converting an implicit PK to BigAutoField(primary_key=True) should keep a sequence owner on PostgreSQL. """ with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) Author.objects.create(name='Foo', pk=1) with connection.cursor() as cursor: sequence_reset_sqls = connection.ops.sequence_reset_sql(no_style(), [Author]) if sequence_reset_sqls: cursor.execute(sequence_reset_sqls[0]) # Fail on PostgreSQL if sequence is missing an owner. self.assertIsNotNone(Author.objects.create(name='Bar')) def test_alter_autofield_pk_to_smallautofield_pk_sequence_owner(self): """ Converting an implicit PK to SmallAutoField(primary_key=True) should keep a sequence owner on PostgreSQL. """ with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field('id') new_field = SmallAutoField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) Author.objects.create(name='Foo', pk=1) with connection.cursor() as cursor: sequence_reset_sqls = connection.ops.sequence_reset_sql(no_style(), [Author]) if sequence_reset_sqls: cursor.execute(sequence_reset_sqls[0]) # Fail on PostgreSQL if sequence is missing an owner. self.assertIsNotNone(Author.objects.create(name='Bar')) def test_alter_int_pk_to_autofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to AutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field('i') new_field = AutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # A model representing the updated model. class IntegerPKToAutoField(Model): i = AutoField(primary_key=True) j = IntegerField(unique=True) class Meta: app_label = 'schema' apps = new_apps db_table = IntegerPK._meta.db_table # An id (i) is generated by the database. 
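# Creating a row without an explicit `i` verifies that the new AutoField is backed by a working sequence/identity.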
obj = IntegerPKToAutoField.objects.create(j=1) self.assertIsNotNone(obj.i) def test_alter_int_pk_to_bigautofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to BigAutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field('i') new_field = BigAutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # A model representing the updated model. class IntegerPKToBigAutoField(Model): i = BigAutoField(primary_key=True) j = IntegerField(unique=True) class Meta: app_label = 'schema' apps = new_apps db_table = IntegerPK._meta.db_table # An id (i) is generated by the database. obj = IntegerPKToBigAutoField.objects.create(j=1) self.assertIsNotNone(obj.i) @isolate_apps('schema') def test_alter_smallint_pk_to_smallautofield_pk(self): """ Should be able to rename an SmallIntegerField(primary_key=True) to SmallAutoField(primary_key=True). """ class SmallIntegerPK(Model): i = SmallIntegerField(primary_key=True) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(SmallIntegerPK) self.isolated_local_models = [SmallIntegerPK] old_field = SmallIntegerPK._meta.get_field('i') new_field = SmallAutoField(primary_key=True) new_field.model = SmallIntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True) def test_alter_int_pk_to_int_unique(self): """ Should be able to rename an IntegerField(primary_key=True) to IntegerField(unique=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) # Delete the old PK old_field = IntegerPK._meta.get_field('i') new_field = IntegerField(unique=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # The primary key constraint is gone. Result depends on database: # 'id' for SQLite, None for others (must not be 'i'). self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ('id', None)) # Set up a model class as it currently stands. The original IntegerPK # class is now out of date and some backends make use of the whole # model class when modifying a field (such as sqlite3 when remaking a # table) so an outdated model class leads to incorrect results. class Transitional(Model): i = IntegerField(unique=True) j = IntegerField(unique=True) class Meta: app_label = 'schema' apps = new_apps db_table = 'INTEGERPK' # model requires a new PK old_field = Transitional._meta.get_field('j') new_field = IntegerField(primary_key=True) new_field.model = Transitional new_field.set_attributes_from_name('j') with connection.schema_editor() as editor: editor.alter_field(Transitional, old_field, new_field, strict=True) # Create a model class representing the updated model. class IntegerUnique(Model): i = IntegerField(unique=True) j = IntegerField(primary_key=True) class Meta: app_label = 'schema' apps = new_apps db_table = 'INTEGERPK' # Ensure unique constraint works. 
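# A second row reusing i=1 must now raise IntegrityError via the unique constraint.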
IntegerUnique.objects.create(i=1, j=1) with self.assertRaises(IntegrityError): IntegerUnique.objects.create(i=1, j=2) def test_rename(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], connection.features.introspected_field_types['CharField']) self.assertNotIn("display_name", columns) # Alter the name field's name old_field = Author._meta.get_field("name") new_field = CharField(max_length=254) new_field.set_attributes_from_name("display_name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['display_name'][0], connection.features.introspected_field_types['CharField']) self.assertNotIn("name", columns) @isolate_apps('schema') def test_rename_referenced_field(self): class Author(Model): name = CharField(max_length=255, unique=True) class Meta: app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE, to_field='name') class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('renamed') with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor: editor.alter_field(Author, Author._meta.get_field('name'), new_field) # Ensure the foreign key reference was updated. self.assertForeignKeyExists(Book, 'author_id', 'schema_author', 'renamed') @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_rename_keep_null_status(self): """ Renaming a field shouldn't affect the not null status. 
""" with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = TextField() new_field.set_attributes_from_name("detail_info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns['detail_info'][0], "TextField") self.assertNotIn("info", columns) with self.assertRaises(IntegrityError): NoteRename.objects.create(detail_info=None) def _test_m2m_create(self, M2MFieldClass): """ Tests M2M fields on models during creation """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBookWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2M) # Ensure there is now an m2m table there columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through) self.assertEqual(columns['tagm2mtest_id'][0], connection.features.introspected_field_types['IntegerField']) def test_m2m_create(self): self._test_m2m_create(ManyToManyField) def test_m2m_create_custom(self): self._test_m2m_create(CustomManyToManyField) def test_m2m_create_inherited(self): self._test_m2m_create(InheritedManyToManyField) def _test_m2m_create_through(self, M2MFieldClass): """ Tests M2M fields on models during creation with through models """ class LocalTagThrough(Model): book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalBookWithM2MThrough(Model): tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalTagThrough, LocalBookWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalTagThrough) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2MThrough) # Ensure there is now an m2m table there columns = self.column_classes(LocalTagThrough) self.assertEqual(columns['book_id'][0], connection.features.introspected_field_types['IntegerField']) self.assertEqual(columns['tag_id'][0], connection.features.introspected_field_types['IntegerField']) def test_m2m_create_through(self): self._test_m2m_create_through(ManyToManyField) def test_m2m_create_through_custom(self): self._test_m2m_create_through(CustomManyToManyField) def test_m2m_create_through_inherited(self): self._test_m2m_create_through(InheritedManyToManyField) def _test_m2m(self, M2MFieldClass): """ Tests adding/removing M2M fields on models """ class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorWithM2M) editor.create_model(TagM2MTest) # Create an M2M field new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors") new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Add the field with 
connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) # Ensure there is now an m2m table there columns = self.column_classes(new_field.remote_field.through) self.assertEqual(columns['tagm2mtest_id'][0], connection.features.introspected_field_types['IntegerField']) # "Alter" the field. This should not rename the DB table to itself. with connection.schema_editor() as editor: editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True) # Remove the M2M table again with connection.schema_editor() as editor: editor.remove_field(LocalAuthorWithM2M, new_field) # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Make sure the model state is coherent with the table one now that # we've removed the tags field. opts = LocalAuthorWithM2M._meta opts.local_many_to_many.remove(new_field) del new_apps.all_models['schema'][new_field.remote_field.through._meta.model_name] opts._expire_cache() def test_m2m(self): self._test_m2m(ManyToManyField) def test_m2m_custom(self): self._test_m2m(CustomManyToManyField) def test_m2m_inherited(self): self._test_m2m(InheritedManyToManyField) def _test_m2m_through_alter(self, M2MFieldClass): """ Tests altering M2Ms with explicit through models (should no-op) """ class LocalAuthorTag(Model): author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalAuthorWithM2MThrough(Model): name = CharField(max_length=255) tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorTag) editor.create_model(LocalAuthorWithM2MThrough) editor.create_model(TagM2MTest) # Ensure the m2m table is there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) # "Alter" the field's blankness. This should not actually do anything. 
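# blank= only affects validation, so altering it should leave the explicit through table untouched.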
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags") new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag) new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags") with connection.schema_editor() as editor: editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field, strict=True) # Ensure the m2m table is still there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) def test_m2m_through_alter(self): self._test_m2m_through_alter(ManyToManyField) def test_m2m_through_alter_custom(self): self._test_m2m_through_alter(CustomManyToManyField) def test_m2m_through_alter_inherited(self): self._test_m2m_through_alter(InheritedManyToManyField) def _test_m2m_repoint(self, M2MFieldClass): """ Tests repointing M2M fields """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBookWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBookWithM2M) editor.create_model(TagM2MTest) editor.create_model(UniqueTest) # Ensure the M2M exists and points to TagM2MTest if connection.features.supports_foreign_keys: self.assertForeignKeyExists( LocalBookWithM2M._meta.get_field("tags").remote_field.through, 'tagm2mtest_id', 'schema_tagm2mtest', ) # Repoint the M2M old_field = LocalBookWithM2M._meta.get_field("tags") new_field = M2MFieldClass(UniqueTest) new_field.contribute_to_class(LocalBookWithM2M, "uniques") with connection.schema_editor() as editor: editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True) # Ensure old M2M is gone with self.assertRaises(DatabaseError): self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through) # This model looks like the new model and is used for teardown. opts = LocalBookWithM2M._meta opts.local_many_to_many.remove(old_field) # Ensure the new M2M exists and points to UniqueTest if connection.features.supports_foreign_keys: self.assertForeignKeyExists(new_field.remote_field.through, 'uniquetest_id', 'schema_uniquetest') def test_m2m_repoint(self): self._test_m2m_repoint(ManyToManyField) def test_m2m_repoint_custom(self): self._test_m2m_repoint(CustomManyToManyField) def test_m2m_repoint_inherited(self): self._test_m2m_repoint(InheritedManyToManyField) @isolate_apps('schema') def test_m2m_rename_field_in_target_model(self): class LocalTagM2MTest(Model): title = CharField(max_length=255) class Meta: app_label = 'schema' class LocalM2M(Model): tags = ManyToManyField(LocalTagM2MTest) class Meta: app_label = 'schema' # Create the tables. with connection.schema_editor() as editor: editor.create_model(LocalM2M) editor.create_model(LocalTagM2MTest) self.isolated_local_models = [LocalM2M, LocalTagM2MTest] # Ensure the m2m table is there. self.assertEqual(len(self.column_classes(LocalM2M)), 1) # Alter a field in LocalTagM2MTest. old_field = LocalTagM2MTest._meta.get_field('title') new_field = CharField(max_length=254) new_field.contribute_to_class(LocalTagM2MTest, 'title1') # @isolate_apps() and inner models are needed to have the model # relations populated, otherwise this doesn't act as a regression test. 
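# Sanity check that the reverse relation from the M2M through model is populated before altering.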
self.assertEqual(len(new_field.model._meta.related_objects), 1) with connection.schema_editor() as editor: editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True) # Ensure the m2m table is still there. self.assertEqual(len(self.column_classes(LocalM2M)), 1) @skipUnlessDBFeature('supports_column_check_constraints', 'can_introspect_check_constraints') def test_check_constraints(self): """ Tests creating/deleting CHECK constraints """ # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the constraint exists constraints = self.get_constraints(Author._meta.db_table) if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()): self.fail("No check constraint for height found") # Alter the column to remove it old_field = Author._meta.get_field("height") new_field = IntegerField(null=True, blank=True) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) constraints = self.get_constraints(Author._meta.db_table) for details in constraints.values(): if details['columns'] == ["height"] and details['check']: self.fail("Check constraint for height found") # Alter the column to re-add it new_field2 = Author._meta.get_field("height") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) constraints = self.get_constraints(Author._meta.db_table) if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()): self.fail("No check constraint for height found") @skipUnlessDBFeature('supports_column_check_constraints', 'can_introspect_check_constraints') def test_remove_field_check_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(Author) # Add the custom check constraint constraint = CheckConstraint(check=Q(height__gte=0), name='author_height_gte_0_check') custom_constraint_name = constraint.name Author._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) # Ensure the constraints exist constraints = self.get_constraints(Author._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Alter the column to remove field check old_field = Author._meta.get_field('height') new_field = IntegerField(null=True, blank=True) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) constraints = self.get_constraints(Author._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Alter the column to re-add field check new_field2 = Author._meta.get_field('height') with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) constraints = self.get_constraints(Author._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['height'] and details['check'] and name 
!= custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the check constraint with connection.schema_editor() as editor: Author._meta.constraints = [] editor.remove_constraint(Author, constraint) def test_unique(self): """ Tests removing and adding unique constraints to a single column. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the field is unique to begin with Tag.objects.create(title="foo", slug="foo") with self.assertRaises(IntegrityError): Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Alter the slug field to be non-unique old_field = Tag._meta.get_field("slug") new_field = SlugField(unique=False) new_field.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) # Ensure the field is no longer unique Tag.objects.create(title="foo", slug="foo") Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Alter the slug field to be unique new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field, new_field2, strict=True) # Ensure the field is unique again Tag.objects.create(title="foo", slug="foo") with self.assertRaises(IntegrityError): Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Rename the field new_field3 = SlugField(unique=True) new_field3.set_attributes_from_name("slug2") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field2, new_field3, strict=True) # Ensure the field is still unique TagUniqueRename.objects.create(title="foo", slug2="foo") with self.assertRaises(IntegrityError): TagUniqueRename.objects.create(title="bar", slug2="foo") Tag.objects.all().delete() def test_unique_name_quoting(self): old_table_name = TagUniqueRename._meta.db_table try: with connection.schema_editor() as editor: editor.create_model(TagUniqueRename) editor.alter_db_table(TagUniqueRename, old_table_name, 'unique-table') TagUniqueRename._meta.db_table = 'unique-table' # This fails if the unique index name isn't quoted. editor.alter_unique_together(TagUniqueRename, [], (('title', 'slug2'),)) finally: TagUniqueRename._meta.db_table = old_table_name @isolate_apps('schema') @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') @skipUnlessDBFeature('supports_foreign_keys') def test_unique_no_unnecessary_fk_drops(self): """ If AlterField isn't selective about dropping foreign key constraints when modifying a field with a unique constraint, the AlterField incorrectly drops and recreates the Book.author foreign key even though it doesn't restrict the field being changed (#29193). """ class Author(Model): name = CharField(max_length=254, unique=True) class Meta: app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) new_field = CharField(max_length=255, unique=True) new_field.model = Author new_field.set_attributes_from_name('name') with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm: with connection.schema_editor() as editor: editor.alter_field(Author, Author._meta.get_field('name'), new_field) # One SQL statement is executed to alter the field. 
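# i.e. the Book.author foreign key constraint is not needlessly dropped and recreated (#29193).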
self.assertEqual(len(cm.records), 1) @isolate_apps('schema') @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite remakes the table on field alteration.') def test_unique_and_reverse_m2m(self): """ AlterField can modify a unique field when there's a reverse M2M relation on the model. """ class Tag(Model): title = CharField(max_length=255) slug = SlugField(unique=True) class Meta: app_label = 'schema' class Book(Model): tags = ManyToManyField(Tag, related_name='books') class Meta: app_label = 'schema' self.isolated_local_models = [Book._meta.get_field('tags').remote_field.through] with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Book) new_field = SlugField(max_length=75, unique=True) new_field.model = Tag new_field.set_attributes_from_name('slug') with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm: with connection.schema_editor() as editor: editor.alter_field(Tag, Tag._meta.get_field('slug'), new_field) # One SQL statement is executed to alter the field. self.assertEqual(len(cm.records), 1) # Ensure that the field is still unique. Tag.objects.create(title='foo', slug='foo') with self.assertRaises(IntegrityError): Tag.objects.create(title='bar', slug='foo') @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_field_unique_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithUniqueName) # Add the custom unique constraint constraint = UniqueConstraint(fields=['name'], name='author_name_uniq') custom_constraint_name = constraint.name AuthorWithUniqueName._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(AuthorWithUniqueName, constraint) # Ensure the constraints exist constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Alter the column to remove field uniqueness old_field = AuthorWithUniqueName._meta.get_field('name') new_field = CharField(max_length=255) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True) constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Alter the column to re-add field uniqueness new_field2 = AuthorWithUniqueName._meta.get_field('name') with connection.schema_editor() as editor: editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True) constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the unique constraint with connection.schema_editor() as editor: AuthorWithUniqueName._meta.constraints = [] editor.remove_constraint(AuthorWithUniqueName, constraint) def test_unique_together(self): """ Tests removing and adding unique_together 
constraints on a model. """ # Create the table with connection.schema_editor() as editor: editor.create_model(UniqueTest) # Ensure the fields are unique to begin with UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.create(year=2011, slug="foo") UniqueTest.objects.create(year=2011, slug="bar") with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() # Alter the model to its non-unique-together companion with connection.schema_editor() as editor: editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, []) # Ensure the fields are no longer unique UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together) # Ensure the fields are unique again UniqueTest.objects.create(year=2012, slug="foo") with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() def test_unique_together_with_fk(self): """ Tests removing and adding unique_together constraints that include a foreign key. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [['author', 'title']], []) def test_unique_together_with_fk_with_existing_index(self): """ Tests removing and adding unique_together constraints that include a foreign key, where the foreign key is added after the model is created. 
""" # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithoutAuthor) new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name('author') editor.add_field(BookWithoutAuthor, new_field) # Ensure the fields aren't unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [['author', 'title']], []) @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_unique_together_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithUniqueNameAndBirthday) # Add the custom unique constraint constraint = UniqueConstraint(fields=['name', 'birthday'], name='author_name_birthday_uniq') custom_constraint_name = constraint.name AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint) # Ensure the constraints exist constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Remove unique together unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together with connection.schema_editor() as editor: editor.alter_unique_together(AuthorWithUniqueNameAndBirthday, unique_together, []) constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Re-add unique together with connection.schema_editor() as editor: editor.alter_unique_together(AuthorWithUniqueNameAndBirthday, [], unique_together) constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the unique constraint with connection.schema_editor() as editor: AuthorWithUniqueNameAndBirthday._meta.constraints = [] editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint) def test_index_together(self): """ Tests removing and adding index_together constraints on a model. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure there's no index on the year/slug columns first self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), False, ) # Alter the model to add an index with connection.schema_editor() as editor: editor.alter_index_together(Tag, [], [("slug", "title")]) # Ensure there is now an index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), True, ) # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_index_together(Tag, [("slug", "title")], []) # Ensure there's no index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), False, ) def test_index_together_with_fk(self): """ Tests removing and adding index_together constraints that include a foreign key. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.index_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_index_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_index_together(Book, [['author', 'title']], []) def test_create_index_together(self): """ Tests creating models with index_together already defined """ # Create the table with connection.schema_editor() as editor: editor.create_model(TagIndexed) # Ensure there is an index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tagindexed").values() if c['columns'] == ["slug", "title"] ), True, ) @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_index_together_does_not_remove_meta_indexes(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedNameAndBirthday) # Add the custom index index = Index(fields=['name', 'birthday'], name='author_name_birthday_idx') custom_index_name = index.name AuthorWithIndexedNameAndBirthday._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedNameAndBirthday, index) # Ensure the indexes exist constraints = self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Remove index together index_together = AuthorWithIndexedNameAndBirthday._meta.index_together with connection.schema_editor() as editor: editor.alter_index_together(AuthorWithIndexedNameAndBirthday, index_together, []) constraints = self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 0) # Re-add index together with connection.schema_editor() as editor: editor.alter_index_together(AuthorWithIndexedNameAndBirthday, [], index_together) constraints = 
self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Drop the index with connection.schema_editor() as editor: AuthorWithIndexedNameAndBirthday._meta.indexes = [] editor.remove_index(AuthorWithIndexedNameAndBirthday, index) @isolate_apps('schema') def test_db_table(self): """ Tests renaming of the table """ class Author(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = 'schema' # Create the table and one referring it. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], connection.features.introspected_field_types['CharField']) # Alter the table with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor: editor.alter_db_table(Author, "schema_author", "schema_otherauthor") # Ensure the table is there afterwards Author._meta.db_table = "schema_otherauthor" columns = self.column_classes(Author) self.assertEqual(columns['name'][0], connection.features.introspected_field_types['CharField']) # Ensure the foreign key reference was updated self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor") # Alter the table again with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor: editor.alter_db_table(Author, "schema_otherauthor", "schema_author") # Ensure the table is still there Author._meta.db_table = "schema_author" columns = self.column_classes(Author) self.assertEqual(columns['name'][0], connection.features.introspected_field_types['CharField']) def test_add_remove_index(self): """ Tests index addition and removal """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the table is there and has no index self.assertNotIn('title', self.get_indexes(Author._meta.db_table)) # Add the index index = Index(fields=['name'], name='author_title_idx') with connection.schema_editor() as editor: editor.add_index(Author, index) self.assertIn('name', self.get_indexes(Author._meta.db_table)) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn('name', self.get_indexes(Author._meta.db_table)) def test_remove_db_index_doesnt_remove_custom_indexes(self): """ Changing db_index to False doesn't remove indexes from Meta.indexes. 
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedName) # Ensure the table has its index self.assertIn('name', self.get_indexes(AuthorWithIndexedName._meta.db_table)) # Add the custom index index = Index(fields=['-name'], name='author_name_idx') author_index_name = index.name with connection.schema_editor() as editor: db_index_name = editor._create_index_name( table_name=AuthorWithIndexedName._meta.db_table, column_names=('name',), ) try: AuthorWithIndexedName._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedName, index) old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertIn(author_index_name, old_constraints) self.assertIn(db_index_name, old_constraints) # Change name field to db_index=False old_field = AuthorWithIndexedName._meta.get_field('name') new_field = CharField(max_length=255) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(AuthorWithIndexedName, old_field, new_field, strict=True) new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertNotIn(db_index_name, new_constraints) # The index from Meta.indexes is still in the database. self.assertIn(author_index_name, new_constraints) # Drop the index with connection.schema_editor() as editor: editor.remove_index(AuthorWithIndexedName, index) finally: AuthorWithIndexedName._meta.indexes = [] def test_order_index(self): """ Indexes defined with ordering (ASC/DESC) defined on column """ with connection.schema_editor() as editor: editor.create_model(Author) # The table doesn't have an index self.assertNotIn('title', self.get_indexes(Author._meta.db_table)) index_name = 'author_name_idx' # Add the index index = Index(fields=['name', '-weight'], name=index_name) with connection.schema_editor() as editor: editor.add_index(Author, index) if connection.features.supports_index_column_ordering: self.assertIndexOrder(Author._meta.db_table, index_name, ['ASC', 'DESC']) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) def test_indexes(self): """ Tests creation/altering of indexes """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there and has the right index self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to remove the index old_field = Book._meta.get_field("title") new_field = CharField(max_length=100, db_index=False) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the table is there and has no index self.assertNotIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to re-add the index new_field2 = Book._meta.get_field("title") with connection.schema_editor() as editor: editor.alter_field(Book, new_field, new_field2, strict=True) # Ensure the table is there and has the index again self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Add a unique column, verify that creates an implicit index new_field3 = BookWithSlug._meta.get_field("slug") with connection.schema_editor() as editor: editor.add_field(Book, new_field3) self.assertIn( "slug", self.get_uniques(Book._meta.db_table), ) # Remove the unique, check the index goes with it new_field4 = CharField(max_length=20, unique=False) new_field4.set_attributes_from_name("slug") with connection.schema_editor() 
as editor: editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True) self.assertNotIn( "slug", self.get_uniques(Book._meta.db_table), ) def test_text_field_with_db_index(self): with connection.schema_editor() as editor: editor.create_model(AuthorTextFieldWithIndex) # The text_field index is present if the database supports it. assertion = self.assertIn if connection.features.supports_index_on_text_field else self.assertNotIn assertion('text_field', self.get_indexes(AuthorTextFieldWithIndex._meta.db_table)) def test_primary_key(self): """ Tests altering of the primary key """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the table is there and has the right PK self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'id') # Alter to change the PK id_field = Tag._meta.get_field("id") old_field = Tag._meta.get_field("slug") new_field = SlugField(primary_key=True) new_field.set_attributes_from_name("slug") new_field.model = Tag with connection.schema_editor() as editor: editor.remove_field(Tag, id_field) editor.alter_field(Tag, old_field, new_field) # Ensure the PK changed self.assertNotIn( 'id', self.get_indexes(Tag._meta.db_table), ) self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'slug') def test_context_manager_exit(self): """ Ensures transaction is correctly closed when an error occurs inside a SchemaEditor context. """ class SomeError(Exception): pass try: with connection.schema_editor(): raise SomeError except SomeError: self.assertFalse(connection.in_atomic_block) @skipIfDBFeature('can_rollback_ddl') def test_unsupported_transactional_ddl_disallowed(self): message = ( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) with atomic(), connection.schema_editor() as editor: with self.assertRaisesMessage(TransactionManagementError, message): editor.execute(editor.sql_create_table % {'table': 'foo', 'definition': ''}) @skipUnlessDBFeature('supports_foreign_keys', 'indexes_foreign_keys') def test_foreign_key_index_long_names_regression(self): """ Regression test for #21497. Only affects databases that supports foreign keys. """ # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Find the properly shortened column name column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id") column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase # Ensure the table is there and has an index on the column self.assertIn( column_name, self.get_indexes(BookWithLongName._meta.db_table), ) @skipUnlessDBFeature('supports_foreign_keys') def test_add_foreign_key_long_names(self): """ Regression test for #23009. Only affects databases that supports foreign keys. 
""" # Create the initial tables with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Add a second FK, this would fail due to long ref name before the fix new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something") new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk") with connection.schema_editor() as editor: editor.add_field(BookWithLongName, new_field) @isolate_apps('schema') @skipUnlessDBFeature('supports_foreign_keys') def test_add_foreign_key_quoted_db_table(self): class Author(Model): class Meta: db_table = '"table_author_double_quoted"' app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) if connection.vendor == 'mysql': self.assertForeignKeyExists(Book, 'author_id', '"table_author_double_quoted"') else: self.assertForeignKeyExists(Book, 'author_id', 'table_author_double_quoted') def test_add_foreign_object(self): with connection.schema_editor() as editor: editor.create_model(BookForeignObj) new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id']) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.add_field(BookForeignObj, new_field) def test_creation_deletion_reserved_names(self): """ Tries creating a model's table, and then deleting it when it has a SQL reserved name. """ # Create the table with connection.schema_editor() as editor: try: editor.create_model(Thing) except OperationalError as e: self.fail("Errors when applying initial migration for a model " "with a table named after an SQL reserved word: %s" % e) # The table is there list(Thing.objects.all()) # Clean up that table with connection.schema_editor() as editor: editor.delete_model(Thing) # The table is gone with self.assertRaises(DatabaseError): list(Thing.objects.all()) def test_remove_constraints_capital_letters(self): """ #23065 - Constraint names must be quoted if they contain capital letters. 
""" def get_field(*args, field_class=IntegerField, **kwargs): kwargs['db_column'] = "CamelCase" field = field_class(*args, **kwargs) field.set_attributes_from_name("CamelCase") return field model = Author field = get_field() table = model._meta.db_table column = field.column identifier_converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: editor.create_model(model) editor.add_field(model, field) constraint_name = 'CamelCaseIndex' expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_index % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "using": "", "columns": editor.quote_name(column), "extra": "", "condition": "", "include": "", } ) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(db_index=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) constraint_name = 'CamelCaseUniqConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor._create_unique_sql(model, [field.column], constraint_name)) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(unique=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) if editor.sql_create_fk: constraint_name = 'CamelCaseFKConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_fk % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "column": editor.quote_name(column), "to_table": editor.quote_name(table), "to_column": editor.quote_name(model._meta.auto_field.column), "deferrable": connection.ops.deferrable_sql(), } ) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) def test_add_field_use_effective_default(self): """ #23987 - effective_default() should be used as the field default when adding a new field. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField to ensure default will be used from effective_default new_field = CharField(max_length=15, blank=True) new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '') def test_add_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField with a default new_field = CharField(max_length=15, blank=True, default='surname default') new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], 'surname default') # And that the default is no longer set in the database. field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "surname" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_alter_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') self.assertIsNone(Author.objects.get().height) old_field = Author._meta.get_field('height') # The default from the new field is used in updating existing rows. new_field = IntegerField(blank=True, default=42) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual(Author.objects.get().height, 42) # The database default should be removed. with connection.cursor() as cursor: field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "height" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') def test_alter_field_default_doesnt_perform_queries(self): """ No queries are performed if a field default changes and the field's not changing from null to non-null. 
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) old_field = AuthorWithDefaultHeight._meta.get_field('height') new_default = old_field.default * 2 new_field = PositiveIntegerField(null=True, blank=True, default=new_default) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True) @skipUnlessDBFeature('supports_foreign_keys') def test_alter_field_fk_attributes_noop(self): """ No queries are performed when changing field attributes that don't affect the schema. """ with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) old_field = Book._meta.get_field('author') new_field = ForeignKey( Author, blank=True, editable=False, error_messages={'invalid': 'error message'}, help_text='help text', limit_choices_to={'limit': 'choice'}, on_delete=PROTECT, related_name='related_name', related_query_name='related_query_name', validators=[lambda x: x], verbose_name='verbose name', ) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Book, old_field, new_field, strict=True) with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Book, new_field, old_field, strict=True) def test_add_textfield_unhashable_default(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') # Create a field that has an unhashable default new_field = TextField(default={}) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.add_field(Author, new_field) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_indexed_charfield(self): field = CharField(max_length=255, db_index=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851', 'schema_author_nom_de_plume_7570a851_like'], ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_unique_charfield(self): field = CharField(max_length=255, unique=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851_like', 'schema_author_nom_de_plume_key'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_index_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add db_index=True and create 2 indexes. 
old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, db_index=True) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, 'name'), ['schema_author_name_1fbc5617', 'schema_author_name_1fbc5617_like'] ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_unique_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add unique=True and create 2 indexes. old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, 'name'), ['schema_author_name_1fbc5617_like', 'schema_author_name_1fbc5617_uniq'] ) # Remove unique=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_index_to_textfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Note) self.assertEqual(self.get_constraints_for_column(Note, 'info'), []) # Alter to add db_index=True and create 2 indexes. old_field = Note._meta.get_field('info') new_field = TextField(db_index=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Note, 'info'), ['schema_note_info_4b0ea695', 'schema_note_info_4b0ea695_like'] ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Note, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Note, 'info'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_unique_to_charfield_with_db_index(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to remove unique=True (should drop unique index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_remove_unique_and_db_index_from_charfield(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to remove both unique=True and db_index=True (should drop all indexes) new_field2 = CharField(max_length=100) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual(self.get_constraints_for_column(BookWithoutAuthor, 'title'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_swap_unique_and_db_index_with_charfield(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to set unique=True and remove db_index=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to set db_index=True and remove unique=True (should restore index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_db_index_to_charfield_with_unique(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(Tag) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) # Alter to add db_index=True old_field = Tag._meta.get_field('slug') new_field = SlugField(db_index=True, unique=True) new_field.set_attributes_from_name('slug') with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) # Alter to remove db_index=True new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name('slug') with connection.schema_editor() as editor: editor.alter_field(Tag, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) def test_alter_field_add_index_to_integerfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'weight'), []) # Alter to add db_index=True and create index. old_field = Author._meta.get_field('weight') new_field = IntegerField(null=True, db_index=True) new_field.set_attributes_from_name('weight') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'weight'), ['schema_author_weight_587740f9']) # Remove db_index=True to drop index. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'weight'), []) def test_alter_pk_with_self_referential_field(self): """ Changing the primary key field name of a model with a self-referential foreign key (#26384). 
""" if ( connection.vendor == 'mysql' and connection.mysql_is_mariadb and (10, 4, 12) < connection.mysql_version < (10, 5) ): self.skipTest('https://jira.mariadb.org/browse/MDEV-22775') with connection.schema_editor() as editor: editor.create_model(Node) old_field = Node._meta.get_field('node_id') new_field = AutoField(primary_key=True) new_field.set_attributes_from_name('id') with connection.schema_editor() as editor: editor.alter_field(Node, old_field, new_field, strict=True) self.assertForeignKeyExists(Node, 'parent_id', Node._meta.db_table) @mock.patch('django.db.backends.base.schema.datetime') @mock.patch('django.db.backends.base.schema.timezone') def test_add_datefield_and_datetimefield_use_effective_default(self, mocked_datetime, mocked_tz): """ effective_default() should be used for DateField, DateTimeField, and TimeField if auto_now or auto_now_add is set (#25005). """ now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1) now_tz = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1, tzinfo=timezone.utc) mocked_datetime.now = mock.MagicMock(return_value=now) mocked_tz.now = mock.MagicMock(return_value=now_tz) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Check auto_now/auto_now_add attributes are not defined columns = self.column_classes(Author) self.assertNotIn("dob_auto_now", columns) self.assertNotIn("dob_auto_now_add", columns) self.assertNotIn("dtob_auto_now", columns) self.assertNotIn("dtob_auto_now_add", columns) self.assertNotIn("tob_auto_now", columns) self.assertNotIn("tob_auto_now_add", columns) # Create a row Author.objects.create(name='Anonymous1') # Ensure fields were added with the correct defaults dob_auto_now = DateField(auto_now=True) dob_auto_now.set_attributes_from_name('dob_auto_now') self.check_added_field_default( editor, Author, dob_auto_now, 'dob_auto_now', now.date(), cast_function=lambda x: x.date(), ) dob_auto_now_add = DateField(auto_now_add=True) dob_auto_now_add.set_attributes_from_name('dob_auto_now_add') self.check_added_field_default( editor, Author, dob_auto_now_add, 'dob_auto_now_add', now.date(), cast_function=lambda x: x.date(), ) dtob_auto_now = DateTimeField(auto_now=True) dtob_auto_now.set_attributes_from_name('dtob_auto_now') self.check_added_field_default( editor, Author, dtob_auto_now, 'dtob_auto_now', now, ) dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True) dt_tm_of_birth_auto_now_add.set_attributes_from_name('dtob_auto_now_add') self.check_added_field_default( editor, Author, dt_tm_of_birth_auto_now_add, 'dtob_auto_now_add', now, ) tob_auto_now = TimeField(auto_now=True) tob_auto_now.set_attributes_from_name('tob_auto_now') self.check_added_field_default( editor, Author, tob_auto_now, 'tob_auto_now', now.time(), cast_function=lambda x: x.time(), ) tob_auto_now_add = TimeField(auto_now_add=True) tob_auto_now_add.set_attributes_from_name('tob_auto_now_add') self.check_added_field_default( editor, Author, tob_auto_now_add, 'tob_auto_now_add', now.time(), cast_function=lambda x: x.time(), ) def test_namespaced_db_table_create_index_name(self): """ Table names are stripped of their namespace/schema before being used to generate index names. 
""" with connection.schema_editor() as editor: max_name_length = connection.ops.max_name_length() or 200 namespace = 'n' * max_name_length table_name = 't' * max_name_length namespaced_table_name = '"%s"."%s"' % (namespace, table_name) self.assertEqual( editor._create_index_name(table_name, []), editor._create_index_name(namespaced_table_name, []), ) @unittest.skipUnless(connection.vendor == 'oracle', 'Oracle specific db_table syntax') def test_creation_with_db_table_double_quotes(self): oracle_user = connection.creation._test_database_user() class Student(Model): name = CharField(max_length=30) class Meta: app_label = 'schema' apps = new_apps db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user class Document(Model): name = CharField(max_length=30) students = ManyToManyField(Student) class Meta: app_label = 'schema' apps = new_apps db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user self.local_models = [Student, Document] with connection.schema_editor() as editor: editor.create_model(Student) editor.create_model(Document) doc = Document.objects.create(name='Test Name') student = Student.objects.create(name='Some man') doc.students.add(student) @isolate_apps('schema') @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific db_table syntax.') def test_namespaced_db_table_foreign_key_reference(self): with connection.cursor() as cursor: cursor.execute('CREATE SCHEMA django_schema_tests') def delete_schema(): with connection.cursor() as cursor: cursor.execute('DROP SCHEMA django_schema_tests CASCADE') self.addCleanup(delete_schema) class Author(Model): class Meta: app_label = 'schema' class Book(Model): class Meta: app_label = 'schema' db_table = '"django_schema_tests"."schema_book"' author = ForeignKey(Author, CASCADE) author.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) editor.add_field(Book, author) def test_rename_table_renames_deferred_sql_references(self): atomic_rename = connection.features.supports_atomic_references_rename with connection.schema_editor(atomic=atomic_rename) as editor: editor.create_model(Author) editor.create_model(Book) editor.alter_db_table(Author, 'schema_author', 'schema_renamed_author') editor.alter_db_table(Author, 'schema_book', 'schema_renamed_book') self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_table('schema_author'), False) self.assertIs(statement.references_table('schema_book'), False) @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') def test_rename_column_renames_deferred_sql_references(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) old_title = Book._meta.get_field('title') new_title = CharField(max_length=100, db_index=True) new_title.set_attributes_from_name('renamed_title') editor.alter_field(Book, old_title, new_title) old_author = Book._meta.get_field('author') new_author = ForeignKey(Author, CASCADE) new_author.set_attributes_from_name('renamed_author') editor.alter_field(Book, old_author, new_author) self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_column('book', 'title'), False) self.assertIs(statement.references_column('book', 'author_id'), False) @isolate_apps('schema') def test_referenced_field_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database 
level constraint don't prevent the field they reference from being renamed in an atomic block. """ class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = 'schema' class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False) class Meta: app_label = 'schema' self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('renamed') with connection.schema_editor(atomic=True) as editor: editor.alter_field(Foo, Foo._meta.get_field('field'), new_field) @isolate_apps('schema') def test_referenced_table_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the table they reference from being renamed in an atomic block. """ class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = 'schema' class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False) class Meta: app_label = 'schema' self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('renamed') with connection.schema_editor(atomic=True) as editor: editor.alter_db_table(Foo, Foo._meta.db_table, 'renamed_table') Foo._meta.db_table = 'renamed_table' @isolate_apps('schema') @skipUnlessDBFeature('supports_collation_on_charfield') def test_db_collation_charfield(self): collation = connection.features.test_collations.get('non_default') if not collation: self.skipTest('Language collations are not supported.') class Foo(Model): field = CharField(max_length=255, db_collation=collation) class Meta: app_label = 'schema' self.isolated_local_models = [Foo] with connection.schema_editor() as editor: editor.create_model(Foo) self.assertEqual( self.get_column_collation(Foo._meta.db_table, 'field'), collation, ) @isolate_apps('schema') @skipUnlessDBFeature('supports_collation_on_textfield') def test_db_collation_textfield(self): collation = connection.features.test_collations.get('non_default') if not collation: self.skipTest('Language collations are not supported.') class Foo(Model): field = TextField(db_collation=collation) class Meta: app_label = 'schema' self.isolated_local_models = [Foo] with connection.schema_editor() as editor: editor.create_model(Foo) self.assertEqual( self.get_column_collation(Foo._meta.db_table, 'field'), collation, ) @skipUnlessDBFeature('supports_collation_on_charfield') def test_add_field_db_collation(self): collation = connection.features.test_collations.get('non_default') if not collation: self.skipTest('Language collations are not supported.') with connection.schema_editor() as editor: editor.create_model(Author) new_field = CharField(max_length=255, db_collation=collation) new_field.set_attributes_from_name('alias') with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) self.assertEqual( columns['alias'][0], connection.features.introspected_field_types['CharField'], ) self.assertEqual(columns['alias'][1][8], collation) @skipUnlessDBFeature('supports_collation_on_charfield') def test_alter_field_db_collation(self): collation = connection.features.test_collations.get('non_default') if not collation: self.skipTest('Language collations are not supported.') with 
connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, db_collation=collation) new_field.set_attributes_from_name('name') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_column_collation(Author._meta.db_table, 'name'), collation, ) with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertIsNone(self.get_column_collation(Author._meta.db_table, 'name')) @skipUnlessDBFeature('supports_collation_on_charfield') def test_alter_field_type_and_db_collation(self): collation = connection.features.test_collations.get('non_default') if not collation: self.skipTest('Language collations are not supported.') with connection.schema_editor() as editor: editor.create_model(Note) old_field = Note._meta.get_field('info') new_field = CharField(max_length=255, db_collation=collation) new_field.set_attributes_from_name('info') new_field.model = Note with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual( columns['info'][0], connection.features.introspected_field_types['CharField'], ) self.assertEqual(columns['info'][1][8], collation) with connection.schema_editor() as editor: editor.alter_field(Note, new_field, old_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns['info'][0], 'TextField') self.assertIsNone(columns['info'][1][8]) @skipUnlessDBFeature( 'supports_collation_on_charfield', 'supports_non_deterministic_collations', ) def test_ci_cs_db_collation(self): cs_collation = connection.features.test_collations.get('cs') ci_collation = connection.features.test_collations.get('ci') try: if connection.vendor == 'mysql': cs_collation = 'latin1_general_cs' elif connection.vendor == 'postgresql': cs_collation = 'en-x-icu' with connection.cursor() as cursor: cursor.execute( "CREATE COLLATION IF NOT EXISTS case_insensitive " "(provider = icu, locale = 'und-u-ks-level2', " "deterministic = false)" ) ci_collation = 'case_insensitive' # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) # Case-insensitive collation. old_field = Author._meta.get_field('name') new_field_ci = CharField(max_length=255, db_collation=ci_collation) new_field_ci.set_attributes_from_name('name') new_field_ci.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field_ci, strict=True) Author.objects.create(name='ANDREW') self.assertIs(Author.objects.filter(name='Andrew').exists(), True) # Case-sensitive collation. new_field_cs = CharField(max_length=255, db_collation=cs_collation) new_field_cs.set_attributes_from_name('name') new_field_cs.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, new_field_ci, new_field_cs, strict=True) self.assertIs(Author.objects.filter(name='Andrew').exists(), False) finally: if connection.vendor == 'postgresql': with connection.cursor() as cursor: cursor.execute('DROP COLLATION IF EXISTS case_insensitive')
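# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite). The tests above
# drive SchemaEditor directly against models defined elsewhere in this app;
# outside a test, the same index bookkeeping is normally declared on the model
# via Meta.indexes and db_index. The model name and index name below are
# assumptions chosen for illustration only.
def _example_meta_indexes_sketch():
    from django.db import connection, models

    class Writer(models.Model):
        name = models.CharField(max_length=255)

        class Meta:
            app_label = 'schema'
            indexes = [models.Index(fields=['-name'], name='writer_name_desc_idx')]

    # Create the table (and the declared descending index), then drop it again,
    # mirroring what the tests above do through editor.add_index()/remove_index().
    with connection.schema_editor() as editor:
        editor.create_model(Writer)
        editor.delete_model(Writer)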
from functools import update_wrapper, wraps from unittest import TestCase from django.contrib.admin.views.decorators import staff_member_required from django.contrib.auth.decorators import ( login_required, permission_required, user_passes_test, ) from django.http import HttpRequest, HttpResponse, HttpResponseNotAllowed from django.middleware.clickjacking import XFrameOptionsMiddleware from django.test import SimpleTestCase from django.utils.decorators import method_decorator from django.utils.functional import keep_lazy, keep_lazy_text, lazy from django.utils.safestring import mark_safe from django.views.decorators.cache import ( cache_control, cache_page, never_cache, ) from django.views.decorators.clickjacking import ( xframe_options_deny, xframe_options_exempt, xframe_options_sameorigin, ) from django.views.decorators.http import ( condition, require_GET, require_http_methods, require_POST, require_safe, ) from django.views.decorators.vary import vary_on_cookie, vary_on_headers def fully_decorated(request): """Expected __doc__""" return HttpResponse('<html><body>dummy</body></html>') fully_decorated.anything = "Expected __dict__" def compose(*functions): # compose(f, g)(*args, **kwargs) == f(g(*args, **kwargs)) functions = list(reversed(functions)) def _inner(*args, **kwargs): result = functions[0](*args, **kwargs) for f in functions[1:]: result = f(result) return result return _inner full_decorator = compose( # django.views.decorators.http require_http_methods(["GET"]), require_GET, require_POST, require_safe, condition(lambda r: None, lambda r: None), # django.views.decorators.vary vary_on_headers('Accept-language'), vary_on_cookie, # django.views.decorators.cache cache_page(60 * 15), cache_control(private=True), never_cache, # django.contrib.auth.decorators # Apply user_passes_test twice to check #9474 user_passes_test(lambda u: True), login_required, permission_required('change_world'), # django.contrib.admin.views.decorators staff_member_required, # django.utils.functional keep_lazy(HttpResponse), keep_lazy_text, lazy, # django.utils.safestring mark_safe, ) fully_decorated = full_decorator(fully_decorated) class DecoratorsTest(TestCase): def test_attributes(self): """ Built-in decorators set certain attributes of the wrapped function. """ self.assertEqual(fully_decorated.__name__, 'fully_decorated') self.assertEqual(fully_decorated.__doc__, 'Expected __doc__') self.assertEqual(fully_decorated.__dict__['anything'], 'Expected __dict__') def test_user_passes_test_composition(self): """ The user_passes_test decorator can be applied multiple times (#9474). """ def test1(user): user.decorators_applied.append('test1') return True def test2(user): user.decorators_applied.append('test2') return True def callback(request): return request.user.decorators_applied callback = user_passes_test(test1)(callback) callback = user_passes_test(test2)(callback) class DummyUser: pass class DummyRequest: pass request = DummyRequest() request.user = DummyUser() request.user.decorators_applied = [] response = callback(request) self.assertEqual(response, ['test2', 'test1']) def test_cache_page(self): def my_view(request): return "response" my_view_cached = cache_page(123)(my_view) self.assertEqual(my_view_cached(HttpRequest()), "response") my_view_cached2 = cache_page(123, key_prefix="test")(my_view) self.assertEqual(my_view_cached2(HttpRequest()), "response") def test_require_safe_accepts_only_safe_methods(self): """ Test for the require_safe decorator. A view returns either a response or an exception. 
Refs #15637. """ def my_view(request): return HttpResponse("OK") my_safe_view = require_safe(my_view) request = HttpRequest() request.method = 'GET' self.assertIsInstance(my_safe_view(request), HttpResponse) request.method = 'HEAD' self.assertIsInstance(my_safe_view(request), HttpResponse) request.method = 'POST' self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed) request.method = 'PUT' self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed) request.method = 'DELETE' self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed) # For testing method_decorator, a decorator that assumes a single argument. # We will get type arguments if there is a mismatch in the number of arguments. def simple_dec(func): def wrapper(arg): return func("test:" + arg) return wraps(func)(wrapper) simple_dec_m = method_decorator(simple_dec) # For testing method_decorator, two decorators that add an attribute to the function def myattr_dec(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.myattr = True return wrapper myattr_dec_m = method_decorator(myattr_dec) def myattr2_dec(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.myattr2 = True return wrapper myattr2_dec_m = method_decorator(myattr2_dec) class ClsDec: def __init__(self, myattr): self.myattr = myattr def __call__(self, f): def wrapped(): return f() and self.myattr return update_wrapper(wrapped, f) class MethodDecoratorTests(SimpleTestCase): """ Tests for method_decorator """ def test_preserve_signature(self): class Test: @simple_dec_m def say(self, arg): return arg self.assertEqual("test:hello", Test().say("hello")) def test_preserve_attributes(self): # Sanity check myattr_dec and myattr2_dec @myattr_dec def func(): pass self.assertIs(getattr(func, 'myattr', False), True) @myattr2_dec def func(): pass self.assertIs(getattr(func, 'myattr2', False), True) @myattr_dec @myattr2_dec def func(): pass self.assertIs(getattr(func, 'myattr', False), True) self.assertIs(getattr(func, 'myattr2', False), False) # Decorate using method_decorator() on the method. class TestPlain: @myattr_dec_m @myattr2_dec_m def method(self): "A method" pass # Decorate using method_decorator() on both the class and the method. # The decorators applied to the methods are applied before the ones # applied to the class. @method_decorator(myattr_dec_m, "method") class TestMethodAndClass: @method_decorator(myattr2_dec_m) def method(self): "A method" pass # Decorate using an iterable of function decorators. @method_decorator((myattr_dec, myattr2_dec), 'method') class TestFunctionIterable: def method(self): "A method" pass # Decorate using an iterable of method decorators. 
decorators = (myattr_dec_m, myattr2_dec_m) @method_decorator(decorators, "method") class TestMethodIterable: def method(self): "A method" pass tests = (TestPlain, TestMethodAndClass, TestFunctionIterable, TestMethodIterable) for Test in tests: with self.subTest(Test=Test): self.assertIs(getattr(Test().method, 'myattr', False), True) self.assertIs(getattr(Test().method, 'myattr2', False), True) self.assertIs(getattr(Test.method, 'myattr', False), True) self.assertIs(getattr(Test.method, 'myattr2', False), True) self.assertEqual(Test.method.__doc__, 'A method') self.assertEqual(Test.method.__name__, 'method') def test_new_attribute(self): """A decorator that sets a new attribute on the method.""" def decorate(func): func.x = 1 return func class MyClass: @method_decorator(decorate) def method(self): return True obj = MyClass() self.assertEqual(obj.method.x, 1) self.assertIs(obj.method(), True) def test_bad_iterable(self): decorators = {myattr_dec_m, myattr2_dec_m} msg = "'set' object is not subscriptable" with self.assertRaisesMessage(TypeError, msg): @method_decorator(decorators, "method") class TestIterable: def method(self): "A method" pass # Test for argumented decorator def test_argumented(self): class Test: @method_decorator(ClsDec(False)) def method(self): return True self.assertIs(Test().method(), False) def test_descriptors(self): def original_dec(wrapped): def _wrapped(arg): return wrapped(arg) return _wrapped method_dec = method_decorator(original_dec) class bound_wrapper: def __init__(self, wrapped): self.wrapped = wrapped self.__name__ = wrapped.__name__ def __call__(self, arg): return self.wrapped(arg) def __get__(self, instance, cls=None): return self class descriptor_wrapper: def __init__(self, wrapped): self.wrapped = wrapped self.__name__ = wrapped.__name__ def __get__(self, instance, cls=None): return bound_wrapper(self.wrapped.__get__(instance, cls)) class Test: @method_dec @descriptor_wrapper def method(self, arg): return arg self.assertEqual(Test().method(1), 1) def test_class_decoration(self): """ @method_decorator can be used to decorate a class and its methods. """ def deco(func): def _wrapper(*args, **kwargs): return True return _wrapper @method_decorator(deco, name="method") class Test: def method(self): return False self.assertTrue(Test().method()) def test_tuple_of_decorators(self): """ @method_decorator can accept a tuple of decorators. """ def add_question_mark(func): def _wrapper(*args, **kwargs): return func(*args, **kwargs) + "?" return _wrapper def add_exclamation_mark(func): def _wrapper(*args, **kwargs): return func(*args, **kwargs) + "!" return _wrapper # The order should be consistent with the usual order in which # decorators are applied, e.g. # @add_exclamation_mark # @add_question_mark # def func(): # ... decorators = (add_exclamation_mark, add_question_mark) @method_decorator(decorators, name="method") class TestFirst: def method(self): return "hello world" class TestSecond: @method_decorator(decorators) def method(self): return "hello world" self.assertEqual(TestFirst().method(), "hello world?!") self.assertEqual(TestSecond().method(), "hello world?!") def test_invalid_non_callable_attribute_decoration(self): """ @method_decorator on a non-callable attribute raises an error. 
""" msg = ( "Cannot decorate 'prop' as it isn't a callable attribute of " "<class 'Test'> (1)" ) with self.assertRaisesMessage(TypeError, msg): @method_decorator(lambda: None, name="prop") class Test: prop = 1 @classmethod def __module__(cls): return "tests" def test_invalid_method_name_to_decorate(self): """ @method_decorator on a nonexistent method raises an error. """ msg = ( "The keyword argument `name` must be the name of a method of the " "decorated class: <class 'Test'>. Got 'nonexistent_method' instead" ) with self.assertRaisesMessage(ValueError, msg): @method_decorator(lambda: None, name='nonexistent_method') class Test: @classmethod def __module__(cls): return "tests" class XFrameOptionsDecoratorsTests(TestCase): """ Tests for the X-Frame-Options decorators. """ def test_deny_decorator(self): """ Ensures @xframe_options_deny properly sets the X-Frame-Options header. """ @xframe_options_deny def a_view(request): return HttpResponse() r = a_view(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'DENY') def test_sameorigin_decorator(self): """ Ensures @xframe_options_sameorigin properly sets the X-Frame-Options header. """ @xframe_options_sameorigin def a_view(request): return HttpResponse() r = a_view(HttpRequest()) self.assertEqual(r.headers['X-Frame-Options'], 'SAMEORIGIN') def test_exempt_decorator(self): """ Ensures @xframe_options_exempt properly instructs the XFrameOptionsMiddleware to NOT set the header. """ @xframe_options_exempt def a_view(request): return HttpResponse() req = HttpRequest() resp = a_view(req) self.assertIsNone(resp.get('X-Frame-Options', None)) self.assertTrue(resp.xframe_options_exempt) # Since the real purpose of the exempt decorator is to suppress # the middleware's functionality, let's make sure it actually works... r = XFrameOptionsMiddleware(a_view)(req) self.assertIsNone(r.get('X-Frame-Options', None)) class NeverCacheDecoratorTest(TestCase): def test_never_cache_decorator(self): @never_cache def a_view(request): return HttpResponse() r = a_view(HttpRequest()) self.assertEqual( set(r.headers['Cache-Control'].split(', ')), {'max-age=0', 'no-cache', 'no-store', 'must-revalidate', 'private'}, )
from datetime import datetime from django.test import SimpleTestCase, override_settings FULL_RESPONSE = 'Test conditional get response' LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47) LAST_MODIFIED_STR = 'Sun, 21 Oct 2007 23:21:47 GMT' LAST_MODIFIED_NEWER_STR = 'Mon, 18 Oct 2010 16:56:23 GMT' LAST_MODIFIED_INVALID_STR = 'Mon, 32 Oct 2010 16:56:23 GMT' EXPIRED_LAST_MODIFIED_STR = 'Sat, 20 Oct 2007 23:21:47 GMT' ETAG = '"b4246ffc4f62314ca13147c9d4f76974"' WEAK_ETAG = 'W/"b4246ffc4f62314ca13147c9d4f76974"' # weak match to ETAG EXPIRED_ETAG = '"7fae4cd4b0f81e7d2914700043aa8ed6"' @override_settings(ROOT_URLCONF='conditional_processing.urls') class ConditionalGet(SimpleTestCase): def assertFullResponse(self, response, check_last_modified=True, check_etag=True): self.assertEqual(response.status_code, 200) self.assertEqual(response.content, FULL_RESPONSE.encode()) if response.request['REQUEST_METHOD'] in ('GET', 'HEAD'): if check_last_modified: self.assertEqual(response.headers['Last-Modified'], LAST_MODIFIED_STR) if check_etag: self.assertEqual(response.headers['ETag'], ETAG) else: self.assertNotIn('Last-Modified', response.headers) self.assertNotIn('ETag', response.headers) def assertNotModified(self, response): self.assertEqual(response.status_code, 304) self.assertEqual(response.content, b'') def test_without_conditions(self): response = self.client.get('/condition/') self.assertFullResponse(response) def test_if_modified_since(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/') self.assertFullResponse(response) def test_if_unmodified_since(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/') self.assertEqual(response.status_code, 412) def test_if_none_match(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') self.assertFullResponse(response) # Several etags in If-None-Match is a bit exotic but why not? 
self.client.defaults['HTTP_IF_NONE_MATCH'] = '%s, %s' % (ETAG, EXPIRED_ETAG) response = self.client.get('/condition/') self.assertNotModified(response) def test_weak_if_none_match(self): """ If-None-Match comparisons use weak matching, so weak and strong ETags with the same value result in a 304 response. """ self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/weak_etag/') self.assertNotModified(response) response = self.client.put('/condition/weak_etag/') self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_NONE_MATCH'] = WEAK_ETAG response = self.client.get('/condition/weak_etag/') self.assertNotModified(response) response = self.client.put('/condition/weak_etag/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertEqual(response.status_code, 412) def test_all_if_none_match(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = '*' response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/no_etag/') self.assertFullResponse(response, check_last_modified=False, check_etag=False) def test_if_match(self): self.client.defaults['HTTP_IF_MATCH'] = ETAG response = self.client.put('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG response = self.client.put('/condition/') self.assertEqual(response.status_code, 412) def test_weak_if_match(self): """ If-Match comparisons use strong matching, so any comparison involving a weak ETag return a 412 response. """ self.client.defaults['HTTP_IF_MATCH'] = ETAG response = self.client.get('/condition/weak_etag/') self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_MATCH'] = WEAK_ETAG response = self.client.get('/condition/weak_etag/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/') self.assertEqual(response.status_code, 412) def test_all_if_match(self): self.client.defaults['HTTP_IF_MATCH'] = '*' response = self.client.get('/condition/') self.assertFullResponse(response) response = self.client.get('/condition/no_etag/') self.assertEqual(response.status_code, 412) def test_both_headers(self): # see https://tools.ietf.org/html/rfc7232#section-6 self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/') self.assertNotModified(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/') self.assertNotModified(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') self.assertFullResponse(response) def test_both_headers_2(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR self.client.defaults['HTTP_IF_MATCH'] = ETAG response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR 
self.client.defaults['HTTP_IF_MATCH'] = ETAG response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') self.assertEqual(response.status_code, 412) def test_single_condition_1(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/last_modified/') self.assertNotModified(response) response = self.client.get('/condition/etag/') self.assertFullResponse(response, check_last_modified=False) def test_single_condition_2(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/etag/') self.assertNotModified(response) response = self.client.get('/condition/last_modified/') self.assertFullResponse(response, check_etag=False) def test_single_condition_3(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/last_modified/') self.assertFullResponse(response, check_etag=False) def test_single_condition_4(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/etag/') self.assertFullResponse(response, check_last_modified=False) def test_single_condition_5(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/last_modified2/') self.assertNotModified(response) response = self.client.get('/condition/etag2/') self.assertFullResponse(response, check_last_modified=False) def test_single_condition_6(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/etag2/') self.assertNotModified(response) response = self.client.get('/condition/last_modified2/') self.assertFullResponse(response, check_etag=False) def test_single_condition_7(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/last_modified/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/etag/') self.assertEqual(response.status_code, 412) def test_single_condition_8(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/last_modified/') self.assertFullResponse(response, check_etag=False) def test_single_condition_9(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/last_modified2/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/etag2/') self.assertEqual(response.status_code, 412) def test_single_condition_head(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.head('/condition/') self.assertNotModified(response) def test_unquoted(self): """ The same quoted ETag should be set on the header regardless of whether etag_func() in condition() returns a quoted or an unquoted ETag. 
""" response_quoted = self.client.get('/condition/etag/') response_unquoted = self.client.get('/condition/unquoted_etag/') self.assertEqual(response_quoted['ETag'], response_unquoted['ETag']) # It's possible that the matching algorithm could use the wrong value even # if the ETag header is set correctly correctly (as tested by # test_unquoted()), so check that the unquoted value is matched. def test_unquoted_if_none_match(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/unquoted_etag/') self.assertNotModified(response) response = self.client.put('/condition/unquoted_etag/') self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/unquoted_etag/') self.assertFullResponse(response, check_last_modified=False) def test_invalid_etag(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = '"""' response = self.client.get('/condition/etag/') self.assertFullResponse(response, check_last_modified=False)
02b8b806d3cec741c3c8a5d008ecab0ae73db81a26e10534d7f2ab6c0ddc243f
""" A series of tests to establish that the command-line management tools work as advertised - especially with regards to the handling of the DJANGO_SETTINGS_MODULE and default settings.py files. """ import os import re import shutil import socket import subprocess import sys import tempfile import unittest from io import StringIO from unittest import mock from django import conf, get_version from django.conf import settings from django.core.management import ( BaseCommand, CommandError, call_command, color, ) from django.core.management.commands.loaddata import Command as LoaddataCommand from django.core.management.commands.runserver import ( Command as RunserverCommand, ) from django.core.management.commands.testserver import ( Command as TestserverCommand, ) from django.db import ConnectionHandler, connection from django.db.migrations.recorder import MigrationRecorder from django.test import ( LiveServerTestCase, SimpleTestCase, TestCase, override_settings, ) custom_templates_dir = os.path.join(os.path.dirname(__file__), 'custom_templates') SYSTEM_CHECK_MSG = 'System check identified no issues' class AdminScriptTestCase(SimpleTestCase): def setUp(self): tmpdir = tempfile.TemporaryDirectory() self.addCleanup(tmpdir.cleanup) # os.path.realpath() is required for temporary directories on macOS, # where `/var` is a symlink to `/private/var`. self.test_dir = os.path.realpath(os.path.join(tmpdir.name, 'test_project')) os.mkdir(self.test_dir) def write_settings(self, filename, apps=None, is_dir=False, sdict=None, extra=None): if is_dir: settings_dir = os.path.join(self.test_dir, filename) os.mkdir(settings_dir) settings_file_path = os.path.join(settings_dir, '__init__.py') else: settings_file_path = os.path.join(self.test_dir, filename) with open(settings_file_path, 'w') as settings_file: settings_file.write('# Settings file automatically generated by admin_scripts test case\n') if extra: settings_file.write("%s\n" % extra) exports = [ 'DATABASES', 'ROOT_URLCONF', 'SECRET_KEY', ] for s in exports: if hasattr(settings, s): o = getattr(settings, s) if not isinstance(o, (dict, tuple, list)): o = "'%s'" % o settings_file.write("%s = %s\n" % (s, o)) if apps is None: apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts'] settings_file.write("INSTALLED_APPS = %s\n" % apps) if sdict: for k, v in sdict.items(): settings_file.write("%s = %s\n" % (k, v)) def _ext_backend_paths(self): """ Returns the paths for any external backend packages. """ paths = [] for backend in settings.DATABASES.values(): package = backend['ENGINE'].split('.')[0] if package != 'django': backend_pkg = __import__(package) backend_dir = os.path.dirname(backend_pkg.__file__) paths.append(os.path.dirname(backend_dir)) return paths def run_test(self, args, settings_file=None, apps=None): base_dir = os.path.dirname(self.test_dir) # The base dir for Django's tests is one level up. tests_dir = os.path.dirname(os.path.dirname(__file__)) # The base dir for Django is one level above the test dir. We don't use # `import django` to figure that out, so we don't pick up a Django # from site-packages or similar. 
django_dir = os.path.dirname(tests_dir) ext_backend_base_dirs = self._ext_backend_paths() # Define a temporary environment for the subprocess test_environ = os.environ.copy() # Set the test environment if settings_file: test_environ['DJANGO_SETTINGS_MODULE'] = settings_file elif 'DJANGO_SETTINGS_MODULE' in test_environ: del test_environ['DJANGO_SETTINGS_MODULE'] python_path = [base_dir, django_dir, tests_dir] python_path.extend(ext_backend_base_dirs) test_environ['PYTHONPATH'] = os.pathsep.join(python_path) test_environ['PYTHONWARNINGS'] = '' p = subprocess.run( [sys.executable, *args], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.test_dir, env=test_environ, universal_newlines=True, ) return p.stdout, p.stderr def run_django_admin(self, args, settings_file=None): return self.run_test(['-m', 'django', *args], settings_file) def run_manage(self, args, settings_file=None, manage_py=None): template_manage_py = ( os.path.join(os.path.dirname(__file__), manage_py) if manage_py else os.path.join(os.path.dirname(conf.__file__), 'project_template', 'manage.py-tpl') ) test_manage_py = os.path.join(self.test_dir, 'manage.py') shutil.copyfile(template_manage_py, test_manage_py) with open(test_manage_py) as fp: manage_py_contents = fp.read() manage_py_contents = manage_py_contents.replace( "{{ project_name }}", "test_project") with open(test_manage_py, 'w') as fp: fp.write(manage_py_contents) return self.run_test(['./manage.py', *args], settings_file) def assertNoOutput(self, stream): "Utility assertion: assert that the given stream is empty" self.assertEqual(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream) def assertOutput(self, stream, msg, regex=False): "Utility assertion: assert that the given message exists in the output" if regex: self.assertIsNotNone( re.search(msg, stream), "'%s' does not match actual output text '%s'" % (msg, stream) ) else: self.assertIn(msg, stream, "'%s' does not match actual output text '%s'" % (msg, stream)) def assertNotInOutput(self, stream, msg): "Utility assertion: assert that the given message doesn't exist in the output" self.assertNotIn(msg, stream, "'%s' matches actual output text '%s'" % (msg, stream)) ########################################################################## # DJANGO ADMIN TESTS # This first series of test classes checks the environment processing # of the django-admin.py script ########################################################################## class DjangoAdminNoSettings(AdminScriptTestCase): "A series of tests for django-admin.py when there is no settings.py file." 
def test_builtin_command(self): "no settings: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_bad_settings(self): "no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_commands_with_invalid_settings(self): """ Commands that don't require settings succeed if the settings file doesn't exist. """ args = ['startproject'] out, err = self.run_django_admin(args, settings_file='bad_settings') self.assertNoOutput(out) self.assertOutput(err, "You must provide a project name", regex=True) class DjangoAdminDefaultSettings(AdminScriptTestCase): """A series of tests for django-admin.py when using a settings.py file that contains the test application. """ def setUp(self): super().setUp() self.write_settings('settings.py') def test_builtin_command(self): "default: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_settings(self): "default: django-admin builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "default: django-admin builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "default: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "default: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "default: django-admin can't execute user commands if it isn't provided settings" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "default: django-admin can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = 
self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "default: django-admin can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase): """A series of tests for django-admin.py when using a settings.py file that contains the test application specified using a full path. """ def setUp(self): super().setUp() self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts', 'admin_scripts.complex_app']) def test_builtin_command(self): "fulldefault: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_settings(self): "fulldefault: django-admin builtin commands succeed if a settings file is provided" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "fulldefault: django-admin builtin commands succeed if the environment contains settings" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "fulldefault: django-admin can't execute user commands unless settings are provided" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "fulldefault: django-admin can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "fulldefault: django-admin can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminMinimalSettings(AdminScriptTestCase): """A series of tests for django-admin.py when using a settings.py file that doesn't contain the test application. 
""" def setUp(self): super().setUp() self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) def test_builtin_command(self): "minimal: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_settings(self): "minimal: django-admin builtin commands fail if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_environment(self): "minimal: django-admin builtin commands fail if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_bad_settings(self): "minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "minimal: django-admin can't execute user commands unless settings are provided" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "minimal: django-admin can't execute user commands, even if settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_environment(self): "minimal: django-admin can't execute user commands, even if settings are provided in environment" args = ['noargs_command'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") class DjangoAdminAlternateSettings(AdminScriptTestCase): """A series of tests for django-admin.py when using a settings file with a name other than 'settings.py'. 
""" def setUp(self): super().setUp() self.write_settings('alternate_settings.py') def test_builtin_command(self): "alternate: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_settings(self): "alternate: django-admin builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "alternate: django-admin builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.alternate_settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "alternate: django-admin can't execute user commands unless settings are provided" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "alternate: django-admin can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=test_project.alternate_settings'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "alternate: django-admin can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_django_admin(args, 'test_project.alternate_settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminMultipleSettings(AdminScriptTestCase): """A series of tests for django-admin.py when multiple settings files (including the default 'settings.py') are available. The default settings file is insufficient for performing the operations described, so the alternate settings must be used by the running script. 
""" def setUp(self): super().setUp() self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) self.write_settings('alternate_settings.py') def test_builtin_command(self): "alternate: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_settings(self): "alternate: django-admin builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "alternate: django-admin builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.alternate_settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "alternate: django-admin can't execute user commands unless settings are provided" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "alternate: django-admin can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=test_project.alternate_settings'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "alternate: django-admin can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_django_admin(args, 'test_project.alternate_settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminSettingsDirectory(AdminScriptTestCase): """ A series of tests for django-admin.py when the settings file is in a directory. (see #9751). 
""" def setUp(self): super().setUp() self.write_settings('settings', is_dir=True) def test_setup_environ(self): "directory: startapp creates the correct directory" args = ['startapp', 'settings_test'] app_path = os.path.join(self.test_dir, 'settings_test') out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertTrue(os.path.exists(app_path)) with open(os.path.join(app_path, 'apps.py')) as f: content = f.read() self.assertIn("class SettingsTestConfig(AppConfig)", content) self.assertIn("name = 'settings_test'", content) def test_setup_environ_custom_template(self): "directory: startapp creates the correct directory with a custom template" template_path = os.path.join(custom_templates_dir, 'app_template') args = ['startapp', '--template', template_path, 'custom_settings_test'] app_path = os.path.join(self.test_dir, 'custom_settings_test') out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertTrue(os.path.exists(app_path)) self.assertTrue(os.path.exists(os.path.join(app_path, 'api.py'))) def test_startapp_unicode_name(self): """startapp creates the correct directory with Unicode characters.""" args = ['startapp', 'こんにちは'] app_path = os.path.join(self.test_dir, 'こんにちは') out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertTrue(os.path.exists(app_path)) with open(os.path.join(app_path, 'apps.py'), encoding='utf8') as f: content = f.read() self.assertIn("class こんにちはConfig(AppConfig)", content) self.assertIn("name = 'こんにちは'", content) def test_builtin_command(self): "directory: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_bad_settings(self): "directory: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "directory: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "directory: django-admin can't execute user commands unless settings are provided" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_builtin_with_settings(self): "directory: django-admin builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "directory: django-admin builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) ########################################################################## # MANAGE.PY TESTS # This next series of test classes checks 
the environment processing # of the generated manage.py script ########################################################################## class ManageManuallyConfiguredSettings(AdminScriptTestCase): """Customized manage.py calling settings.configure().""" def test_non_existent_command_output(self): out, err = self.run_manage(['invalid_command'], manage_py='configured_settings_manage.py') self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'invalid_command'") self.assertNotInOutput(err, 'No Django settings specified') class ManageNoSettings(AdminScriptTestCase): "A series of tests for manage.py when there is no settings.py file." def test_builtin_command(self): "no settings: manage.py builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, r"No module named '?(test_project\.)?settings'?", regex=True) def test_builtin_with_bad_settings(self): "no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) class ManageDefaultSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings.py file that contains the test application. """ def setUp(self): super().setUp() self.write_settings('settings.py') def test_builtin_command(self): "default: manage.py builtin commands succeed when default settings are appropriate" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_settings(self): "default: manage.py builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "default: manage.py builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "default: manage.py builtin commands succeed if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "default: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "default: manage.py can execute user commands when default settings are appropriate" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def 
test_custom_command_with_settings(self): "default: manage.py can execute user commands when settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "default: manage.py can execute user commands when settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class ManageFullPathDefaultSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings.py file that contains the test application specified using a full path. """ def setUp(self): super().setUp() self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts']) def test_builtin_command(self): "fulldefault: manage.py builtin commands succeed when default settings are appropriate" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_settings(self): "fulldefault: manage.py builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "fulldefault: manage.py builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "fulldefault: manage.py can execute user commands when default settings are appropriate" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_settings(self): "fulldefault: manage.py can execute user commands when settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "fulldefault: manage.py can execute user commands when settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class ManageMinimalSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings.py file that doesn't contain the test application. 
""" def setUp(self): super().setUp() self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) def test_builtin_command(self): "minimal: manage.py builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_settings(self): "minimal: manage.py builtin commands fail if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_environment(self): "minimal: manage.py builtin commands fail if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_bad_settings(self): "minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "minimal: manage.py can't execute user commands without appropriate settings" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "minimal: manage.py can't execute user commands, even if settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_environment(self): "minimal: manage.py can't execute user commands, even if settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") class ManageAlternateSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings file with a name other than 'settings.py'. 
""" def setUp(self): super().setUp() self.write_settings('alternate_settings.py') def test_builtin_command(self): "alternate: manage.py builtin commands fail with an error when no default settings provided" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, r"No module named '?(test_project\.)?settings'?", regex=True) def test_builtin_with_settings(self): "alternate: manage.py builtin commands work with settings provided as argument" args = ['check', '--settings=alternate_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertOutput(out, SYSTEM_CHECK_MSG) self.assertNoOutput(err) def test_builtin_with_environment(self): "alternate: manage.py builtin commands work if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'alternate_settings') self.assertOutput(out, SYSTEM_CHECK_MSG) self.assertNoOutput(err) def test_builtin_with_bad_settings(self): "alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "alternate: manage.py can't execute user commands without settings" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, r"No module named '?(test_project\.)?settings'?", regex=True) def test_custom_command_with_settings(self): "alternate: manage.py can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=alternate_settings'] out, err = self.run_manage(args) self.assertOutput( out, "EXECUTE: noargs_command options=[('force_color', False), " "('no_color', False), ('pythonpath', None), ('settings', " "'alternate_settings'), ('traceback', False), ('verbosity', 1)]" ) self.assertNoOutput(err) def test_custom_command_with_environment(self): "alternate: manage.py can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'alternate_settings') self.assertOutput( out, "EXECUTE: noargs_command options=[('force_color', False), " "('no_color', False), ('pythonpath', None), ('settings', None), " "('traceback', False), ('verbosity', 1)]" ) self.assertNoOutput(err) def test_custom_command_output_color(self): "alternate: manage.py output syntax color can be deactivated with the `--no-color` option" args = ['noargs_command', '--no-color', '--settings=alternate_settings'] out, err = self.run_manage(args) self.assertOutput( out, "EXECUTE: noargs_command options=[('force_color', False), " "('no_color', True), ('pythonpath', None), ('settings', " "'alternate_settings'), ('traceback', False), ('verbosity', 1)]" ) self.assertNoOutput(err) class ManageMultipleSettings(AdminScriptTestCase): """A series of tests for manage.py when multiple settings files (including the default 'settings.py') are available. The default settings file is insufficient for performing the operations described, so the alternate settings must be used by the running script. 
""" def setUp(self): super().setUp() self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) self.write_settings('alternate_settings.py') def test_builtin_command(self): "multiple: manage.py builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_settings(self): "multiple: manage.py builtin commands succeed if settings are provided as argument" args = ['check', '--settings=alternate_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "multiple: manage.py can execute builtin commands if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'alternate_settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "multiple: manage.py can't execute user commands using default settings" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "multiple: manage.py can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=alternate_settings'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "multiple: manage.py can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'alternate_settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class ManageSettingsWithSettingsErrors(AdminScriptTestCase): """ Tests for manage.py when using the default settings.py file containing runtime errors. """ def write_settings_with_import_error(self, filename): settings_file_path = os.path.join(self.test_dir, filename) with open(settings_file_path, 'w') as settings_file: settings_file.write('# Settings file automatically generated by admin_scripts test case\n') settings_file.write('# The next line will cause an import error:\nimport foo42bar\n') def test_import_error(self): """ import error: manage.py builtin commands shows useful diagnostic info when settings with import errors is provided (#14130). """ self.write_settings_with_import_error('settings.py') args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named") self.assertOutput(err, "foo42bar") def test_attribute_error(self): """ manage.py builtin commands does not swallow attribute error due to bad settings (#18845). 
""" self.write_settings('settings.py', sdict={'BAD_VAR': 'INSTALLED_APPS.crash'}) args = ['collectstatic', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "AttributeError: 'list' object has no attribute 'crash'") def test_key_error(self): self.write_settings('settings.py', sdict={'BAD_VAR': 'DATABASES["blah"]'}) args = ['collectstatic', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "KeyError: 'blah'") def test_help(self): """ Test listing available commands output note when only core commands are available. """ self.write_settings( 'settings.py', extra='from django.core.exceptions import ImproperlyConfigured\n' 'raise ImproperlyConfigured()', ) args = ['help'] out, err = self.run_manage(args) self.assertOutput(out, 'only Django core commands are listed') self.assertNoOutput(err) class ManageCheck(AdminScriptTestCase): def test_nonexistent_app(self): """check reports an error on a nonexistent app in INSTALLED_APPS.""" self.write_settings( 'settings.py', apps=['admin_scriptz.broken_app'], sdict={'USE_I18N': False}, ) args = ['check'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, 'ModuleNotFoundError') self.assertOutput(err, 'No module named') self.assertOutput(err, 'admin_scriptz') def test_broken_app(self): """ manage.py check reports an ImportError if an app's models.py raises one on import """ self.write_settings('settings.py', apps=['admin_scripts.broken_app']) args = ['check'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, 'ImportError') def test_complex_app(self): """ manage.py check does not raise an ImportError validating a complex app with nested calls to load_app """ self.write_settings( 'settings.py', apps=[ 'admin_scripts.complex_app', 'admin_scripts.simple_app', 'django.contrib.admin.apps.SimpleAdminConfig', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.messages', ], sdict={ 'DEBUG': True, 'MIDDLEWARE': [ 'django.contrib.messages.middleware.MessageMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', ], 'TEMPLATES': [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ], } ) args = ['check'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertEqual(out, 'System check identified no issues (0 silenced).\n') def test_app_with_import(self): """ manage.py check does not raise errors when an app imports a base class that itself has an abstract base. """ self.write_settings( 'settings.py', apps=[ 'admin_scripts.app_with_import', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sites', ], sdict={'DEBUG': True}, ) args = ['check'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertEqual(out, 'System check identified no issues (0 silenced).\n') def test_output_format(self): """ All errors/warnings should be sorted by level and by message. 
""" self.write_settings( 'settings.py', apps=[ 'admin_scripts.app_raising_messages', 'django.contrib.auth', 'django.contrib.contenttypes', ], sdict={'DEBUG': True}, ) args = ['check'] out, err = self.run_manage(args) expected_err = ( "SystemCheckError: System check identified some issues:\n" "\n" "ERRORS:\n" "?: An error\n" "\tHINT: Error hint\n" "\n" "WARNINGS:\n" "a: Second warning\n" "obj: First warning\n" "\tHINT: Hint\n" "\n" "System check identified 3 issues (0 silenced).\n" ) self.assertEqual(err, expected_err) self.assertNoOutput(out) def test_warning_does_not_halt(self): """ When there are only warnings or less serious messages, then Django should not prevent user from launching their project, so `check` command should not raise `CommandError` exception. In this test we also test output format. """ self.write_settings( 'settings.py', apps=[ 'admin_scripts.app_raising_warning', 'django.contrib.auth', 'django.contrib.contenttypes', ], sdict={'DEBUG': True}, ) args = ['check'] out, err = self.run_manage(args) expected_err = ( "System check identified some issues:\n" # No "CommandError: " part "\n" "WARNINGS:\n" "?: A warning\n" "\n" "System check identified 1 issue (0 silenced).\n" ) self.assertEqual(err, expected_err) self.assertNoOutput(out) class ManageRunserver(SimpleTestCase): def setUp(self): def monkey_run(*args, **options): return self.output = StringIO() self.cmd = RunserverCommand(stdout=self.output) self.cmd.run = monkey_run def assertServerSettings(self, addr, port, ipv6=False, raw_ipv6=False): self.assertEqual(self.cmd.addr, addr) self.assertEqual(self.cmd.port, port) self.assertEqual(self.cmd.use_ipv6, ipv6) self.assertEqual(self.cmd._raw_ipv6, raw_ipv6) def test_runserver_addrport(self): call_command(self.cmd) self.assertServerSettings('127.0.0.1', '8000') call_command(self.cmd, addrport="1.2.3.4:8000") self.assertServerSettings('1.2.3.4', '8000') call_command(self.cmd, addrport="7000") self.assertServerSettings('127.0.0.1', '7000') @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6") def test_runner_addrport_ipv6(self): call_command(self.cmd, addrport="", use_ipv6=True) self.assertServerSettings('::1', '8000', ipv6=True, raw_ipv6=True) call_command(self.cmd, addrport="7000", use_ipv6=True) self.assertServerSettings('::1', '7000', ipv6=True, raw_ipv6=True) call_command(self.cmd, addrport="[2001:0db8:1234:5678::9]:7000") self.assertServerSettings('2001:0db8:1234:5678::9', '7000', ipv6=True, raw_ipv6=True) def test_runner_hostname(self): call_command(self.cmd, addrport="localhost:8000") self.assertServerSettings('localhost', '8000') call_command(self.cmd, addrport="test.domain.local:7000") self.assertServerSettings('test.domain.local', '7000') @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6") def test_runner_hostname_ipv6(self): call_command(self.cmd, addrport="test.domain.local:7000", use_ipv6=True) self.assertServerSettings('test.domain.local', '7000', ipv6=True) def test_runner_custom_defaults(self): self.cmd.default_addr = '0.0.0.0' self.cmd.default_port = '5000' call_command(self.cmd) self.assertServerSettings('0.0.0.0', '5000') @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6") def test_runner_custom_defaults_ipv6(self): self.cmd.default_addr_ipv6 = '::' call_command(self.cmd, use_ipv6=True) self.assertServerSettings('::', '8000', ipv6=True, raw_ipv6=True) def test_runner_ambiguous(self): # Only 4 characters, all of which could be in an ipv6 address call_command(self.cmd, addrport="beef:7654") 
self.assertServerSettings('beef', '7654') # Uses only characters that could be in an ipv6 address call_command(self.cmd, addrport="deadbeef:7654") self.assertServerSettings('deadbeef', '7654') def test_no_database(self): """ Ensure runserver.check_migrations doesn't choke on empty DATABASES. """ tested_connections = ConnectionHandler({}) with mock.patch('django.core.management.base.connections', new=tested_connections): self.cmd.check_migrations() def test_readonly_database(self): """ runserver.check_migrations() doesn't choke when a database is read-only. """ with mock.patch.object(MigrationRecorder, 'has_table', return_value=False): self.cmd.check_migrations() # You have # ... self.assertIn('unapplied migration(s)', self.output.getvalue()) class ManageRunserverMigrationWarning(TestCase): def setUp(self): self.stdout = StringIO() self.runserver_command = RunserverCommand(stdout=self.stdout) @override_settings(INSTALLED_APPS=["admin_scripts.app_waiting_migration"]) def test_migration_warning_one_app(self): self.runserver_command.check_migrations() output = self.stdout.getvalue() self.assertIn('You have 1 unapplied migration(s)', output) self.assertIn('apply the migrations for app(s): app_waiting_migration.', output) @override_settings( INSTALLED_APPS=[ "admin_scripts.app_waiting_migration", "admin_scripts.another_app_waiting_migration", ], ) def test_migration_warning_multiple_apps(self): self.runserver_command.check_migrations() output = self.stdout.getvalue() self.assertIn('You have 2 unapplied migration(s)', output) self.assertIn( 'apply the migrations for app(s): another_app_waiting_migration, ' 'app_waiting_migration.', output ) class ManageRunserverEmptyAllowedHosts(AdminScriptTestCase): def setUp(self): super().setUp() self.write_settings('settings.py', sdict={ 'ALLOWED_HOSTS': [], 'DEBUG': False, }) def test_empty_allowed_hosts_error(self): out, err = self.run_manage(['runserver']) self.assertNoOutput(out) self.assertOutput(err, 'CommandError: You must set settings.ALLOWED_HOSTS if DEBUG is False.') class ManageTestserver(SimpleTestCase): @mock.patch.object(TestserverCommand, 'handle', return_value='') def test_testserver_handle_params(self, mock_handle): out = StringIO() call_command('testserver', 'blah.json', stdout=out) mock_handle.assert_called_with( 'blah.json', stdout=out, settings=None, pythonpath=None, verbosity=1, traceback=False, addrport='', no_color=False, use_ipv6=False, skip_checks=True, interactive=True, force_color=False, ) @mock.patch('django.db.connection.creation.create_test_db', return_value='test_db') @mock.patch.object(LoaddataCommand, 'handle', return_value='') @mock.patch.object(RunserverCommand, 'handle', return_value='') def test_params_to_runserver(self, mock_runserver_handle, mock_loaddata_handle, mock_create_test_db): call_command('testserver', 'blah.json') mock_runserver_handle.assert_called_with( addrport='', force_color=False, insecure_serving=False, no_color=False, pythonpath=None, settings=None, shutdown_message=( "\nServer stopped.\nNote that the test database, 'test_db', " "has not been deleted. You can explore it on your own." ), skip_checks=True, traceback=False, use_ipv6=False, use_reloader=False, use_static_handler=True, use_threading=connection.features.test_db_allows_multiple_connections, verbosity=1, ) ########################################################################## # COMMAND PROCESSING TESTS # user-space commands are correctly handled - in particular, arguments to # the commands are correctly parsed and processed. 
########################################################################## class ColorCommand(BaseCommand): requires_system_checks = [] def handle(self, *args, **options): self.stdout.write('Hello, world!', self.style.ERROR) self.stderr.write('Hello, world!', self.style.ERROR) class CommandTypes(AdminScriptTestCase): "Tests for the various types of base command types that can be defined." def setUp(self): super().setUp() self.write_settings('settings.py') def test_version(self): "version is handled as a special case" args = ['version'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, get_version()) def test_version_alternative(self): "--version is equivalent to version" args1, args2 = ['version'], ['--version'] # It's possible one outputs on stderr and the other on stdout, hence the set self.assertEqual(set(self.run_manage(args1)), set(self.run_manage(args2))) def test_help(self): "help is handled as a special case" args = ['help'] out, err = self.run_manage(args) self.assertOutput(out, "Type 'manage.py help <subcommand>' for help on a specific subcommand.") self.assertOutput(out, '[django]') self.assertOutput(out, 'startapp') self.assertOutput(out, 'startproject') def test_help_commands(self): "help --commands shows the list of all available commands" args = ['help', '--commands'] out, err = self.run_manage(args) self.assertNotInOutput(out, 'usage:') self.assertNotInOutput(out, 'Options:') self.assertNotInOutput(out, '[django]') self.assertOutput(out, 'startapp') self.assertOutput(out, 'startproject') self.assertNotInOutput(out, '\n\n') def test_help_alternative(self): "--help is equivalent to help" args1, args2 = ['help'], ['--help'] self.assertEqual(self.run_manage(args1), self.run_manage(args2)) def test_help_short_altert(self): "-h is handled as a short form of --help" args1, args2 = ['--help'], ['-h'] self.assertEqual(self.run_manage(args1), self.run_manage(args2)) def test_specific_help(self): "--help can be used on a specific command" args = ['check', '--help'] out, err = self.run_manage(args) self.assertNoOutput(err) # Command-specific options like --tag appear before options common to # all commands like --version. tag_location = out.find('--tag') version_location = out.find('--version') self.assertNotEqual(tag_location, -1) self.assertNotEqual(version_location, -1) self.assertLess(tag_location, version_location) self.assertOutput(out, "Checks the entire Django project for potential problems.") def test_color_style(self): style = color.no_style() self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!') style = color.make_style('nocolor') self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!') style = color.make_style('dark') self.assertIn('Hello, world!', style.ERROR('Hello, world!')) self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!') # Default palette has color. 
style = color.make_style('') self.assertIn('Hello, world!', style.ERROR('Hello, world!')) self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!') def test_command_color(self): out = StringIO() err = StringIO() command = ColorCommand(stdout=out, stderr=err) call_command(command) if color.supports_color(): self.assertIn('Hello, world!\n', out.getvalue()) self.assertIn('Hello, world!\n', err.getvalue()) self.assertNotEqual(out.getvalue(), 'Hello, world!\n') self.assertNotEqual(err.getvalue(), 'Hello, world!\n') else: self.assertEqual(out.getvalue(), 'Hello, world!\n') self.assertEqual(err.getvalue(), 'Hello, world!\n') def test_command_no_color(self): "--no-color prevent colorization of the output" out = StringIO() err = StringIO() command = ColorCommand(stdout=out, stderr=err, no_color=True) call_command(command) self.assertEqual(out.getvalue(), 'Hello, world!\n') self.assertEqual(err.getvalue(), 'Hello, world!\n') out = StringIO() err = StringIO() command = ColorCommand(stdout=out, stderr=err) call_command(command, no_color=True) self.assertEqual(out.getvalue(), 'Hello, world!\n') self.assertEqual(err.getvalue(), 'Hello, world!\n') def test_force_color_execute(self): out = StringIO() err = StringIO() with mock.patch.object(sys.stdout, 'isatty', lambda: False): command = ColorCommand(stdout=out, stderr=err) call_command(command, force_color=True) self.assertEqual(out.getvalue(), '\x1b[31;1mHello, world!\n\x1b[0m') self.assertEqual(err.getvalue(), '\x1b[31;1mHello, world!\n\x1b[0m') def test_force_color_command_init(self): out = StringIO() err = StringIO() with mock.patch.object(sys.stdout, 'isatty', lambda: False): command = ColorCommand(stdout=out, stderr=err, force_color=True) call_command(command) self.assertEqual(out.getvalue(), '\x1b[31;1mHello, world!\n\x1b[0m') self.assertEqual(err.getvalue(), '\x1b[31;1mHello, world!\n\x1b[0m') def test_no_color_force_color_mutually_exclusive_execute(self): msg = "The --no-color and --force-color options can't be used together." with self.assertRaisesMessage(CommandError, msg): call_command(BaseCommand(), no_color=True, force_color=True) def test_no_color_force_color_mutually_exclusive_command_init(self): msg = "'no_color' and 'force_color' can't be used together." 
with self.assertRaisesMessage(CommandError, msg): call_command(BaseCommand(no_color=True, force_color=True)) def test_custom_stdout(self): class Command(BaseCommand): requires_system_checks = [] def handle(self, *args, **options): self.stdout.write("Hello, World!") out = StringIO() command = Command(stdout=out) call_command(command) self.assertEqual(out.getvalue(), "Hello, World!\n") out.truncate(0) new_out = StringIO() call_command(command, stdout=new_out) self.assertEqual(out.getvalue(), "") self.assertEqual(new_out.getvalue(), "Hello, World!\n") def test_custom_stderr(self): class Command(BaseCommand): requires_system_checks = [] def handle(self, *args, **options): self.stderr.write("Hello, World!") err = StringIO() command = Command(stderr=err) call_command(command) self.assertEqual(err.getvalue(), "Hello, World!\n") err.truncate(0) new_err = StringIO() call_command(command, stderr=new_err) self.assertEqual(err.getvalue(), "") self.assertEqual(new_err.getvalue(), "Hello, World!\n") def test_base_command(self): "User BaseCommands can execute when a label is provided" args = ['base_command', 'testlabel'] expected_labels = "('testlabel',)" self._test_base_command(args, expected_labels) def test_base_command_no_label(self): "User BaseCommands can execute when no labels are provided" args = ['base_command'] expected_labels = "()" self._test_base_command(args, expected_labels) def test_base_command_multiple_label(self): "User BaseCommands can execute when no labels are provided" args = ['base_command', 'testlabel', 'anotherlabel'] expected_labels = "('testlabel', 'anotherlabel')" self._test_base_command(args, expected_labels) def test_base_command_with_option(self): "User BaseCommands can execute with options when a label is provided" args = ['base_command', 'testlabel', '--option_a=x'] expected_labels = "('testlabel',)" self._test_base_command(args, expected_labels, option_a="'x'") def test_base_command_with_options(self): "User BaseCommands can execute with multiple options when a label is provided" args = ['base_command', 'testlabel', '-a', 'x', '--option_b=y'] expected_labels = "('testlabel',)" self._test_base_command(args, expected_labels, option_a="'x'", option_b="'y'") def test_base_command_with_wrong_option(self): "User BaseCommands outputs command usage when wrong option is specified" args = ['base_command', '--invalid'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "usage: manage.py base_command") self.assertOutput(err, "error: unrecognized arguments: --invalid") def _test_base_command(self, args, labels, option_a="'1'", option_b="'2'"): out, err = self.run_manage(args) expected_out = ( "EXECUTE:BaseCommand labels=%s, " "options=[('force_color', False), ('no_color', False), " "('option_a', %s), ('option_b', %s), ('option_c', '3'), " "('pythonpath', None), ('settings', None), ('traceback', False), " "('verbosity', 1)]") % (labels, option_a, option_b) self.assertNoOutput(err) self.assertOutput(out, expected_out) def test_base_run_from_argv(self): """ Test run_from_argv properly terminates even with custom execute() (#19665) Also test proper traceback display. """ err = StringIO() command = BaseCommand(stderr=err) def raise_command_error(*args, **kwargs): raise CommandError("Custom error") command.execute = lambda args: args # This will trigger TypeError # If the Exception is not CommandError it should always # raise the original exception. 
with self.assertRaises(TypeError): command.run_from_argv(['', '']) # If the Exception is CommandError and --traceback is not present # this command should raise a SystemExit and don't print any # traceback to the stderr. command.execute = raise_command_error err.truncate(0) with self.assertRaises(SystemExit): command.run_from_argv(['', '']) err_message = err.getvalue() self.assertNotIn("Traceback", err_message) self.assertIn("CommandError", err_message) # If the Exception is CommandError and --traceback is present # this command should raise the original CommandError as if it # were not a CommandError. err.truncate(0) with self.assertRaises(CommandError): command.run_from_argv(['', '', '--traceback']) def test_run_from_argv_non_ascii_error(self): """ Non-ASCII message of CommandError does not raise any UnicodeDecodeError in run_from_argv. """ def raise_command_error(*args, **kwargs): raise CommandError("Erreur personnalisée") command = BaseCommand(stderr=StringIO()) command.execute = raise_command_error with self.assertRaises(SystemExit): command.run_from_argv(['', '']) def test_run_from_argv_closes_connections(self): """ A command called from the command line should close connections after being executed (#21255). """ command = BaseCommand() command.check = lambda: [] command.handle = lambda *args, **kwargs: args with mock.patch('django.core.management.base.connections') as mock_connections: command.run_from_argv(['', '']) # Test connections have been closed self.assertTrue(mock_connections.close_all.called) def test_noargs(self): "NoArg Commands can be executed" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE: noargs_command options=[('force_color', False), " "('no_color', False), ('pythonpath', None), ('settings', None), " "('traceback', False), ('verbosity', 1)]" ) def test_noargs_with_args(self): "NoArg Commands raise an error if an argument is provided" args = ['noargs_command', 'argument'] out, err = self.run_manage(args) self.assertOutput(err, "error: unrecognized arguments: argument") def test_app_command(self): "User AppCommands can execute when a single app name is provided" args = ['app_command', 'auth'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=") self.assertOutput( out, ", options=[('force_color', False), ('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), " "('verbosity', 1)]" ) def test_app_command_no_apps(self): "User AppCommands raise an error when no app name is provided" args = ['app_command'] out, err = self.run_manage(args) self.assertOutput(err, 'error: Enter at least one application label.') def test_app_command_multiple_apps(self): "User AppCommands raise an error when multiple app names are provided" args = ['app_command', 'auth', 'contenttypes'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=") self.assertOutput( out, ", options=[('force_color', False), ('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), " "('verbosity', 1)]" ) self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.contenttypes, options=") self.assertOutput( out, ", options=[('force_color', False), ('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), " "('verbosity', 1)]" ) def test_app_command_invalid_app_label(self): "User AppCommands 
can execute when a single app name is provided" args = ['app_command', 'NOT_AN_APP'] out, err = self.run_manage(args) self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.") def test_app_command_some_invalid_app_labels(self): "User AppCommands can execute when some of the provided app names are invalid" args = ['app_command', 'auth', 'NOT_AN_APP'] out, err = self.run_manage(args) self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.") def test_label_command(self): "User LabelCommands can execute when a label is provided" args = ['label_command', 'testlabel'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE:LabelCommand label=testlabel, options=[('force_color', " "False), ('no_color', False), ('pythonpath', None), ('settings', " "None), ('traceback', False), ('verbosity', 1)]" ) def test_label_command_no_label(self): "User LabelCommands raise an error if no label is provided" args = ['label_command'] out, err = self.run_manage(args) self.assertOutput(err, 'Enter at least one label') def test_label_command_multiple_label(self): "User LabelCommands are executed multiple times if multiple labels are provided" args = ['label_command', 'testlabel', 'anotherlabel'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE:LabelCommand label=testlabel, options=[('force_color', " "False), ('no_color', False), ('pythonpath', None), " "('settings', None), ('traceback', False), ('verbosity', 1)]" ) self.assertOutput( out, "EXECUTE:LabelCommand label=anotherlabel, options=[('force_color', " "False), ('no_color', False), ('pythonpath', None), " "('settings', None), ('traceback', False), ('verbosity', 1)]" ) class Discovery(SimpleTestCase): def test_precedence(self): """ Apps listed first in INSTALLED_APPS have precedence. """ with self.settings(INSTALLED_APPS=['admin_scripts.complex_app', 'admin_scripts.simple_app', 'django.contrib.auth', 'django.contrib.contenttypes']): out = StringIO() call_command('duplicate', stdout=out) self.assertEqual(out.getvalue().strip(), 'complex_app') with self.settings(INSTALLED_APPS=['admin_scripts.simple_app', 'admin_scripts.complex_app', 'django.contrib.auth', 'django.contrib.contenttypes']): out = StringIO() call_command('duplicate', stdout=out) self.assertEqual(out.getvalue().strip(), 'simple_app') class ArgumentOrder(AdminScriptTestCase): """Tests for 2-stage argument parsing scheme. django-admin command arguments are parsed in 2 parts; the core arguments (--settings, --traceback and --pythonpath) are parsed using a basic parser, ignoring any unknown options. Then the full settings are passed to the command parser, which extracts commands of interest to the individual command. """ def setUp(self): super().setUp() self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) self.write_settings('alternate_settings.py') def test_setting_then_option(self): """ Options passed after settings are correctly handled. """ args = ['base_command', 'testlabel', '--settings=alternate_settings', '--option_a=x'] self._test(args) def test_setting_then_short_option(self): """ Short options passed after settings are correctly handled. """ args = ['base_command', 'testlabel', '--settings=alternate_settings', '-a', 'x'] self._test(args) def test_option_then_setting(self): """ Options passed before settings are correctly handled. 
""" args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings'] self._test(args) def test_short_option_then_setting(self): """ Short options passed before settings are correctly handled. """ args = ['base_command', 'testlabel', '-a', 'x', '--settings=alternate_settings'] self._test(args) def test_option_then_setting_then_option(self): """ Options are correctly handled when they are passed before and after a setting. """ args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings', '--option_b=y'] self._test(args, option_b="'y'") def _test(self, args, option_b="'2'"): out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE:BaseCommand labels=('testlabel',), options=[" "('force_color', False), ('no_color', False), ('option_a', 'x'), " "('option_b', %s), ('option_c', '3'), ('pythonpath', None), " "('settings', 'alternate_settings'), ('traceback', False), " "('verbosity', 1)]" % option_b ) @override_settings(ROOT_URLCONF='admin_scripts.urls') class StartProject(LiveServerTestCase, AdminScriptTestCase): available_apps = [ 'admin_scripts', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', ] def test_wrong_args(self): "Make sure passing the wrong kinds of arguments outputs an error and prints usage" out, err = self.run_django_admin(['startproject']) self.assertNoOutput(out) self.assertOutput(err, "usage:") self.assertOutput(err, "You must provide a project name.") def test_simple_project(self): "Make sure the startproject management command creates a project" args = ['startproject', 'testproject'] testproject_dir = os.path.join(self.test_dir, 'testproject') out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) # running again.. out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput( err, "CommandError: 'testproject' conflicts with the name of an " "existing Python module and cannot be used as a project name. " "Please try another name.", ) def test_invalid_project_name(self): "Make sure the startproject management command validates a project name" for bad_name in ('7testproject', '../testproject'): with self.subTest(project_name=bad_name): args = ['startproject', bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) out, err = self.run_django_admin(args) self.assertOutput( err, "Error: '%s' is not a valid project name. Please make " "sure the name is a valid identifier." % bad_name ) self.assertFalse(os.path.exists(testproject_dir)) def test_importable_project_name(self): """ startproject validates that project name doesn't clash with existing Python modules. """ bad_name = 'os' args = ['startproject', bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) out, err = self.run_django_admin(args) self.assertOutput( err, "CommandError: 'os' conflicts with the name of an existing " "Python module and cannot be used as a project name. Please try " "another name." ) self.assertFalse(os.path.exists(testproject_dir)) def test_simple_project_different_directory(self): "Make sure the startproject management command creates a project in a specific directory" args = ['startproject', 'testproject', 'othertestproject'] testproject_dir = os.path.join(self.test_dir, 'othertestproject') os.mkdir(testproject_dir) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'manage.py'))) # running again.. 
out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput( err, "already exists. Overlaying a project into an existing directory " "won't replace conflicting files." ) def test_custom_project_template(self): "Make sure the startproject management command is able to use a different project template" template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, 'customtestproject'] testproject_dir = os.path.join(self.test_dir, 'customtestproject') out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir'))) def test_template_dir_with_trailing_slash(self): "Ticket 17475: Template dir passed has a trailing path separator" template_path = os.path.join(custom_templates_dir, 'project_template' + os.sep) args = ['startproject', '--template', template_path, 'customtestproject'] testproject_dir = os.path.join(self.test_dir, 'customtestproject') out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir'))) def test_custom_project_template_from_tarball_by_path(self): "Make sure the startproject management command is able to use a different project template from a tarball" template_path = os.path.join(custom_templates_dir, 'project_template.tgz') args = ['startproject', '--template', template_path, 'tarballtestproject'] testproject_dir = os.path.join(self.test_dir, 'tarballtestproject') out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py'))) def test_custom_project_template_from_tarball_to_alternative_location(self): "Startproject can use a project template from a tarball and create it in a specified location" template_path = os.path.join(custom_templates_dir, 'project_template.tgz') args = ['startproject', '--template', template_path, 'tarballtestproject', 'altlocation'] testproject_dir = os.path.join(self.test_dir, 'altlocation') os.mkdir(testproject_dir) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py'))) def test_custom_project_template_from_tarball_by_url(self): """ The startproject management command is able to use a different project template from a tarball via a URL. 
""" template_url = '%s/custom_templates/project_template.tgz' % self.live_server_url args = ['startproject', '--template', template_url, 'urltestproject'] testproject_dir = os.path.join(self.test_dir, 'urltestproject') out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py'))) def test_project_template_tarball_url(self): "Startproject management command handles project template tar/zip balls from non-canonical urls" template_url = '%s/custom_templates/project_template.tgz/' % self.live_server_url args = ['startproject', '--template', template_url, 'urltestproject'] testproject_dir = os.path.join(self.test_dir, 'urltestproject') out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py'))) def test_file_without_extension(self): "Make sure the startproject management command is able to render custom files" template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, 'customtestproject', '-e', 'txt', '-n', 'Procfile'] testproject_dir = os.path.join(self.test_dir, 'customtestproject') out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir'))) base_path = os.path.join(testproject_dir, 'additional_dir') for f in ('Procfile', 'additional_file.py', 'requirements.txt'): self.assertTrue(os.path.exists(os.path.join(base_path, f))) with open(os.path.join(base_path, f)) as fh: self.assertEqual(fh.read().strip(), '# some file for customtestproject test project') def test_custom_project_template_context_variables(self): "Make sure template context variables are rendered with proper values" template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, 'another_project', 'project_dir'] testproject_dir = os.path.join(self.test_dir, 'project_dir') os.mkdir(testproject_dir) out, err = self.run_django_admin(args) self.assertNoOutput(err) test_manage_py = os.path.join(testproject_dir, 'manage.py') with open(test_manage_py) as fp: content = fp.read() self.assertIn("project_name = 'another_project'", content) self.assertIn("project_directory = '%s'" % testproject_dir, content) def test_no_escaping_of_project_variables(self): "Make sure template context variables are not html escaped" # We're using a custom command so we need the alternate settings self.write_settings('alternate_settings.py') template_path = os.path.join(custom_templates_dir, 'project_template') args = [ 'custom_startproject', '--template', template_path, 'another_project', 'project_dir', '--extra', '<&>', '--settings=alternate_settings', ] testproject_dir = os.path.join(self.test_dir, 'project_dir') os.mkdir(testproject_dir) out, err = self.run_manage(args) self.assertNoOutput(err) test_manage_py = os.path.join(testproject_dir, 'additional_dir', 'extra.py') with open(test_manage_py) as fp: content = fp.read() self.assertIn("<&>", content) def test_custom_project_destination_missing(self): """ Make sure an exception is raised when the provided destination directory doesn't exist """ template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, 'yet_another_project', 'project_dir2'] 
testproject_dir = os.path.join(self.test_dir, 'project_dir2') out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "Destination directory '%s' does not exist, please create it first." % testproject_dir) self.assertFalse(os.path.exists(testproject_dir)) def test_custom_project_template_with_non_ascii_templates(self): """ The startproject management command is able to render templates with non-ASCII content. """ template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, '--extension=txt', 'customtestproject'] testproject_dir = os.path.join(self.test_dir, 'customtestproject') out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) path = os.path.join(testproject_dir, 'ticket-18091-non-ascii-template.txt') with open(path, encoding='utf-8') as f: self.assertEqual(f.read().splitlines(False), [ 'Some non-ASCII text for testing ticket #18091:', 'üäö €']) class StartApp(AdminScriptTestCase): def test_invalid_name(self): """startapp validates that app name is a valid Python identifier.""" for bad_name in ('7testproject', '../testproject'): with self.subTest(app_name=bad_name): args = ['startapp', bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) out, err = self.run_django_admin(args) self.assertOutput( err, "CommandError: '{}' is not a valid app name. Please make " "sure the name is a valid identifier.".format(bad_name) ) self.assertFalse(os.path.exists(testproject_dir)) def test_importable_name(self): """ startapp validates that app name doesn't clash with existing Python modules. """ bad_name = 'os' args = ['startapp', bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) out, err = self.run_django_admin(args) self.assertOutput( err, "CommandError: 'os' conflicts with the name of an existing " "Python module and cannot be used as an app name. Please try " "another name." ) self.assertFalse(os.path.exists(testproject_dir)) def test_invalid_target_name(self): for bad_target in ('invalid.dir_name', '7invalid_dir_name', '.invalid_dir_name'): with self.subTest(bad_target): _, err = self.run_django_admin(['startapp', 'app', bad_target]) self.assertOutput( err, "CommandError: '%s' is not a valid app directory. Please " "make sure the directory is a valid identifier." % bad_target ) def test_importable_target_name(self): _, err = self.run_django_admin(['startapp', 'app', 'os']) self.assertOutput( err, "CommandError: 'os' conflicts with the name of an existing Python " "module and cannot be used as an app directory. Please try " "another directory." ) def test_overlaying_app(self): # Use a subdirectory so it is outside the PYTHONPATH. os.makedirs(os.path.join(self.test_dir, 'apps/app1')) self.run_django_admin(['startapp', 'app1', 'apps/app1']) out, err = self.run_django_admin(['startapp', 'app2', 'apps/app1']) self.assertOutput( err, "already exists. Overlaying an app into an existing directory " "won't replace conflicting files." ) class DiffSettings(AdminScriptTestCase): """Tests for diffsettings management command.""" def test_basic(self): """Runs without error and emits settings diff.""" self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'}) args = ['diffsettings', '--settings=settings_to_diff'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "FOO = 'bar' ###") # Attributes from django.conf.Settings don't appear. 
self.assertNotInOutput(out, 'is_overridden = ') def test_settings_configured(self): out, err = self.run_manage(['diffsettings'], manage_py='configured_settings_manage.py') self.assertNoOutput(err) self.assertOutput(out, 'CUSTOM = 1 ###\nDEBUG = True') # Attributes from django.conf.UserSettingsHolder don't appear. self.assertNotInOutput(out, 'default_settings = ') def test_dynamic_settings_configured(self): # Custom default settings appear. out, err = self.run_manage(['diffsettings'], manage_py='configured_dynamic_settings_manage.py') self.assertNoOutput(err) self.assertOutput(out, "FOO = 'bar' ###") def test_all(self): """The all option also shows settings with the default value.""" self.write_settings('settings_to_diff.py', sdict={'STATIC_URL': 'None'}) args = ['diffsettings', '--settings=settings_to_diff', '--all'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "### STATIC_URL = None") def test_custom_default(self): """ The --default option specifies an alternate settings module for comparison. """ self.write_settings('settings_default.py', sdict={'FOO': '"foo"', 'BAR': '"bar1"'}) self.write_settings('settings_to_diff.py', sdict={'FOO': '"foo"', 'BAR': '"bar2"'}) out, err = self.run_manage(['diffsettings', '--settings=settings_to_diff', '--default=settings_default']) self.assertNoOutput(err) self.assertNotInOutput(out, "FOO") self.assertOutput(out, "BAR = 'bar2'") def test_unified(self): """--output=unified emits settings diff in unified mode.""" self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'}) args = ['diffsettings', '--settings=settings_to_diff', '--output=unified'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "+ FOO = 'bar'") self.assertOutput(out, "- SECRET_KEY = ''") self.assertOutput(out, "+ SECRET_KEY = 'django_tests_secret_key'") self.assertNotInOutput(out, " APPEND_SLASH = True") def test_unified_all(self): """ --output=unified --all emits settings diff in unified mode and includes settings with the default value. """ self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'}) args = ['diffsettings', '--settings=settings_to_diff', '--output=unified', '--all'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, " APPEND_SLASH = True") self.assertOutput(out, "+ FOO = 'bar'") self.assertOutput(out, "- SECRET_KEY = ''") class Dumpdata(AdminScriptTestCase): """Tests for dumpdata management command.""" def setUp(self): super().setUp() self.write_settings('settings.py') def test_pks_parsing(self): """Regression for #20509 Test would raise an exception rather than printing an error message. """ args = ['dumpdata', '--pks=1'] out, err = self.run_manage(args) self.assertOutput(err, "You can only use --pks option with one model") self.assertNoOutput(out) class MainModule(AdminScriptTestCase): """python -m django works like django-admin.""" def test_program_name_in_help(self): out, err = self.run_test(['-m', 'django', 'help']) self.assertOutput(out, "Type 'python -m django help <subcommand>' for help on a specific subcommand.") class DjangoAdminSuggestions(AdminScriptTestCase): def setUp(self): super().setUp() self.write_settings('settings.py') def test_suggestions(self): args = ['rnserver', '--settings=test_project.settings'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'rnserver'. 
Did you mean runserver?") def test_no_suggestions(self): args = ['abcdef', '--settings=test_project.settings'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertNotInOutput(err, 'Did you mean')
5ae8876bcf0251aa437b4790cd5406e634c7cb9faf40c715abc1b5d7565c846f
from math import ceil from operator import attrgetter from django.db import IntegrityError, NotSupportedError, connection from django.db.models import FileField, Value from django.db.models.functions import Lower from django.test import ( TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature, ) from .models import ( BigAutoFieldModel, Country, NoFields, NullableFields, Pizzeria, ProxyCountry, ProxyMultiCountry, ProxyMultiProxyCountry, ProxyProxyCountry, Restaurant, SmallAutoFieldModel, State, TwoFields, ) class BulkCreateTests(TestCase): def setUp(self): self.data = [ Country(name="United States of America", iso_two_letter="US"), Country(name="The Netherlands", iso_two_letter="NL"), Country(name="Germany", iso_two_letter="DE"), Country(name="Czech Republic", iso_two_letter="CZ") ] def test_simple(self): created = Country.objects.bulk_create(self.data) self.assertEqual(created, self.data) self.assertQuerysetEqual(Country.objects.order_by("-name"), [ "United States of America", "The Netherlands", "Germany", "Czech Republic" ], attrgetter("name")) created = Country.objects.bulk_create([]) self.assertEqual(created, []) self.assertEqual(Country.objects.count(), 4) @skipUnlessDBFeature('has_bulk_insert') def test_efficiency(self): with self.assertNumQueries(1): Country.objects.bulk_create(self.data) @skipUnlessDBFeature('has_bulk_insert') def test_long_non_ascii_text(self): """ Inserting non-ASCII values with a length in the range 2001 to 4000 characters, i.e. 4002 to 8000 bytes, must be set as a CLOB on Oracle (#22144). """ Country.objects.bulk_create([Country(description='Ж' * 3000)]) self.assertEqual(Country.objects.count(), 1) @skipUnlessDBFeature('has_bulk_insert') def test_long_and_short_text(self): Country.objects.bulk_create([ Country(description='a' * 4001), Country(description='a'), Country(description='Ж' * 2001), Country(description='Ж'), ]) self.assertEqual(Country.objects.count(), 4) def test_multi_table_inheritance_unsupported(self): expected_message = "Can't bulk create a multi-table inherited model" with self.assertRaisesMessage(ValueError, expected_message): Pizzeria.objects.bulk_create([ Pizzeria(name="The Art of Pizza"), ]) with self.assertRaisesMessage(ValueError, expected_message): ProxyMultiCountry.objects.bulk_create([ ProxyMultiCountry(name="Fillory", iso_two_letter="FL"), ]) with self.assertRaisesMessage(ValueError, expected_message): ProxyMultiProxyCountry.objects.bulk_create([ ProxyMultiProxyCountry(name="Fillory", iso_two_letter="FL"), ]) def test_proxy_inheritance_supported(self): ProxyCountry.objects.bulk_create([ ProxyCountry(name="Qwghlm", iso_two_letter="QW"), Country(name="Tortall", iso_two_letter="TA"), ]) self.assertQuerysetEqual(ProxyCountry.objects.all(), { "Qwghlm", "Tortall" }, attrgetter("name"), ordered=False) ProxyProxyCountry.objects.bulk_create([ ProxyProxyCountry(name="Netherlands", iso_two_letter="NT"), ]) self.assertQuerysetEqual(ProxyProxyCountry.objects.all(), { "Qwghlm", "Tortall", "Netherlands", }, attrgetter("name"), ordered=False) def test_non_auto_increment_pk(self): State.objects.bulk_create([ State(two_letter_code=s) for s in ["IL", "NY", "CA", "ME"] ]) self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [ "CA", "IL", "ME", "NY", ], attrgetter("two_letter_code")) @skipUnlessDBFeature('has_bulk_insert') def test_non_auto_increment_pk_efficiency(self): with self.assertNumQueries(1): State.objects.bulk_create([ State(two_letter_code=s) for s in ["IL", "NY", "CA", "ME"] ]) 
self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [ "CA", "IL", "ME", "NY", ], attrgetter("two_letter_code")) @skipIfDBFeature('allows_auto_pk_0') def test_zero_as_autoval(self): """ Zero as id for AutoField should raise exception in MySQL, because MySQL does not allow zero for automatic primary key if the NO_AUTO_VALUE_ON_ZERO SQL mode is not enabled. """ valid_country = Country(name='Germany', iso_two_letter='DE') invalid_country = Country(id=0, name='Poland', iso_two_letter='PL') msg = 'The database backend does not accept 0 as a value for AutoField.' with self.assertRaisesMessage(ValueError, msg): Country.objects.bulk_create([valid_country, invalid_country]) def test_batch_same_vals(self): # SQLite had a problem where all the same-valued models were # collapsed to one insert. Restaurant.objects.bulk_create([ Restaurant(name='foo') for i in range(0, 2) ]) self.assertEqual(Restaurant.objects.count(), 2) def test_large_batch(self): TwoFields.objects.bulk_create([ TwoFields(f1=i, f2=i + 1) for i in range(0, 1001) ]) self.assertEqual(TwoFields.objects.count(), 1001) self.assertEqual( TwoFields.objects.filter(f1__gte=450, f1__lte=550).count(), 101) self.assertEqual(TwoFields.objects.filter(f2__gte=901).count(), 101) @skipUnlessDBFeature('has_bulk_insert') def test_large_single_field_batch(self): # SQLite had a problem with more than 500 UNIONed selects in single # query. Restaurant.objects.bulk_create([ Restaurant() for i in range(0, 501) ]) @skipUnlessDBFeature('has_bulk_insert') def test_large_batch_efficiency(self): with override_settings(DEBUG=True): connection.queries_log.clear() TwoFields.objects.bulk_create([ TwoFields(f1=i, f2=i + 1) for i in range(0, 1001) ]) self.assertLess(len(connection.queries), 10) def test_large_batch_mixed(self): """ Test inserting a large batch with objects having primary key set mixed together with objects without PK set. """ TwoFields.objects.bulk_create([ TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1) for i in range(100000, 101000) ]) self.assertEqual(TwoFields.objects.count(), 1000) # We can't assume much about the ID's created, except that the above # created IDs must exist. id_range = range(100000, 101000, 2) self.assertEqual(TwoFields.objects.filter(id__in=id_range).count(), 500) self.assertEqual(TwoFields.objects.exclude(id__in=id_range).count(), 500) @skipUnlessDBFeature('has_bulk_insert') def test_large_batch_mixed_efficiency(self): """ Test inserting a large batch with objects having primary key set mixed together with objects without PK set. 
""" with override_settings(DEBUG=True): connection.queries_log.clear() TwoFields.objects.bulk_create([ TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1) for i in range(100000, 101000)]) self.assertLess(len(connection.queries), 10) def test_explicit_batch_size(self): objs = [TwoFields(f1=i, f2=i) for i in range(0, 4)] num_objs = len(objs) TwoFields.objects.bulk_create(objs, batch_size=1) self.assertEqual(TwoFields.objects.count(), num_objs) TwoFields.objects.all().delete() TwoFields.objects.bulk_create(objs, batch_size=2) self.assertEqual(TwoFields.objects.count(), num_objs) TwoFields.objects.all().delete() TwoFields.objects.bulk_create(objs, batch_size=3) self.assertEqual(TwoFields.objects.count(), num_objs) TwoFields.objects.all().delete() TwoFields.objects.bulk_create(objs, batch_size=num_objs) self.assertEqual(TwoFields.objects.count(), num_objs) def test_empty_model(self): NoFields.objects.bulk_create([NoFields() for i in range(2)]) self.assertEqual(NoFields.objects.count(), 2) @skipUnlessDBFeature('has_bulk_insert') def test_explicit_batch_size_efficiency(self): objs = [TwoFields(f1=i, f2=i) for i in range(0, 100)] with self.assertNumQueries(2): TwoFields.objects.bulk_create(objs, 50) TwoFields.objects.all().delete() with self.assertNumQueries(1): TwoFields.objects.bulk_create(objs, len(objs)) @skipUnlessDBFeature('has_bulk_insert') def test_explicit_batch_size_respects_max_batch_size(self): objs = [Country() for i in range(1000)] fields = ['name', 'iso_two_letter', 'description'] max_batch_size = max(connection.ops.bulk_batch_size(fields, objs), 1) with self.assertNumQueries(ceil(len(objs) / max_batch_size)): Country.objects.bulk_create(objs, batch_size=max_batch_size + 1) @skipUnlessDBFeature('has_bulk_insert') def test_bulk_insert_expressions(self): Restaurant.objects.bulk_create([ Restaurant(name="Sam's Shake Shack"), Restaurant(name=Lower(Value("Betty's Beetroot Bar"))) ]) bbb = Restaurant.objects.filter(name="betty's beetroot bar") self.assertEqual(bbb.count(), 1) @skipUnlessDBFeature('has_bulk_insert') def test_bulk_insert_nullable_fields(self): fk_to_auto_fields = { 'auto_field': NoFields.objects.create(), 'small_auto_field': SmallAutoFieldModel.objects.create(), 'big_auto_field': BigAutoFieldModel.objects.create(), } # NULL can be mixed with other values in nullable fields nullable_fields = [field for field in NullableFields._meta.get_fields() if field.name != 'id'] NullableFields.objects.bulk_create([ NullableFields(**{**fk_to_auto_fields, field.name: None}) for field in nullable_fields ]) self.assertEqual(NullableFields.objects.count(), len(nullable_fields)) for field in nullable_fields: with self.subTest(field=field): field_value = '' if isinstance(field, FileField) else None self.assertEqual(NullableFields.objects.filter(**{field.name: field_value}).count(), 1) @skipUnlessDBFeature('can_return_rows_from_bulk_insert') def test_set_pk_and_insert_single_item(self): with self.assertNumQueries(1): countries = Country.objects.bulk_create([self.data[0]]) self.assertEqual(len(countries), 1) self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0]) @skipUnlessDBFeature('can_return_rows_from_bulk_insert') def test_set_pk_and_query_efficiency(self): with self.assertNumQueries(1): countries = Country.objects.bulk_create(self.data) self.assertEqual(len(countries), 4) self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0]) self.assertEqual(Country.objects.get(pk=countries[1].pk), countries[1]) self.assertEqual(Country.objects.get(pk=countries[2].pk), 
countries[2]) self.assertEqual(Country.objects.get(pk=countries[3].pk), countries[3]) @skipUnlessDBFeature('can_return_rows_from_bulk_insert') def test_set_state(self): country_nl = Country(name='Netherlands', iso_two_letter='NL') country_be = Country(name='Belgium', iso_two_letter='BE') Country.objects.bulk_create([country_nl]) country_be.save() # Objects save via bulk_create() and save() should have equal state. self.assertEqual(country_nl._state.adding, country_be._state.adding) self.assertEqual(country_nl._state.db, country_be._state.db) def test_set_state_with_pk_specified(self): state_ca = State(two_letter_code='CA') state_ny = State(two_letter_code='NY') State.objects.bulk_create([state_ca]) state_ny.save() # Objects save via bulk_create() and save() should have equal state. self.assertEqual(state_ca._state.adding, state_ny._state.adding) self.assertEqual(state_ca._state.db, state_ny._state.db) @skipIfDBFeature('supports_ignore_conflicts') def test_ignore_conflicts_value_error(self): message = 'This database backend does not support ignoring conflicts.' with self.assertRaisesMessage(NotSupportedError, message): TwoFields.objects.bulk_create(self.data, ignore_conflicts=True) @skipUnlessDBFeature('supports_ignore_conflicts') def test_ignore_conflicts_ignore(self): data = [ TwoFields(f1=1, f2=1), TwoFields(f1=2, f2=2), TwoFields(f1=3, f2=3), ] TwoFields.objects.bulk_create(data) self.assertEqual(TwoFields.objects.count(), 3) # With ignore_conflicts=True, conflicts are ignored. conflicting_objects = [ TwoFields(f1=2, f2=2), TwoFields(f1=3, f2=3), ] TwoFields.objects.bulk_create([conflicting_objects[0]], ignore_conflicts=True) TwoFields.objects.bulk_create(conflicting_objects, ignore_conflicts=True) self.assertEqual(TwoFields.objects.count(), 3) self.assertIsNone(conflicting_objects[0].pk) self.assertIsNone(conflicting_objects[1].pk) # New objects are created and conflicts are ignored. new_object = TwoFields(f1=4, f2=4) TwoFields.objects.bulk_create(conflicting_objects + [new_object], ignore_conflicts=True) self.assertEqual(TwoFields.objects.count(), 4) self.assertIsNone(new_object.pk) # Without ignore_conflicts=True, there's a problem. with self.assertRaises(IntegrityError): TwoFields.objects.bulk_create(conflicting_objects) def test_nullable_fk_after_parent(self): parent = NoFields() child = NullableFields(auto_field=parent, integer_field=88) parent.save() NullableFields.objects.bulk_create([child]) child = NullableFields.objects.get(integer_field=88) self.assertEqual(child.auto_field, parent) @skipUnlessDBFeature('can_return_rows_from_bulk_insert') def test_nullable_fk_after_parent_bulk_create(self): parent = NoFields() child = NullableFields(auto_field=parent, integer_field=88) NoFields.objects.bulk_create([parent]) NullableFields.objects.bulk_create([child]) child = NullableFields.objects.get(integer_field=88) self.assertEqual(child.auto_field, parent) def test_unsaved_parent(self): parent = NoFields() msg = ( "bulk_create() prohibited to prevent data loss due to unsaved " "related object 'auto_field'." ) with self.assertRaisesMessage(ValueError, msg): NullableFields.objects.bulk_create([NullableFields(auto_field=parent)])
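

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of Django's test suite: the efficiency tests
# above (test_explicit_batch_size_efficiency and
# test_explicit_batch_size_respects_max_batch_size) assert that bulk_create()
# issues ceil(len(objs) / batch_size) INSERT statements, with the effective
# batch size capped by connection.ops.bulk_batch_size(). A minimal chunking
# helper with the same arithmetic (the name _expected_insert_batches is
# hypothetical, and it operates on plain Python lists rather than model
# instances):
def _expected_insert_batches(objs, batch_size, max_batch_size):
    # Mirror the cap the ORM applies: an explicit batch_size never exceeds
    # what the backend reports as its maximum.
    effective = min(batch_size, max_batch_size) if batch_size else max_batch_size
    return [objs[i:i + effective] for i in range(0, len(objs), effective)]


# For example, 1000 objects with batch_size=300 but a backend cap of 250
# split into ceil(1000 / 250) == 4 batches:
# len(_expected_insert_batches(list(range(1000)), 300, 250)) == 4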
d19940fa1bb82826a894a1d5b0230d523e145e2241bfa019f6217d413198a594
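# ---------------------------------------------------------------------------
# Overview comment (illustrative, not part of Django's test suite): the tests
# below annotate querysets with Window() expressions, which render to SQL
# window functions of the rough shape
#     <function>() OVER ([PARTITION BY <partition_by>] [ORDER BY <order_by>]
#                        [<frame clause>])
# For instance, the annotation in test_dense_rank corresponds approximately to
#     DENSE_RANK() OVER (ORDER BY EXTRACT('year' FROM hire_date) ASC)
# The exact SQL differs per backend, which is why several tests are guarded by
# skipUnlessDBFeature()/skipIf().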
import datetime from decimal import Decimal from unittest import mock, skipIf from django.core.exceptions import FieldError from django.db import NotSupportedError, connection from django.db.models import ( Avg, BooleanField, Case, F, Func, IntegerField, Max, Min, OuterRef, Q, RowRange, Subquery, Sum, Value, ValueRange, When, Window, WindowFrame, ) from django.db.models.fields.json import KeyTextTransform, KeyTransform from django.db.models.functions import ( Cast, CumeDist, DenseRank, ExtractYear, FirstValue, Lag, LastValue, Lead, NthValue, Ntile, PercentRank, Rank, RowNumber, Upper, ) from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from .models import Detail, Employee @skipUnlessDBFeature('supports_over_clause') class WindowFunctionTests(TestCase): @classmethod def setUpTestData(cls): Employee.objects.bulk_create([ Employee( name=e[0], salary=e[1], department=e[2], hire_date=e[3], age=e[4], bonus=Decimal(e[1]) / 400, ) for e in [ ('Jones', 45000, 'Accounting', datetime.datetime(2005, 11, 1), 20), ('Williams', 37000, 'Accounting', datetime.datetime(2009, 6, 1), 20), ('Jenson', 45000, 'Accounting', datetime.datetime(2008, 4, 1), 20), ('Adams', 50000, 'Accounting', datetime.datetime(2013, 7, 1), 50), ('Smith', 55000, 'Sales', datetime.datetime(2007, 6, 1), 30), ('Brown', 53000, 'Sales', datetime.datetime(2009, 9, 1), 30), ('Johnson', 40000, 'Marketing', datetime.datetime(2012, 3, 1), 30), ('Smith', 38000, 'Marketing', datetime.datetime(2009, 10, 1), 20), ('Wilkinson', 60000, 'IT', datetime.datetime(2011, 3, 1), 40), ('Moore', 34000, 'IT', datetime.datetime(2013, 8, 1), 40), ('Miller', 100000, 'Management', datetime.datetime(2005, 6, 1), 40), ('Johnson', 80000, 'Management', datetime.datetime(2005, 7, 1), 50), ] ]) def test_dense_rank(self): qs = Employee.objects.annotate(rank=Window( expression=DenseRank(), order_by=ExtractYear(F('hire_date')).asc(), )) self.assertQuerysetEqual(qs, [ ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1), ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 2), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 3), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 4), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 4), ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 4), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 5), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 6), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 7), ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 7), ], lambda entry: (entry.name, entry.salary, entry.department, entry.hire_date, entry.rank), ordered=False) def test_department_salary(self): qs = Employee.objects.annotate(department_sum=Window( expression=Sum('salary'), partition_by=F('department'), order_by=[F('hire_date').asc()], )).order_by('department', 'department_sum') self.assertQuerysetEqual(qs, [ ('Jones', 'Accounting', 45000, 45000), ('Jenson', 'Accounting', 45000, 90000), ('Williams', 'Accounting', 37000, 127000), ('Adams', 'Accounting', 50000, 177000), ('Wilkinson', 'IT', 60000, 60000), ('Moore', 'IT', 34000, 94000), ('Miller', 'Management', 100000, 100000), ('Johnson', 'Management', 80000, 180000), ('Smith', 'Marketing', 38000, 38000), ('Johnson', 'Marketing', 40000, 78000), ('Smith', 'Sales', 55000, 55000), ('Brown', 'Sales', 53000, 108000), ], lambda entry: (entry.name, entry.department, entry.salary, 
entry.department_sum)) def test_rank(self): """ Rank the employees based on the year they're were hired. Since there are multiple employees hired in different years, this will contain gaps. """ qs = Employee.objects.annotate(rank=Window( expression=Rank(), order_by=ExtractYear(F('hire_date')).asc(), )) self.assertQuerysetEqual(qs, [ ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1), ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 4), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 5), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 6), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 6), ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 6), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 9), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 10), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 11), ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 11), ], lambda entry: (entry.name, entry.salary, entry.department, entry.hire_date, entry.rank), ordered=False) def test_row_number(self): """ The row number window function computes the number based on the order in which the tuples were inserted. Depending on the backend, Oracle requires an ordering-clause in the Window expression. """ qs = Employee.objects.annotate(row_number=Window( expression=RowNumber(), order_by=F('pk').asc(), )).order_by('pk') self.assertQuerysetEqual(qs, [ ('Jones', 'Accounting', 1), ('Williams', 'Accounting', 2), ('Jenson', 'Accounting', 3), ('Adams', 'Accounting', 4), ('Smith', 'Sales', 5), ('Brown', 'Sales', 6), ('Johnson', 'Marketing', 7), ('Smith', 'Marketing', 8), ('Wilkinson', 'IT', 9), ('Moore', 'IT', 10), ('Miller', 'Management', 11), ('Johnson', 'Management', 12), ], lambda entry: (entry.name, entry.department, entry.row_number)) @skipIf(connection.vendor == 'oracle', "Oracle requires ORDER BY in row_number, ANSI:SQL doesn't") def test_row_number_no_ordering(self): """ The row number window function computes the number based on the order in which the tuples were inserted. """ # Add a default ordering for consistent results across databases. 
qs = Employee.objects.annotate(row_number=Window( expression=RowNumber(), )).order_by('pk') self.assertQuerysetEqual(qs, [ ('Jones', 'Accounting', 1), ('Williams', 'Accounting', 2), ('Jenson', 'Accounting', 3), ('Adams', 'Accounting', 4), ('Smith', 'Sales', 5), ('Brown', 'Sales', 6), ('Johnson', 'Marketing', 7), ('Smith', 'Marketing', 8), ('Wilkinson', 'IT', 9), ('Moore', 'IT', 10), ('Miller', 'Management', 11), ('Johnson', 'Management', 12), ], lambda entry: (entry.name, entry.department, entry.row_number)) def test_avg_salary_department(self): qs = Employee.objects.annotate(avg_salary=Window( expression=Avg('salary'), order_by=F('department').asc(), partition_by='department', )).order_by('department', '-salary', 'name') self.assertQuerysetEqual(qs, [ ('Adams', 50000, 'Accounting', 44250.00), ('Jenson', 45000, 'Accounting', 44250.00), ('Jones', 45000, 'Accounting', 44250.00), ('Williams', 37000, 'Accounting', 44250.00), ('Wilkinson', 60000, 'IT', 47000.00), ('Moore', 34000, 'IT', 47000.00), ('Miller', 100000, 'Management', 90000.00), ('Johnson', 80000, 'Management', 90000.00), ('Johnson', 40000, 'Marketing', 39000.00), ('Smith', 38000, 'Marketing', 39000.00), ('Smith', 55000, 'Sales', 54000.00), ('Brown', 53000, 'Sales', 54000.00), ], transform=lambda row: (row.name, row.salary, row.department, row.avg_salary)) def test_lag(self): """ Compute the difference between an employee's salary and the next highest salary in the employee's department. Return None if the employee has the lowest salary. """ qs = Employee.objects.annotate(lag=Window( expression=Lag(expression='salary', offset=1), partition_by=F('department'), order_by=[F('salary').asc(), F('name').asc()], )).order_by('department', F('salary').asc(), F('name').asc()) self.assertQuerysetEqual(qs, [ ('Williams', 37000, 'Accounting', None), ('Jenson', 45000, 'Accounting', 37000), ('Jones', 45000, 'Accounting', 45000), ('Adams', 50000, 'Accounting', 45000), ('Moore', 34000, 'IT', None), ('Wilkinson', 60000, 'IT', 34000), ('Johnson', 80000, 'Management', None), ('Miller', 100000, 'Management', 80000), ('Smith', 38000, 'Marketing', None), ('Johnson', 40000, 'Marketing', 38000), ('Brown', 53000, 'Sales', None), ('Smith', 55000, 'Sales', 53000), ], transform=lambda row: (row.name, row.salary, row.department, row.lag)) def test_lag_decimalfield(self): qs = Employee.objects.annotate(lag=Window( expression=Lag(expression='bonus', offset=1), partition_by=F('department'), order_by=[F('bonus').asc(), F('name').asc()], )).order_by('department', F('bonus').asc(), F('name').asc()) self.assertQuerysetEqual(qs, [ ('Williams', 92.5, 'Accounting', None), ('Jenson', 112.5, 'Accounting', 92.5), ('Jones', 112.5, 'Accounting', 112.5), ('Adams', 125, 'Accounting', 112.5), ('Moore', 85, 'IT', None), ('Wilkinson', 150, 'IT', 85), ('Johnson', 200, 'Management', None), ('Miller', 250, 'Management', 200), ('Smith', 95, 'Marketing', None), ('Johnson', 100, 'Marketing', 95), ('Brown', 132.5, 'Sales', None), ('Smith', 137.5, 'Sales', 132.5), ], transform=lambda row: (row.name, row.bonus, row.department, row.lag)) def test_first_value(self): qs = Employee.objects.annotate(first_value=Window( expression=FirstValue('salary'), partition_by=F('department'), order_by=F('hire_date').asc(), )).order_by('department', 'hire_date') self.assertQuerysetEqual(qs, [ ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000), ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 45000), ('Adams', 50000, 
'Accounting', datetime.date(2013, 7, 1), 45000), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 60000), ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 38000), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 55000), ], lambda row: (row.name, row.salary, row.department, row.hire_date, row.first_value)) def test_last_value(self): qs = Employee.objects.annotate(last_value=Window( expression=LastValue('hire_date'), partition_by=F('department'), order_by=F('hire_date').asc(), )) self.assertQuerysetEqual(qs, [ ('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, datetime.date(2013, 7, 1)), ('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, datetime.date(2008, 4, 1)), ('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, datetime.date(2005, 11, 1)), ('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, datetime.date(2009, 6, 1)), ('Moore', 'IT', datetime.date(2013, 8, 1), 34000, datetime.date(2013, 8, 1)), ('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, datetime.date(2011, 3, 1)), ('Miller', 'Management', datetime.date(2005, 6, 1), 100000, datetime.date(2005, 6, 1)), ('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, datetime.date(2005, 7, 1)), ('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, datetime.date(2012, 3, 1)), ('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, datetime.date(2009, 10, 1)), ('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, datetime.date(2009, 9, 1)), ('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, datetime.date(2007, 6, 1)), ], transform=lambda row: (row.name, row.department, row.hire_date, row.salary, row.last_value), ordered=False) def test_function_list_of_values(self): qs = Employee.objects.annotate(lead=Window( expression=Lead(expression='salary'), order_by=[F('hire_date').asc(), F('name').desc()], partition_by='department', )).values_list('name', 'salary', 'department', 'hire_date', 'lead') \ .order_by('department', F('hire_date').asc(), F('name').desc()) self.assertNotIn('GROUP BY', str(qs.query)) self.assertSequenceEqual(qs, [ ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000), ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000), ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None), ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None), ]) def test_min_department(self): """An alternative way to specify a query for FirstValue.""" qs = Employee.objects.annotate(min_salary=Window( expression=Min('salary'), partition_by=F('department'), order_by=[F('salary').asc(), F('name').asc()] )).order_by('department', 'salary', 'name') self.assertQuerysetEqual(qs, [ ('Williams', 'Accounting', 37000, 37000), ('Jenson', 
'Accounting', 45000, 37000), ('Jones', 'Accounting', 45000, 37000), ('Adams', 'Accounting', 50000, 37000), ('Moore', 'IT', 34000, 34000), ('Wilkinson', 'IT', 60000, 34000), ('Johnson', 'Management', 80000, 80000), ('Miller', 'Management', 100000, 80000), ('Smith', 'Marketing', 38000, 38000), ('Johnson', 'Marketing', 40000, 38000), ('Brown', 'Sales', 53000, 53000), ('Smith', 'Sales', 55000, 53000), ], lambda row: (row.name, row.department, row.salary, row.min_salary)) def test_max_per_year(self): """ Find the maximum salary awarded in the same year as the employee was hired, regardless of the department. """ qs = Employee.objects.annotate(max_salary_year=Window( expression=Max('salary'), order_by=ExtractYear('hire_date').asc(), partition_by=ExtractYear('hire_date') )).order_by(ExtractYear('hire_date'), 'salary') self.assertQuerysetEqual(qs, [ ('Jones', 'Accounting', 45000, 2005, 100000), ('Johnson', 'Management', 80000, 2005, 100000), ('Miller', 'Management', 100000, 2005, 100000), ('Smith', 'Sales', 55000, 2007, 55000), ('Jenson', 'Accounting', 45000, 2008, 45000), ('Williams', 'Accounting', 37000, 2009, 53000), ('Smith', 'Marketing', 38000, 2009, 53000), ('Brown', 'Sales', 53000, 2009, 53000), ('Wilkinson', 'IT', 60000, 2011, 60000), ('Johnson', 'Marketing', 40000, 2012, 40000), ('Moore', 'IT', 34000, 2013, 50000), ('Adams', 'Accounting', 50000, 2013, 50000), ], lambda row: (row.name, row.department, row.salary, row.hire_date.year, row.max_salary_year)) def test_cume_dist(self): """ Compute the cumulative distribution for the employees based on the salary in increasing order. Equal to rank/total number of rows (12). """ qs = Employee.objects.annotate(cume_dist=Window( expression=CumeDist(), order_by=F('salary').asc(), )).order_by('salary', 'name') # Round result of cume_dist because Oracle uses greater precision. 
self.assertQuerysetEqual(qs, [ ('Moore', 'IT', 34000, 0.0833333333), ('Williams', 'Accounting', 37000, 0.1666666667), ('Smith', 'Marketing', 38000, 0.25), ('Johnson', 'Marketing', 40000, 0.3333333333), ('Jenson', 'Accounting', 45000, 0.5), ('Jones', 'Accounting', 45000, 0.5), ('Adams', 'Accounting', 50000, 0.5833333333), ('Brown', 'Sales', 53000, 0.6666666667), ('Smith', 'Sales', 55000, 0.75), ('Wilkinson', 'IT', 60000, 0.8333333333), ('Johnson', 'Management', 80000, 0.9166666667), ('Miller', 'Management', 100000, 1), ], lambda row: (row.name, row.department, row.salary, round(row.cume_dist, 10))) def test_nthvalue(self): qs = Employee.objects.annotate( nth_value=Window(expression=NthValue( expression='salary', nth=2), order_by=[F('hire_date').asc(), F('name').desc()], partition_by=F('department'), ) ).order_by('department', 'hire_date', 'name') self.assertQuerysetEqual(qs, [ ('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, None), ('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, 45000), ('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, 45000), ('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, 45000), ('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, None), ('Moore', 'IT', datetime.date(2013, 8, 1), 34000, 34000), ('Miller', 'Management', datetime.date(2005, 6, 1), 100000, None), ('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, 80000), ('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, None), ('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, 40000), ('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, None), ('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, 53000), ], lambda row: (row.name, row.department, row.hire_date, row.salary, row.nth_value)) def test_lead(self): """ Determine what the next person hired in the same department makes. Because the dataset is ambiguous, the name is also part of the ordering clause. No default is provided, so None/NULL should be returned. """ qs = Employee.objects.annotate(lead=Window( expression=Lead(expression='salary'), order_by=[F('hire_date').asc(), F('name').desc()], partition_by='department', )).order_by('department', F('hire_date').asc(), F('name').desc()) self.assertQuerysetEqual(qs, [ ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000), ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000), ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None), ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None), ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead)) def test_lead_offset(self): """ Determine what the person hired after someone makes. Due to ambiguity, the name is also included in the ordering. 
""" qs = Employee.objects.annotate(lead=Window( expression=Lead('salary', offset=2), partition_by='department', order_by=F('hire_date').asc(), )) self.assertQuerysetEqual(qs, [ ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 37000), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 50000), ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), None), ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), None), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None), ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), None), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), None), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), None), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None), ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead), ordered=False ) @skipUnlessDBFeature('supports_default_in_lead_lag') def test_lead_default(self): qs = Employee.objects.annotate(lead_default=Window( expression=Lead(expression='salary', offset=5, default=60000), partition_by=F('department'), order_by=F('department').asc(), )) self.assertEqual(list(qs.values_list('lead_default', flat=True).distinct()), [60000]) def test_ntile(self): """ Compute the group for each of the employees across the entire company, based on how high the salary is for them. There are twelve employees so it divides evenly into four groups. """ qs = Employee.objects.annotate(ntile=Window( expression=Ntile(num_buckets=4), order_by=F('salary').desc(), )).order_by('ntile', '-salary', 'name') self.assertQuerysetEqual(qs, [ ('Miller', 'Management', 100000, 1), ('Johnson', 'Management', 80000, 1), ('Wilkinson', 'IT', 60000, 1), ('Smith', 'Sales', 55000, 2), ('Brown', 'Sales', 53000, 2), ('Adams', 'Accounting', 50000, 2), ('Jenson', 'Accounting', 45000, 3), ('Jones', 'Accounting', 45000, 3), ('Johnson', 'Marketing', 40000, 3), ('Smith', 'Marketing', 38000, 4), ('Williams', 'Accounting', 37000, 4), ('Moore', 'IT', 34000, 4), ], lambda x: (x.name, x.department, x.salary, x.ntile)) def test_percent_rank(self): """ Calculate the percentage rank of the employees across the entire company based on salary and name (in case of ambiguity). """ qs = Employee.objects.annotate(percent_rank=Window( expression=PercentRank(), order_by=[F('salary').asc(), F('name').asc()], )).order_by('percent_rank') # Round to account for precision differences among databases. self.assertQuerysetEqual(qs, [ ('Moore', 'IT', 34000, 0.0), ('Williams', 'Accounting', 37000, 0.0909090909), ('Smith', 'Marketing', 38000, 0.1818181818), ('Johnson', 'Marketing', 40000, 0.2727272727), ('Jenson', 'Accounting', 45000, 0.3636363636), ('Jones', 'Accounting', 45000, 0.4545454545), ('Adams', 'Accounting', 50000, 0.5454545455), ('Brown', 'Sales', 53000, 0.6363636364), ('Smith', 'Sales', 55000, 0.7272727273), ('Wilkinson', 'IT', 60000, 0.8181818182), ('Johnson', 'Management', 80000, 0.9090909091), ('Miller', 'Management', 100000, 1.0), ], transform=lambda row: (row.name, row.department, row.salary, round(row.percent_rank, 10))) def test_nth_returns_null(self): """ Find the nth row of the data set. None is returned since there are fewer than 20 rows in the test data. 
""" qs = Employee.objects.annotate(nth_value=Window( expression=NthValue('salary', nth=20), order_by=F('salary').asc() )) self.assertEqual(list(qs.values_list('nth_value', flat=True).distinct()), [None]) def test_multiple_partitioning(self): """ Find the maximum salary for each department for people hired in the same year. """ qs = Employee.objects.annotate(max=Window( expression=Max('salary'), partition_by=[F('department'), ExtractYear(F('hire_date'))], )).order_by('department', 'hire_date', 'name') self.assertQuerysetEqual(qs, [ ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000), ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000), ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000), ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000), ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.max)) def test_multiple_ordering(self): """ Accumulate the salaries over the departments based on hire_date. If two people were hired on the same date in the same department, the ordering clause will render a different result for those people. """ qs = Employee.objects.annotate(sum=Window( expression=Sum('salary'), partition_by='department', order_by=[F('hire_date').asc(), F('name').asc()], )).order_by('department', 'sum') self.assertQuerysetEqual(qs, [ ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000), ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 127000), ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 177000), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 94000), ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 180000), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 78000), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 108000), ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum)) def test_related_ordering_with_count(self): qs = Employee.objects.annotate(department_sum=Window( expression=Sum('salary'), partition_by=F('department'), order_by=['classification__code'], )) self.assertEqual(qs.count(), 12) @skipUnlessDBFeature('supports_frame_range_fixed_distance') def test_range_n_preceding_and_following(self): qs = Employee.objects.annotate(sum=Window( expression=Sum('salary'), order_by=F('salary').asc(), partition_by='department', frame=ValueRange(start=-2, end=2), )) self.assertIn('RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING', str(qs.query)) self.assertQuerysetEqual(qs, [ ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000), ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 90000), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000), ('Adams', 50000, 
'Accounting', datetime.date(2013, 7, 1), 50000), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000), ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 80000), ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum), ordered=False) def test_range_unbound(self): """A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING.""" qs = Employee.objects.annotate(sum=Window( expression=Sum('salary'), partition_by='age', order_by=[F('age').asc()], frame=ValueRange(start=None, end=None), )).order_by('department', 'hire_date', 'name') self.assertIn('RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING', str(qs.query)) self.assertQuerysetEqual(qs, [ ('Jones', 'Accounting', 45000, datetime.date(2005, 11, 1), 165000), ('Jenson', 'Accounting', 45000, datetime.date(2008, 4, 1), 165000), ('Williams', 'Accounting', 37000, datetime.date(2009, 6, 1), 165000), ('Adams', 'Accounting', 50000, datetime.date(2013, 7, 1), 130000), ('Wilkinson', 'IT', 60000, datetime.date(2011, 3, 1), 194000), ('Moore', 'IT', 34000, datetime.date(2013, 8, 1), 194000), ('Miller', 'Management', 100000, datetime.date(2005, 6, 1), 194000), ('Johnson', 'Management', 80000, datetime.date(2005, 7, 1), 130000), ('Smith', 'Marketing', 38000, datetime.date(2009, 10, 1), 165000), ('Johnson', 'Marketing', 40000, datetime.date(2012, 3, 1), 148000), ('Smith', 'Sales', 55000, datetime.date(2007, 6, 1), 148000), ('Brown', 'Sales', 53000, datetime.date(2009, 9, 1), 148000) ], transform=lambda row: (row.name, row.department, row.salary, row.hire_date, row.sum)) @skipIf( connection.vendor == 'sqlite' and connection.Database.sqlite_version_info < (3, 27), 'Nondeterministic failure on SQLite < 3.27.' ) def test_subquery_row_range_rank(self): qs = Employee.objects.annotate( highest_avg_salary_date=Subquery( Employee.objects.filter( department=OuterRef('department'), ).annotate( avg_salary=Window( expression=Avg('salary'), order_by=[F('hire_date').asc()], frame=RowRange(start=-1, end=1), ), ).order_by('-avg_salary', 'hire_date').values('hire_date')[:1], ), ).order_by('department', 'name') self.assertQuerysetEqual(qs, [ ('Adams', 'Accounting', datetime.date(2005, 11, 1)), ('Jenson', 'Accounting', datetime.date(2005, 11, 1)), ('Jones', 'Accounting', datetime.date(2005, 11, 1)), ('Williams', 'Accounting', datetime.date(2005, 11, 1)), ('Moore', 'IT', datetime.date(2011, 3, 1)), ('Wilkinson', 'IT', datetime.date(2011, 3, 1)), ('Johnson', 'Management', datetime.date(2005, 6, 1)), ('Miller', 'Management', datetime.date(2005, 6, 1)), ('Johnson', 'Marketing', datetime.date(2009, 10, 1)), ('Smith', 'Marketing', datetime.date(2009, 10, 1)), ('Brown', 'Sales', datetime.date(2007, 6, 1)), ('Smith', 'Sales', datetime.date(2007, 6, 1)), ], transform=lambda row: (row.name, row.department, row.highest_avg_salary_date)) def test_row_range_rank(self): """ A query with ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING. The resulting sum is the sum of the three next (if they exist) and all previous rows according to the ordering clause. 
""" qs = Employee.objects.annotate(sum=Window( expression=Sum('salary'), order_by=[F('hire_date').asc(), F('name').desc()], frame=RowRange(start=None, end=3), )).order_by('sum', 'hire_date') self.assertIn('ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING', str(qs.query)) self.assertQuerysetEqual(qs, [ ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 280000), ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 325000), ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 362000), ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 415000), ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 453000), ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 513000), ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 553000), ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 603000), ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 637000), ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 637000), ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 637000), ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 637000), ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum)) @skipUnlessDBFeature('can_distinct_on_fields') def test_distinct_window_function(self): """ Window functions are not aggregates, and hence a query to filter out duplicates may be useful. """ qs = Employee.objects.annotate( sum=Window( expression=Sum('salary'), partition_by=ExtractYear('hire_date'), order_by=ExtractYear('hire_date') ), year=ExtractYear('hire_date'), ).values('year', 'sum').distinct('year').order_by('year') results = [ {'year': 2005, 'sum': 225000}, {'year': 2007, 'sum': 55000}, {'year': 2008, 'sum': 45000}, {'year': 2009, 'sum': 128000}, {'year': 2011, 'sum': 60000}, {'year': 2012, 'sum': 40000}, {'year': 2013, 'sum': 84000}, ] for idx, val in zip(range(len(results)), results): with self.subTest(result=val): self.assertEqual(qs[idx], val) def test_fail_update(self): """Window expressions can't be used in an UPDATE statement.""" msg = ( 'Window expressions are not allowed in this query (salary=<Window: ' 'Max(Col(expressions_window_employee, expressions_window.Employee.salary)) ' 'OVER (PARTITION BY Col(expressions_window_employee, ' 'expressions_window.Employee.department))>).' 
) with self.assertRaisesMessage(FieldError, msg): Employee.objects.filter(department='Management').update( salary=Window(expression=Max('salary'), partition_by='department'), ) def test_fail_insert(self): """Window expressions can't be used in an INSERT statement.""" msg = ( 'Window expressions are not allowed in this query (salary=<Window: ' 'Sum(Value(10000), order_by=OrderBy(F(pk), descending=False)) OVER ()' ) with self.assertRaisesMessage(FieldError, msg): Employee.objects.create( name='Jameson', department='Management', hire_date=datetime.date(2007, 7, 1), salary=Window(expression=Sum(Value(10000), order_by=F('pk').asc())), ) def test_window_expression_within_subquery(self): subquery_qs = Employee.objects.annotate( highest=Window(FirstValue('id'), partition_by=F('department'), order_by=F('salary').desc()) ).values('highest') highest_salary = Employee.objects.filter(pk__in=subquery_qs) self.assertCountEqual(highest_salary.values('department', 'salary'), [ {'department': 'Accounting', 'salary': 50000}, {'department': 'Sales', 'salary': 55000}, {'department': 'Marketing', 'salary': 40000}, {'department': 'IT', 'salary': 60000}, {'department': 'Management', 'salary': 100000} ]) @skipUnlessDBFeature('supports_json_field') def test_key_transform(self): Detail.objects.bulk_create([ Detail(value={'department': 'IT', 'name': 'Smith', 'salary': 37000}), Detail(value={'department': 'IT', 'name': 'Nowak', 'salary': 32000}), Detail(value={'department': 'HR', 'name': 'Brown', 'salary': 50000}), Detail(value={'department': 'HR', 'name': 'Smith', 'salary': 55000}), Detail(value={'department': 'PR', 'name': 'Moore', 'salary': 90000}), ]) qs = Detail.objects.annotate(department_sum=Window( expression=Sum(Cast( KeyTextTransform('salary', 'value'), output_field=IntegerField(), )), partition_by=[KeyTransform('department', 'value')], order_by=[KeyTransform('name', 'value')], )).order_by('value__department', 'department_sum') self.assertQuerysetEqual(qs, [ ('Brown', 'HR', 50000, 50000), ('Smith', 'HR', 55000, 105000), ('Nowak', 'IT', 32000, 32000), ('Smith', 'IT', 37000, 69000), ('Moore', 'PR', 90000, 90000), ], lambda entry: ( entry.value['name'], entry.value['department'], entry.value['salary'], entry.department_sum, )) def test_invalid_start_value_range(self): msg = "start argument must be a negative integer, zero, or None, but got '3'." with self.assertRaisesMessage(ValueError, msg): list(Employee.objects.annotate(test=Window( expression=Sum('salary'), order_by=F('hire_date').asc(), frame=ValueRange(start=3), ))) def test_invalid_end_value_range(self): msg = "end argument must be a positive integer, zero, or None, but got '-3'." with self.assertRaisesMessage(ValueError, msg): list(Employee.objects.annotate(test=Window( expression=Sum('salary'), order_by=F('hire_date').asc(), frame=ValueRange(end=-3), ))) def test_invalid_type_end_value_range(self): msg = "end argument must be a positive integer, zero, or None, but got 'a'." with self.assertRaisesMessage(ValueError, msg): list(Employee.objects.annotate(test=Window( expression=Sum('salary'), order_by=F('hire_date').asc(), frame=ValueRange(end='a'), ))) def test_invalid_type_start_value_range(self): msg = "start argument must be a negative integer, zero, or None, but got 'a'." with self.assertRaisesMessage(ValueError, msg): list(Employee.objects.annotate(test=Window( expression=Sum('salary'), frame=ValueRange(start='a'), ))) def test_invalid_type_end_row_range(self): msg = "end argument must be a positive integer, zero, or None, but got 'a'." 
with self.assertRaisesMessage(ValueError, msg): list(Employee.objects.annotate(test=Window( expression=Sum('salary'), frame=RowRange(end='a'), ))) @skipUnlessDBFeature('only_supports_unbounded_with_preceding_and_following') def test_unsupported_range_frame_start(self): msg = '%s only supports UNBOUNDED together with PRECEDING and FOLLOWING.' % connection.display_name with self.assertRaisesMessage(NotSupportedError, msg): list(Employee.objects.annotate(test=Window( expression=Sum('salary'), order_by=F('hire_date').asc(), frame=ValueRange(start=-1), ))) @skipUnlessDBFeature('only_supports_unbounded_with_preceding_and_following') def test_unsupported_range_frame_end(self): msg = '%s only supports UNBOUNDED together with PRECEDING and FOLLOWING.' % connection.display_name with self.assertRaisesMessage(NotSupportedError, msg): list(Employee.objects.annotate(test=Window( expression=Sum('salary'), order_by=F('hire_date').asc(), frame=ValueRange(end=1), ))) def test_invalid_type_start_row_range(self): msg = "start argument must be a negative integer, zero, or None, but got 'a'." with self.assertRaisesMessage(ValueError, msg): list(Employee.objects.annotate(test=Window( expression=Sum('salary'), order_by=F('hire_date').asc(), frame=RowRange(start='a'), ))) class WindowUnsupportedTests(TestCase): def test_unsupported_backend(self): msg = 'This backend does not support window expressions.' with mock.patch.object(connection.features, 'supports_over_clause', False): with self.assertRaisesMessage(NotSupportedError, msg): Employee.objects.annotate(dense_rank=Window(expression=DenseRank())).get() class NonQueryWindowTests(SimpleTestCase): def test_window_repr(self): self.assertEqual( repr(Window(expression=Sum('salary'), partition_by='department')), '<Window: Sum(F(salary)) OVER (PARTITION BY F(department))>' ) self.assertEqual( repr(Window(expression=Avg('salary'), order_by=F('department').asc())), '<Window: Avg(F(salary)) OVER (ORDER BY OrderBy(F(department), descending=False))>' ) def test_window_frame_repr(self): self.assertEqual( repr(RowRange(start=-1)), '<RowRange: ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING>' ) self.assertEqual( repr(ValueRange(start=None, end=1)), '<ValueRange: RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING>' ) self.assertEqual( repr(ValueRange(start=0, end=0)), '<ValueRange: RANGE BETWEEN CURRENT ROW AND CURRENT ROW>' ) self.assertEqual( repr(RowRange(start=0, end=0)), '<RowRange: ROWS BETWEEN CURRENT ROW AND CURRENT ROW>' ) def test_empty_group_by_cols(self): window = Window(expression=Sum('pk')) self.assertEqual(window.get_group_by_cols(), []) self.assertFalse(window.contains_aggregate) def test_frame_empty_group_by_cols(self): frame = WindowFrame() self.assertEqual(frame.get_group_by_cols(), []) def test_frame_window_frame_notimplemented(self): frame = WindowFrame() msg = 'Subclasses must implement window_frame_start_end().' 
with self.assertRaisesMessage(NotImplementedError, msg): frame.window_frame_start_end(None, None, None) def test_invalid_filter(self): msg = 'Window is disallowed in the filter clause' qs = Employee.objects.annotate(dense_rank=Window(expression=DenseRank())) with self.assertRaisesMessage(NotSupportedError, msg): qs.filter(dense_rank__gte=1) with self.assertRaisesMessage(NotSupportedError, msg): qs.annotate(inc_rank=F('dense_rank') + Value(1)).filter(inc_rank__gte=1) with self.assertRaisesMessage(NotSupportedError, msg): qs.filter(id=F('dense_rank')) with self.assertRaisesMessage(NotSupportedError, msg): qs.filter(id=Func('dense_rank', 2, function='div')) with self.assertRaisesMessage(NotSupportedError, msg): qs.annotate(total=Sum('dense_rank', filter=Q(name='Jones'))).filter(total=1) def test_conditional_annotation(self): qs = Employee.objects.annotate( dense_rank=Window(expression=DenseRank()), ).annotate( equal=Case( When(id=F('dense_rank'), then=Value(True)), default=Value(False), output_field=BooleanField(), ), ) # The SQL standard disallows referencing window functions in the WHERE # clause. msg = 'Window is disallowed in the filter clause' with self.assertRaisesMessage(NotSupportedError, msg): qs.filter(equal=True) def test_invalid_order_by(self): msg = 'order_by must be either an Expression or a sequence of expressions' with self.assertRaisesMessage(ValueError, msg): Window(expression=Sum('power'), order_by='-horse') def test_invalid_source_expression(self): msg = "Expression 'Upper' isn't compatible with OVER clauses." with self.assertRaisesMessage(ValueError, msg): Window(expression=Upper('name'))
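# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original test suite): how the
# Window annotations exercised above map onto a plain OVER clause. It assumes
# the Employee model from expressions_window/models.py and a backend with
# window-expression support; it is a hedged example, not one of the tests.
def _example_rank_by_department():
    """Rank employees by salary within each department."""
    from django.db.models import F, Window
    from django.db.models.functions import Rank

    # Roughly: RANK() OVER (PARTITION BY department ORDER BY salary DESC)
    return Employee.objects.annotate(
        dept_rank=Window(
            expression=Rank(),
            partition_by=F('department'),
            order_by=F('salary').desc(),
        ),
    ).order_by('department', 'dept_rank')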
from django.db import models class Classification(models.Model): code = models.CharField(max_length=10) class Employee(models.Model): name = models.CharField(max_length=40, blank=False, null=False) salary = models.PositiveIntegerField() department = models.CharField(max_length=40, blank=False, null=False) hire_date = models.DateField(blank=False, null=False) age = models.IntegerField(blank=False, null=False) classification = models.ForeignKey('Classification', on_delete=models.CASCADE, null=True) bonus = models.DecimalField(decimal_places=2, max_digits=15, null=True) class Detail(models.Model): value = models.JSONField() class Meta: required_db_features = {'supports_json_field'}
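# Usage sketch (added for illustration; not part of the original module):
# creating rows against the models above, e.g. from a shell, to try the
# window-function queries in tests.py. The age and classification code values
# below are made up for the example.
#
#   import datetime
#   it = Classification.objects.create(code='IT')
#   Employee.objects.create(
#       name='Moore', department='IT', salary=34000, age=40,
#       hire_date=datetime.date(2013, 8, 1), classification=it,
#   )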
import asyncio import threading from asgiref.sync import async_to_sync from django.contrib.admindocs.middleware import XViewMiddleware from django.contrib.auth.middleware import ( AuthenticationMiddleware, RemoteUserMiddleware, ) from django.contrib.flatpages.middleware import FlatpageFallbackMiddleware from django.contrib.messages.middleware import MessageMiddleware from django.contrib.redirects.middleware import RedirectFallbackMiddleware from django.contrib.sessions.middleware import SessionMiddleware from django.contrib.sites.middleware import CurrentSiteMiddleware from django.db import connection from django.http.request import HttpRequest from django.http.response import HttpResponse from django.middleware.cache import ( CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware, ) from django.middleware.clickjacking import XFrameOptionsMiddleware from django.middleware.common import ( BrokenLinkEmailsMiddleware, CommonMiddleware, ) from django.middleware.csrf import CsrfViewMiddleware from django.middleware.gzip import GZipMiddleware from django.middleware.http import ConditionalGetMiddleware from django.middleware.locale import LocaleMiddleware from django.middleware.security import SecurityMiddleware from django.test import SimpleTestCase from django.utils.deprecation import MiddlewareMixin, RemovedInDjango40Warning class MiddlewareMixinTests(SimpleTestCase): """ Deprecation warning is raised when using get_response=None. """ msg = 'Passing None for the middleware get_response argument is deprecated.' middlewares = [ AuthenticationMiddleware, BrokenLinkEmailsMiddleware, CacheMiddleware, CommonMiddleware, ConditionalGetMiddleware, CsrfViewMiddleware, CurrentSiteMiddleware, FetchFromCacheMiddleware, FlatpageFallbackMiddleware, GZipMiddleware, LocaleMiddleware, MessageMiddleware, RedirectFallbackMiddleware, RemoteUserMiddleware, SecurityMiddleware, SessionMiddleware, UpdateCacheMiddleware, XFrameOptionsMiddleware, XViewMiddleware, ] def test_deprecation(self): for middleware in self.middlewares: with self.subTest(middleware=middleware): with self.assertRaisesMessage(RemovedInDjango40Warning, self.msg): middleware() def test_passing_explicit_none(self): for middleware in self.middlewares: with self.subTest(middleware=middleware): with self.assertRaisesMessage(RemovedInDjango40Warning, self.msg): middleware(None) def test_coroutine(self): async def async_get_response(request): return HttpResponse() def sync_get_response(request): return HttpResponse() for middleware in self.middlewares: with self.subTest(middleware=middleware.__qualname__): # Middleware appears as coroutine if get_function is # a coroutine. middleware_instance = middleware(async_get_response) self.assertIs(asyncio.iscoroutinefunction(middleware_instance), True) # Middleware doesn't appear as coroutine if get_function is not # a coroutine. middleware_instance = middleware(sync_get_response) self.assertIs(asyncio.iscoroutinefunction(middleware_instance), False) def test_sync_to_async_uses_base_thread_and_connection(self): """ The process_request() and process_response() hooks must be called with the sync_to_async thread_sensitive flag enabled, so that database operations use the correct thread and connection. 
""" def request_lifecycle(): """Fake request_started/request_finished.""" return (threading.get_ident(), id(connection)) async def get_response(self): return HttpResponse() class SimpleMiddleWare(MiddlewareMixin): def process_request(self, request): request.thread_and_connection = request_lifecycle() def process_response(self, request, response): response.thread_and_connection = request_lifecycle() return response threads_and_connections = [] threads_and_connections.append(request_lifecycle()) request = HttpRequest() response = async_to_sync(SimpleMiddleWare(get_response))(request) threads_and_connections.append(request.thread_and_connection) threads_and_connections.append(response.thread_and_connection) threads_and_connections.append(request_lifecycle()) self.assertEqual(len(threads_and_connections), 4) self.assertEqual(len(set(threads_and_connections)), 1)
import base64 import hashlib import os import shutil import sys import tempfile as sys_tempfile import unittest from io import BytesIO, StringIO from unittest import mock from urllib.parse import quote from django.core.files import temp as tempfile from django.core.files.uploadedfile import SimpleUploadedFile from django.http.multipartparser import ( FILE, MultiPartParser, MultiPartParserError, Parser, parse_header, ) from django.test import SimpleTestCase, TestCase, client, override_settings from . import uploadhandler from .models import FileModel UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg' MEDIA_ROOT = sys_tempfile.mkdtemp() UPLOAD_TO = os.path.join(MEDIA_ROOT, 'test_upload') @override_settings(MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF='file_uploads.urls', MIDDLEWARE=[]) class FileUploadTests(TestCase): @classmethod def setUpClass(cls): super().setUpClass() os.makedirs(MEDIA_ROOT, exist_ok=True) @classmethod def tearDownClass(cls): shutil.rmtree(MEDIA_ROOT) super().tearDownClass() def test_simple_upload(self): with open(__file__, 'rb') as fp: post_data = { 'name': 'Ringo', 'file_field': fp, } response = self.client.post('/upload/', post_data) self.assertEqual(response.status_code, 200) def test_large_upload(self): file = tempfile.NamedTemporaryFile with file(suffix=".file1") as file1, file(suffix=".file2") as file2: file1.write(b'a' * (2 ** 21)) file1.seek(0) file2.write(b'a' * (10 * 2 ** 20)) file2.seek(0) post_data = { 'name': 'Ringo', 'file_field1': file1, 'file_field2': file2, } for key in list(post_data): try: post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest() post_data[key].seek(0) except AttributeError: post_data[key + '_hash'] = hashlib.sha1(post_data[key].encode()).hexdigest() response = self.client.post('/verify/', post_data) self.assertEqual(response.status_code, 200) def _test_base64_upload(self, content, encode=base64.b64encode): payload = client.FakePayload("\r\n".join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file"; filename="test.txt"', 'Content-Type: application/octet-stream', 'Content-Transfer-Encoding: base64', ''])) payload.write(b'\r\n' + encode(content.encode()) + b'\r\n') payload.write('--' + client.BOUNDARY + '--\r\n') r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': "/echo_content/", 'REQUEST_METHOD': 'POST', 'wsgi.input': payload, } response = self.client.request(**r) self.assertEqual(response.json()['file'], content) def test_base64_upload(self): self._test_base64_upload("This data will be transmitted base64-encoded.") def test_big_base64_upload(self): self._test_base64_upload("Big data" * 68000) # > 512Kb def test_big_base64_newlines_upload(self): self._test_base64_upload("Big data" * 68000, encode=base64.encodebytes) def test_unicode_file_name(self): with sys_tempfile.TemporaryDirectory() as temp_dir: # This file contains Chinese symbols and an accented char in the name. with open(os.path.join(temp_dir, UNICODE_FILENAME), 'w+b') as file1: file1.write(b'b' * (2 ** 10)) file1.seek(0) response = self.client.post('/unicode_name/', {'file_unicode': file1}) self.assertEqual(response.status_code, 200) def test_unicode_file_name_rfc2231(self): """ Test receiving file upload when filename is encoded with RFC2231 (#22971). 
""" payload = client.FakePayload() payload.write('\r\n'.join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'%s' % quote(UNICODE_FILENAME), 'Content-Type: application/octet-stream', '', 'You got pwnd.\r\n', '\r\n--' + client.BOUNDARY + '--\r\n' ])) r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': "/unicode_name/", 'REQUEST_METHOD': 'POST', 'wsgi.input': payload, } response = self.client.request(**r) self.assertEqual(response.status_code, 200) def test_unicode_name_rfc2231(self): """ Test receiving file upload when filename is encoded with RFC2231 (#22971). """ payload = client.FakePayload() payload.write( '\r\n'.join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'%s' % quote( UNICODE_FILENAME ), 'Content-Type: application/octet-stream', '', 'You got pwnd.\r\n', '\r\n--' + client.BOUNDARY + '--\r\n' ]) ) r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': "/unicode_name/", 'REQUEST_METHOD': 'POST', 'wsgi.input': payload, } response = self.client.request(**r) self.assertEqual(response.status_code, 200) def test_unicode_file_name_rfc2231_with_double_quotes(self): payload = client.FakePayload() payload.write('\r\n'.join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file_unicode"; ' 'filename*="UTF-8\'\'%s"' % quote(UNICODE_FILENAME), 'Content-Type: application/octet-stream', '', 'You got pwnd.\r\n', '\r\n--' + client.BOUNDARY + '--\r\n', ])) r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': '/unicode_name/', 'REQUEST_METHOD': 'POST', 'wsgi.input': payload, } response = self.client.request(**r) self.assertEqual(response.status_code, 200) def test_unicode_name_rfc2231_with_double_quotes(self): payload = client.FakePayload() payload.write('\r\n'.join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name*="UTF-8\'\'file_unicode"; ' 'filename*="UTF-8\'\'%s"' % quote(UNICODE_FILENAME), 'Content-Type: application/octet-stream', '', 'You got pwnd.\r\n', '\r\n--' + client.BOUNDARY + '--\r\n' ])) r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': '/unicode_name/', 'REQUEST_METHOD': 'POST', 'wsgi.input': payload, } response = self.client.request(**r) self.assertEqual(response.status_code, 200) def test_blank_filenames(self): """ Receiving file upload when filename is blank (before and after sanitization) should be okay. """ filenames = [ '', # Normalized by MultiPartParser.IE_sanitize(). 'C:\\Windows\\', # Normalized by os.path.basename(). 
'/', 'ends-with-slash/', ] payload = client.FakePayload() for i, name in enumerate(filenames): payload.write('\r\n'.join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name), 'Content-Type: application/octet-stream', '', 'You got pwnd.\r\n' ])) payload.write('\r\n--' + client.BOUNDARY + '--\r\n') r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': '/echo/', 'REQUEST_METHOD': 'POST', 'wsgi.input': payload, } response = self.client.request(**r) self.assertEqual(response.status_code, 200) # Empty filenames should be ignored received = response.json() for i, name in enumerate(filenames): self.assertIsNone(received.get('file%s' % i)) def test_dangerous_file_names(self): """Uploaded file names should be sanitized before ever reaching the view.""" # This test simulates possible directory traversal attacks by a # malicious uploader We have to do some monkeybusiness here to construct # a malicious payload with an invalid file name (containing os.sep or # os.pardir). This similar to what an attacker would need to do when # trying such an attack. scary_file_names = [ "/tmp/hax0rd.txt", # Absolute path, *nix-style. "C:\\Windows\\hax0rd.txt", # Absolute path, win-style. "C:/Windows/hax0rd.txt", # Absolute path, broken-style. "\\tmp\\hax0rd.txt", # Absolute path, broken in a different way. "/tmp\\hax0rd.txt", # Absolute path, broken by mixing. "subdir/hax0rd.txt", # Descendant path, *nix-style. "subdir\\hax0rd.txt", # Descendant path, win-style. "sub/dir\\hax0rd.txt", # Descendant path, mixed. "../../hax0rd.txt", # Relative path, *nix-style. "..\\..\\hax0rd.txt", # Relative path, win-style. "../..\\hax0rd.txt" # Relative path, mixed. ] payload = client.FakePayload() for i, name in enumerate(scary_file_names): payload.write('\r\n'.join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name), 'Content-Type: application/octet-stream', '', 'You got pwnd.\r\n' ])) payload.write('\r\n--' + client.BOUNDARY + '--\r\n') r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': "/echo/", 'REQUEST_METHOD': 'POST', 'wsgi.input': payload, } response = self.client.request(**r) # The filenames should have been sanitized by the time it got to the view. 
received = response.json() for i, name in enumerate(scary_file_names): got = received["file%s" % i] self.assertEqual(got, "hax0rd.txt") def test_filename_overflow(self): """File names over 256 characters (dangerous on some platforms) get fixed up.""" long_str = 'f' * 300 cases = [ # field name, filename, expected ('long_filename', '%s.txt' % long_str, '%s.txt' % long_str[:251]), ('long_extension', 'foo.%s' % long_str, '.%s' % long_str[:254]), ('no_extension', long_str, long_str[:255]), ('no_filename', '.%s' % long_str, '.%s' % long_str[:254]), ('long_everything', '%s.%s' % (long_str, long_str), '.%s' % long_str[:254]), ] payload = client.FakePayload() for name, filename, _ in cases: payload.write("\r\n".join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="{}"; filename="{}"', 'Content-Type: application/octet-stream', '', 'Oops.', '' ]).format(name, filename)) payload.write('\r\n--' + client.BOUNDARY + '--\r\n') r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': "/echo/", 'REQUEST_METHOD': 'POST', 'wsgi.input': payload, } response = self.client.request(**r) result = response.json() for name, _, expected in cases: got = result[name] self.assertEqual(expected, got, 'Mismatch for {}'.format(name)) self.assertLess(len(got), 256, "Got a long file name (%s characters)." % len(got)) def test_file_content(self): file = tempfile.NamedTemporaryFile with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file: no_content_type.write(b'no content') no_content_type.seek(0) simple_file.write(b'text content') simple_file.seek(0) simple_file.content_type = 'text/plain' string_io = StringIO('string content') bytes_io = BytesIO(b'binary content') response = self.client.post('/echo_content/', { 'no_content_type': no_content_type, 'simple_file': simple_file, 'string': string_io, 'binary': bytes_io, }) received = response.json() self.assertEqual(received['no_content_type'], 'no content') self.assertEqual(received['simple_file'], 'text content') self.assertEqual(received['string'], 'string content') self.assertEqual(received['binary'], 'binary content') def test_content_type_extra(self): """Uploaded files may have content type parameters available.""" file = tempfile.NamedTemporaryFile with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file: no_content_type.write(b'something') no_content_type.seek(0) simple_file.write(b'something') simple_file.seek(0) simple_file.content_type = 'text/plain; test-key=test_value' response = self.client.post('/echo_content_type_extra/', { 'no_content_type': no_content_type, 'simple_file': simple_file, }) received = response.json() self.assertEqual(received['no_content_type'], {}) self.assertEqual(received['simple_file'], {'test-key': 'test_value'}) def test_truncated_multipart_handled_gracefully(self): """ If passed an incomplete multipart message, MultiPartParser does not attempt to read beyond the end of the stream, and simply will handle the part that can be parsed gracefully. 
""" payload_str = "\r\n".join([ '--' + client.BOUNDARY, 'Content-Disposition: form-data; name="file"; filename="foo.txt"', 'Content-Type: application/octet-stream', '', 'file contents' '--' + client.BOUNDARY + '--', '', ]) payload = client.FakePayload(payload_str[:-10]) r = { 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': '/echo/', 'REQUEST_METHOD': 'POST', 'wsgi.input': payload, } self.assertEqual(self.client.request(**r).json(), {}) def test_empty_multipart_handled_gracefully(self): """ If passed an empty multipart message, MultiPartParser will return an empty QueryDict. """ r = { 'CONTENT_LENGTH': 0, 'CONTENT_TYPE': client.MULTIPART_CONTENT, 'PATH_INFO': '/echo/', 'REQUEST_METHOD': 'POST', 'wsgi.input': client.FakePayload(b''), } self.assertEqual(self.client.request(**r).json(), {}) def test_custom_upload_handler(self): file = tempfile.NamedTemporaryFile with file() as smallfile, file() as bigfile: # A small file (under the 5M quota) smallfile.write(b'a' * (2 ** 21)) smallfile.seek(0) # A big file (over the quota) bigfile.write(b'a' * (10 * 2 ** 20)) bigfile.seek(0) # Small file posting should work. self.assertIn('f', self.client.post('/quota/', {'f': smallfile}).json()) # Large files don't go through. self.assertNotIn('f', self.client.post("/quota/", {'f': bigfile}).json()) def test_broken_custom_upload_handler(self): with tempfile.NamedTemporaryFile() as file: file.write(b'a' * (2 ** 21)) file.seek(0) msg = 'You cannot alter upload handlers after the upload has been processed.' with self.assertRaisesMessage(AttributeError, msg): self.client.post('/quota/broken/', {'f': file}) def test_stop_upload_temporary_file_handler(self): with tempfile.NamedTemporaryFile() as temp_file: temp_file.write(b'a') temp_file.seek(0) response = self.client.post('/temp_file/stop_upload/', {'file': temp_file}) temp_path = response.json()['temp_path'] self.assertIs(os.path.exists(temp_path), False) def test_upload_interrupted_temporary_file_handler(self): # Simulate an interrupted upload by omitting the closing boundary. class MockedParser(Parser): def __iter__(self): for item in super().__iter__(): item_type, meta_data, field_stream = item yield item_type, meta_data, field_stream if item_type == FILE: return with tempfile.NamedTemporaryFile() as temp_file: temp_file.write(b'a') temp_file.seek(0) with mock.patch( 'django.http.multipartparser.Parser', MockedParser, ): response = self.client.post( '/temp_file/upload_interrupted/', {'file': temp_file}, ) temp_path = response.json()['temp_path'] self.assertIs(os.path.exists(temp_path), False) def test_fileupload_getlist(self): file = tempfile.NamedTemporaryFile with file() as file1, file() as file2, file() as file2a: file1.write(b'a' * (2 ** 23)) file1.seek(0) file2.write(b'a' * (2 * 2 ** 18)) file2.seek(0) file2a.write(b'a' * (5 * 2 ** 20)) file2a.seek(0) response = self.client.post('/getlist_count/', { 'file1': file1, 'field1': 'test', 'field2': 'test3', 'field3': 'test5', 'field4': 'test6', 'field5': 'test7', 'file2': (file2, file2a) }) got = response.json() self.assertEqual(got.get('file1'), 1) self.assertEqual(got.get('file2'), 2) def test_fileuploads_closed_at_request_end(self): file = tempfile.NamedTemporaryFile with file() as f1, file() as f2a, file() as f2b: response = self.client.post('/fd_closing/t/', { 'file': f1, 'file2': (f2a, f2b), }) request = response.wsgi_request # The files were parsed. 
self.assertTrue(hasattr(request, '_files')) file = request._files['file'] self.assertTrue(file.closed) files = request._files.getlist('file2') self.assertTrue(files[0].closed) self.assertTrue(files[1].closed) def test_no_parsing_triggered_by_fd_closing(self): file = tempfile.NamedTemporaryFile with file() as f1, file() as f2a, file() as f2b: response = self.client.post('/fd_closing/f/', { 'file': f1, 'file2': (f2a, f2b), }) request = response.wsgi_request # The fd closing logic doesn't trigger parsing of the stream self.assertFalse(hasattr(request, '_files')) def test_file_error_blocking(self): """ The server should not block when there are upload errors (bug #8622). This can happen if something -- i.e. an exception handler -- tries to access POST while handling an error in parsing POST. This shouldn't cause an infinite loop! """ class POSTAccessingHandler(client.ClientHandler): """A handler that'll access POST during an exception.""" def handle_uncaught_exception(self, request, resolver, exc_info): ret = super().handle_uncaught_exception(request, resolver, exc_info) request.POST # evaluate return ret # Maybe this is a little more complicated that it needs to be; but if # the django.test.client.FakePayload.read() implementation changes then # this test would fail. So we need to know exactly what kind of error # it raises when there is an attempt to read more than the available bytes: try: client.FakePayload(b'a').read(2) except Exception as err: reference_error = err # install the custom handler that tries to access request.POST self.client.handler = POSTAccessingHandler() with open(__file__, 'rb') as fp: post_data = { 'name': 'Ringo', 'file_field': fp, } try: self.client.post('/upload_errors/', post_data) except reference_error.__class__ as err: self.assertNotEqual( str(err), str(reference_error), "Caught a repeated exception that'll cause an infinite loop in file uploads." ) except Exception as err: # CustomUploadError is the error that should have been raised self.assertEqual(err.__class__, uploadhandler.CustomUploadError) def test_filename_case_preservation(self): """ The storage backend shouldn't mess with the case of the filenames uploaded. """ # Synthesize the contents of a file upload with a mixed case filename # so we don't have to carry such a file in the Django tests source code # tree. vars = {'boundary': 'oUrBoUnDaRyStRiNg'} post_data = [ '--%(boundary)s', 'Content-Disposition: form-data; name="file_field"; filename="MiXeD_cAsE.txt"', 'Content-Type: application/octet-stream', '', 'file contents\n' '', '--%(boundary)s--\r\n', ] response = self.client.post( '/filename_case/', '\r\n'.join(post_data) % vars, 'multipart/form-data; boundary=%(boundary)s' % vars ) self.assertEqual(response.status_code, 200) id = int(response.content) obj = FileModel.objects.get(pk=id) # The name of the file uploaded and the file stored in the server-side # shouldn't differ. 
self.assertEqual(os.path.basename(obj.testfile.path), 'MiXeD_cAsE.txt') @override_settings(MEDIA_ROOT=MEDIA_ROOT) class DirectoryCreationTests(SimpleTestCase): """ Tests for error handling during directory creation via _save_FIELD_file (ticket #6450) """ @classmethod def setUpClass(cls): super().setUpClass() os.makedirs(MEDIA_ROOT, exist_ok=True) @classmethod def tearDownClass(cls): shutil.rmtree(MEDIA_ROOT) super().tearDownClass() def setUp(self): self.obj = FileModel() @unittest.skipIf(sys.platform == 'win32', "Python on Windows doesn't have working os.chmod().") def test_readonly_root(self): """Permission errors are not swallowed""" os.chmod(MEDIA_ROOT, 0o500) self.addCleanup(os.chmod, MEDIA_ROOT, 0o700) with self.assertRaises(PermissionError): self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x'), save=False) def test_not_a_directory(self): # Create a file with the upload directory name open(UPLOAD_TO, 'wb').close() self.addCleanup(os.remove, UPLOAD_TO) msg = '%s exists and is not a directory.' % UPLOAD_TO with self.assertRaisesMessage(FileExistsError, msg): with SimpleUploadedFile('foo.txt', b'x') as file: self.obj.testfile.save('foo.txt', file, save=False) class MultiParserTests(SimpleTestCase): def test_empty_upload_handlers(self): # We're not actually parsing here; just checking if the parser properly # instantiates with empty upload handlers. MultiPartParser({ 'CONTENT_TYPE': 'multipart/form-data; boundary=_foo', 'CONTENT_LENGTH': '1' }, StringIO('x'), [], 'utf-8') def test_invalid_content_type(self): with self.assertRaisesMessage(MultiPartParserError, 'Invalid Content-Type: text/plain'): MultiPartParser({ 'CONTENT_TYPE': 'text/plain', 'CONTENT_LENGTH': '1', }, StringIO('x'), [], 'utf-8') def test_negative_content_length(self): with self.assertRaisesMessage(MultiPartParserError, 'Invalid content length: -1'): MultiPartParser({ 'CONTENT_TYPE': 'multipart/form-data; boundary=_foo', 'CONTENT_LENGTH': -1, }, StringIO('x'), [], 'utf-8') def test_bad_type_content_length(self): multipart_parser = MultiPartParser({ 'CONTENT_TYPE': 'multipart/form-data; boundary=_foo', 'CONTENT_LENGTH': 'a', }, StringIO('x'), [], 'utf-8') self.assertEqual(multipart_parser._content_length, 0) def test_rfc2231_parsing(self): test_data = ( (b"Content-Type: application/x-stuff; title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A", "This is ***fun***"), (b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html", "foo-ä.html"), (b"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html", "foo-ä.html"), ) for raw_line, expected_title in test_data: parsed = parse_header(raw_line) self.assertEqual(parsed[1]['title'], expected_title) def test_rfc2231_wrong_title(self): """ Test wrongly formatted RFC 2231 headers (missing double single quotes). Parsing should not crash (#24209). """ test_data = ( (b"Content-Type: application/x-stuff; title*='This%20is%20%2A%2A%2Afun%2A%2A%2A", b"'This%20is%20%2A%2A%2Afun%2A%2A%2A"), (b"Content-Type: application/x-stuff; title*='foo.html", b"'foo.html"), (b"Content-Type: application/x-stuff; title*=bar.html", b"bar.html"), ) for raw_line, expected_title in test_data: parsed = parse_header(raw_line) self.assertEqual(parsed[1]['title'], expected_title)
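# Usage sketch (added; not part of the original tests): parse_header(),
# exercised by MultiParserTests above, splits a raw header line into its main
# value plus a dict of parameters, decoding RFC 2231 encoded values such as
# filename*=UTF-8''...:
#
#   raw = b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html"
#   main_value, params = parse_header(raw)
#   params['title']  # 'foo-ä.html', decoded using the declared charset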
""" Upload handlers to test the upload API. """ from django.core.files.uploadhandler import ( FileUploadHandler, StopUpload, TemporaryFileUploadHandler, ) class QuotaUploadHandler(FileUploadHandler): """ This test upload handler terminates the connection if more than a quota (5MB) is uploaded. """ QUOTA = 5 * 2 ** 20 # 5 MB def __init__(self, request=None): super().__init__(request) self.total_upload = 0 def receive_data_chunk(self, raw_data, start): self.total_upload += len(raw_data) if self.total_upload >= self.QUOTA: raise StopUpload(connection_reset=True) return raw_data def file_complete(self, file_size): return None class StopUploadTemporaryFileHandler(TemporaryFileUploadHandler): """A handler that raises a StopUpload exception.""" def receive_data_chunk(self, raw_data, start): raise StopUpload() class CustomUploadError(Exception): pass class ErroringUploadHandler(FileUploadHandler): """A handler that raises an exception.""" def receive_data_chunk(self, raw_data, start): raise CustomUploadError("Oops!")
from django.urls import path, re_path from . import views urlpatterns = [ path('upload/', views.file_upload_view), path('verify/', views.file_upload_view_verify), path('unicode_name/', views.file_upload_unicode_name), path('echo/', views.file_upload_echo), path('echo_content_type_extra/', views.file_upload_content_type_extra), path('echo_content/', views.file_upload_echo_content), path('quota/', views.file_upload_quota), path('quota/broken/', views.file_upload_quota_broken), path('getlist_count/', views.file_upload_getlist_count), path('upload_errors/', views.file_upload_errors), path('temp_file/stop_upload/', views.file_stop_upload_temporary_file), path('temp_file/upload_interrupted/', views.file_upload_interrupted_temporary_file), path('filename_case/', views.file_upload_filename_case_view), re_path(r'^fd_closing/(?P<access>t|f)/$', views.file_upload_fd_closing), ]
import hashlib import os from django.core.files.uploadedfile import UploadedFile from django.core.files.uploadhandler import TemporaryFileUploadHandler from django.http import HttpResponse, HttpResponseServerError, JsonResponse from .models import FileModel from .tests import UNICODE_FILENAME, UPLOAD_TO from .uploadhandler import ( ErroringUploadHandler, QuotaUploadHandler, StopUploadTemporaryFileHandler, ) def file_upload_view(request): """ A file upload can be updated into the POST dictionary. """ form_data = request.POST.copy() form_data.update(request.FILES) if isinstance(form_data.get('file_field'), UploadedFile) and isinstance(form_data['name'], str): # If a file is posted, the dummy client should only post the file name, # not the full path. if os.path.dirname(form_data['file_field'].name) != '': return HttpResponseServerError() return HttpResponse() else: return HttpResponseServerError() def file_upload_view_verify(request): """ Use the sha digest hash to verify the uploaded contents. """ form_data = request.POST.copy() form_data.update(request.FILES) for key, value in form_data.items(): if key.endswith('_hash'): continue if key + '_hash' not in form_data: continue submitted_hash = form_data[key + '_hash'] if isinstance(value, UploadedFile): new_hash = hashlib.sha1(value.read()).hexdigest() else: new_hash = hashlib.sha1(value.encode()).hexdigest() if new_hash != submitted_hash: return HttpResponseServerError() # Adding large file to the database should succeed largefile = request.FILES['file_field2'] obj = FileModel() obj.testfile.save(largefile.name, largefile) return HttpResponse() def file_upload_unicode_name(request): # Check to see if Unicode name came through properly. if not request.FILES['file_unicode'].name.endswith(UNICODE_FILENAME): return HttpResponseServerError() # Check to make sure the exotic characters are preserved even # through file save. uni_named_file = request.FILES['file_unicode'] FileModel.objects.create(testfile=uni_named_file) full_name = '%s/%s' % (UPLOAD_TO, uni_named_file.name) return HttpResponse() if os.path.exists(full_name) else HttpResponseServerError() def file_upload_echo(request): """ Simple view to echo back info about uploaded files for tests. """ r = {k: f.name for k, f in request.FILES.items()} return JsonResponse(r) def file_upload_echo_content(request): """ Simple view to echo back the content of uploaded files for tests. """ def read_and_close(f): with f: return f.read().decode() r = {k: read_and_close(f) for k, f in request.FILES.items()} return JsonResponse(r) def file_upload_quota(request): """ Dynamically add in an upload handler. """ request.upload_handlers.insert(0, QuotaUploadHandler()) return file_upload_echo(request) def file_upload_quota_broken(request): """ You can't change handlers after reading FILES; this view shouldn't work. """ response = file_upload_echo(request) request.upload_handlers.insert(0, QuotaUploadHandler()) return response def file_stop_upload_temporary_file(request): request.upload_handlers.insert(0, StopUploadTemporaryFileHandler()) request.upload_handlers.pop(2) request.FILES # Trigger file parsing. return JsonResponse( {'temp_path': request.upload_handlers[0].file.temporary_file_path()}, ) def file_upload_interrupted_temporary_file(request): request.upload_handlers.insert(0, TemporaryFileUploadHandler()) request.upload_handlers.pop(2) request.FILES # Trigger file parsing. 
return JsonResponse( {'temp_path': request.upload_handlers[0].file.temporary_file_path()}, ) def file_upload_getlist_count(request): """ Check the .getlist() function to ensure we receive the correct number of files. """ file_counts = {} for key in request.FILES: file_counts[key] = len(request.FILES.getlist(key)) return JsonResponse(file_counts) def file_upload_errors(request): request.upload_handlers.insert(0, ErroringUploadHandler()) return file_upload_echo(request) def file_upload_filename_case_view(request): """ Check adding the file to the database will preserve the filename case. """ file = request.FILES['file_field'] obj = FileModel() obj.testfile.save(file.name, file) return HttpResponse('%d' % obj.pk) def file_upload_content_type_extra(request): """ Simple view to echo back extra content-type parameters. """ params = {} for file_name, uploadedfile in request.FILES.items(): params[file_name] = {k: v.decode() for k, v in uploadedfile.content_type_extra.items()} return JsonResponse(params) def file_upload_fd_closing(request, access): if access == 't': request.FILES # Trigger file parsing. return HttpResponse()
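# Usage sketch (added; not part of the original module): the echo views above
# are driven from the Django test client roughly as in tests.py, with open
# file objects passed in the POST data:
#
#   with open(__file__, 'rb') as fp:
#       response = self.client.post('/echo_content/', {'file': fp})
#   response.json()  # e.g. {'file': '<decoded file contents>'}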
import datetime
import os
from decimal import Decimal
from unittest import mock, skipUnless

from django import forms
from django.core.exceptions import (
    NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import connection, models
from django.db.models.query import EmptyQuerySet
from django.forms.models import (
    ModelFormMetaclass, construct_instance, fields_for_model, model_to_dict,
    modelform_factory,
)
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature

from .models import (
    Article, ArticleStatus, Author, Author1, Award, BetterWriter, BigInt,
    Book, Category, Character, Colour, ColourfulItem, CustomErrorMessage,
    CustomFF, CustomFieldForExclusionModel, DateTimePost, DerivedBook,
    DerivedPost, Document, ExplicitPK, FilePathModel, FlexibleDatePost,
    Homepage, ImprovedArticle, ImprovedArticleWithParentLink, Inventory,
    NullableUniqueCharFieldModel, Person, Photo, Post, Price, Product,
    Publication, PublicationDefaults, StrictAssignmentAll,
    StrictAssignmentFieldSpecific, Student, StumpJoke, TextFile, Triple,
    Writer, WriterProfile, test_images,
)

if test_images:
    from .models import ImageFile, NoExtensionImageFile, OptionalImageFile

    class ImageFileForm(forms.ModelForm):
        class Meta:
            model = ImageFile
            fields = '__all__'

    class OptionalImageFileForm(forms.ModelForm):
        class Meta:
            model = OptionalImageFile
            fields = '__all__'

    class NoExtensionImageFileForm(forms.ModelForm):
        class Meta:
            model = NoExtensionImageFile
            fields = '__all__'


class ProductForm(forms.ModelForm):
    class Meta:
        model = Product
        fields = '__all__'


class PriceForm(forms.ModelForm):
    class Meta:
        model = Price
        fields = '__all__'


class BookForm(forms.ModelForm):
    class Meta:
        model = Book
        fields = '__all__'


class DerivedBookForm(forms.ModelForm):
    class Meta:
        model = DerivedBook
        fields = '__all__'


class ExplicitPKForm(forms.ModelForm):
    class Meta:
        model = ExplicitPK
        fields = ('key', 'desc',)


class PostForm(forms.ModelForm):
    class Meta:
        model = Post
        fields = '__all__'


class DerivedPostForm(forms.ModelForm):
    class Meta:
        model = DerivedPost
        fields = '__all__'


class CustomWriterForm(forms.ModelForm):
    name = forms.CharField(required=False)

    class Meta:
        model = Writer
        fields = '__all__'


class BaseCategoryForm(forms.ModelForm):
    class Meta:
        model = Category
        fields = '__all__'


class ArticleForm(forms.ModelForm):
    class Meta:
        model = Article
        fields = '__all__'


class RoykoForm(forms.ModelForm):
    class Meta:
        model = Writer
        fields = '__all__'


class ArticleStatusForm(forms.ModelForm):
    class Meta:
        model = ArticleStatus
        fields = '__all__'


class InventoryForm(forms.ModelForm):
    class Meta:
        model = Inventory
        fields = '__all__'


class SelectInventoryForm(forms.Form):
    items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')


class CustomFieldForExclusionForm(forms.ModelForm):
    class Meta:
        model = CustomFieldForExclusionModel
        fields = ['name', 'markup']


class TextFileForm(forms.ModelForm):
    class Meta:
        model = TextFile
        fields = '__all__'


class BigIntForm(forms.ModelForm):
    class Meta:
        model = BigInt
        fields = '__all__'


class ModelFormWithMedia(forms.ModelForm):
    class Media:
        js = ('/some/form/javascript',)
        css = {
            'all': ('/some/form/css',)
        }

    class Meta:
        model = TextFile
        fields = '__all__'


class CustomErrorMessageForm(forms.ModelForm):
    name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'})

    class Meta:
        fields = '__all__'
        model = CustomErrorMessage


class ModelFormBaseTest(TestCase):
    def test_base_form(self):
        self.assertEqual(list(BaseCategoryForm.base_fields), ['name', 'slug', 'url'])

    def test_no_model_class(self):
        class NoModelModelForm(forms.ModelForm):
            pass
        with self.assertRaisesMessage(ValueError, 'ModelForm has no model class specified.'):
            NoModelModelForm()

    def test_empty_fields_to_fields_for_model(self):
        """
        An argument of fields=() to fields_for_model should return an empty
        dictionary.
        """
        field_dict = fields_for_model(Person, fields=())
        self.assertEqual(len(field_dict), 0)

    def test_empty_fields_on_modelform(self):
        """
        No fields on a ModelForm should actually result in no fields.
        """
        class EmptyPersonForm(forms.ModelForm):
            class Meta:
                model = Person
                fields = ()

        form = EmptyPersonForm()
        self.assertEqual(len(form.fields), 0)

    def test_empty_fields_to_construct_instance(self):
        """
        No fields should be set on a model instance if construct_instance
        receives fields=().
        """
        form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'})
        self.assertTrue(form.is_valid())
        instance = construct_instance(form, Person(), fields=())
        self.assertEqual(instance.name, '')

    def test_blank_with_null_foreign_key_field(self):
        """
        #13776 -- ModelForms with models having a FK set to null=False and
        required=False should be valid.
        """
        class FormForTestingIsValid(forms.ModelForm):
            class Meta:
                model = Student
                fields = '__all__'

            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.fields['character'].required = False

        char = Character.objects.create(username='user', last_action=datetime.datetime.today())
        data = {'study': 'Engineering'}
        data2 = {'study': 'Engineering', 'character': char.pk}

        # form is valid because required=False for field 'character'
        f1 = FormForTestingIsValid(data)
        self.assertTrue(f1.is_valid())

        f2 = FormForTestingIsValid(data2)
        self.assertTrue(f2.is_valid())
        obj = f2.save()
        self.assertEqual(obj.character, char)

    def test_blank_false_with_null_true_foreign_key_field(self):
        """
        A ModelForm with a model having ForeignKey(blank=False, null=True)
        and the form field set to required=False should allow the field to be
        unset.
""" class AwardForm(forms.ModelForm): class Meta: model = Award fields = '__all__' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['character'].required = False character = Character.objects.create(username='user', last_action=datetime.datetime.today()) award = Award.objects.create(name='Best sprinter', character=character) data = {'name': 'Best tester', 'character': ''} # remove character form = AwardForm(data=data, instance=award) self.assertTrue(form.is_valid()) award = form.save() self.assertIsNone(award.character) def test_blank_foreign_key_with_radio(self): class BookForm(forms.ModelForm): class Meta: model = Book fields = ['author'] widgets = {'author': forms.RadioSelect()} writer = Writer.objects.create(name='Joe Doe') form = BookForm() self.assertEqual(list(form.fields['author'].choices), [ ('', '---------'), (writer.pk, 'Joe Doe'), ]) def test_non_blank_foreign_key_with_radio(self): class AwardForm(forms.ModelForm): class Meta: model = Award fields = ['character'] widgets = {'character': forms.RadioSelect()} character = Character.objects.create( username='user', last_action=datetime.datetime.today(), ) form = AwardForm() self.assertEqual( list(form.fields['character'].choices), [(character.pk, 'user')], ) def test_save_blank_false_with_required_false(self): """ A ModelForm with a model with a field set to blank=False and the form field set to required=False should allow the field to be unset. """ obj = Writer.objects.create(name='test') form = CustomWriterForm(data={'name': ''}, instance=obj) self.assertTrue(form.is_valid()) obj = form.save() self.assertEqual(obj.name, '') def test_save_blank_null_unique_charfield_saves_null(self): form_class = modelform_factory(model=NullableUniqueCharFieldModel, fields='__all__') empty_value = '' if connection.features.interprets_empty_strings_as_nulls else None data = { 'codename': '', 'email': '', 'slug': '', 'url': '', } form = form_class(data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(form.instance.codename, empty_value) self.assertEqual(form.instance.email, empty_value) self.assertEqual(form.instance.slug, empty_value) self.assertEqual(form.instance.url, empty_value) # Save a second form to verify there isn't a unique constraint violation. form = form_class(data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(form.instance.codename, empty_value) self.assertEqual(form.instance.email, empty_value) self.assertEqual(form.instance.slug, empty_value) self.assertEqual(form.instance.url, empty_value) def test_missing_fields_attribute(self): message = ( "Creating a ModelForm without either the 'fields' attribute " "or the 'exclude' attribute is prohibited; form " "MissingFieldsForm needs updating." 
) with self.assertRaisesMessage(ImproperlyConfigured, message): class MissingFieldsForm(forms.ModelForm): class Meta: model = Category def test_extra_fields(self): class ExtraFields(BaseCategoryForm): some_extra_field = forms.BooleanField() self.assertEqual(list(ExtraFields.base_fields), ['name', 'slug', 'url', 'some_extra_field']) def test_extra_field_model_form(self): with self.assertRaisesMessage(FieldError, 'no-field'): class ExtraPersonForm(forms.ModelForm): """ ModelForm with an extra field """ age = forms.IntegerField() class Meta: model = Person fields = ('name', 'no-field') def test_extra_declared_field_model_form(self): class ExtraPersonForm(forms.ModelForm): """ ModelForm with an extra field """ age = forms.IntegerField() class Meta: model = Person fields = ('name', 'age') def test_extra_field_modelform_factory(self): with self.assertRaisesMessage(FieldError, 'Unknown field(s) (no-field) specified for Person'): modelform_factory(Person, fields=['no-field', 'name']) def test_replace_field(self): class ReplaceField(forms.ModelForm): url = forms.BooleanField() class Meta: model = Category fields = '__all__' self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField) def test_replace_field_variant_2(self): # Should have the same result as before, # but 'fields' attribute specified differently class ReplaceField(forms.ModelForm): url = forms.BooleanField() class Meta: model = Category fields = ['url'] self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField) def test_replace_field_variant_3(self): # Should have the same result as before, # but 'fields' attribute specified differently class ReplaceField(forms.ModelForm): url = forms.BooleanField() class Meta: model = Category fields = [] # url will still appear, since it is explicit above self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField) def test_override_field(self): class WriterForm(forms.ModelForm): book = forms.CharField(required=False) class Meta: model = Writer fields = '__all__' wf = WriterForm({'name': 'Richard Lockridge'}) self.assertTrue(wf.is_valid()) def test_limit_nonexistent_field(self): expected_msg = 'Unknown field(s) (nonexistent) specified for Category' with self.assertRaisesMessage(FieldError, expected_msg): class InvalidCategoryForm(forms.ModelForm): class Meta: model = Category fields = ['nonexistent'] def test_limit_fields_with_string(self): expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?" with self.assertRaisesMessage(TypeError, expected_msg): class CategoryForm(forms.ModelForm): class Meta: model = Category fields = ('url') # note the missing comma def test_exclude_fields(self): class ExcludeFields(forms.ModelForm): class Meta: model = Category exclude = ['url'] self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug']) def test_exclude_nonexistent_field(self): class ExcludeFields(forms.ModelForm): class Meta: model = Category exclude = ['nonexistent'] self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug', 'url']) def test_exclude_fields_with_string(self): expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?" 
with self.assertRaisesMessage(TypeError, expected_msg): class CategoryForm(forms.ModelForm): class Meta: model = Category exclude = ('url') # note the missing comma def test_exclude_and_validation(self): # This Price instance generated by this form is not valid because the quantity # field is required, but the form is valid because the field is excluded from # the form. This is for backwards compatibility. class PriceFormWithoutQuantity(forms.ModelForm): class Meta: model = Price exclude = ('quantity',) form = PriceFormWithoutQuantity({'price': '6.00'}) self.assertTrue(form.is_valid()) price = form.save(commit=False) msg = "{'quantity': ['This field cannot be null.']}" with self.assertRaisesMessage(ValidationError, msg): price.full_clean() # The form should not validate fields that it doesn't contain even if they are # specified using 'fields', not 'exclude'. class PriceFormWithoutQuantity(forms.ModelForm): class Meta: model = Price fields = ('price',) form = PriceFormWithoutQuantity({'price': '6.00'}) self.assertTrue(form.is_valid()) # The form should still have an instance of a model that is not complete and # not saved into a DB yet. self.assertEqual(form.instance.price, Decimal('6.00')) self.assertIsNone(form.instance.quantity) self.assertIsNone(form.instance.pk) def test_confused_form(self): class ConfusedForm(forms.ModelForm): """ Using 'fields' *and* 'exclude'. Not sure why you'd want to do this, but uh, "be liberal in what you accept" and all. """ class Meta: model = Category fields = ['name', 'url'] exclude = ['url'] self.assertEqual(list(ConfusedForm.base_fields), ['name']) def test_mixmodel_form(self): class MixModelForm(BaseCategoryForm): """ Don't allow more than one 'model' definition in the inheritance hierarchy. Technically, it would generate a valid form, but the fact that the resulting save method won't deal with multiple objects is likely to trip up people not familiar with the mechanics. """ class Meta: model = Article fields = '__all__' # MixModelForm is now an Article-related thing, because MixModelForm.Meta # overrides BaseCategoryForm.Meta. self.assertEqual( list(MixModelForm.base_fields), ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status'] ) def test_article_form(self): self.assertEqual( list(ArticleForm.base_fields), ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status'] ) def test_bad_form(self): # First class with a Meta class wins... class BadForm(ArticleForm, BaseCategoryForm): pass self.assertEqual( list(BadForm.base_fields), ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status'] ) def test_invalid_meta_model(self): class InvalidModelForm(forms.ModelForm): class Meta: pass # no model # Can't create new form msg = 'ModelForm has no model class specified.' with self.assertRaisesMessage(ValueError, msg): InvalidModelForm() # Even if you provide a model instance with self.assertRaisesMessage(ValueError, msg): InvalidModelForm(instance=Category) def test_subcategory_form(self): class SubCategoryForm(BaseCategoryForm): """ Subclassing without specifying a Meta on the class will use the parent's Meta (or the first parent in the MRO if there are multiple parent classes). 
""" pass self.assertEqual(list(SubCategoryForm.base_fields), ['name', 'slug', 'url']) def test_subclassmeta_form(self): class SomeCategoryForm(forms.ModelForm): checkbox = forms.BooleanField() class Meta: model = Category fields = '__all__' class SubclassMeta(SomeCategoryForm): """ We can also subclass the Meta inner class to change the fields list. """ class Meta(SomeCategoryForm.Meta): exclude = ['url'] self.assertHTMLEqual( str(SubclassMeta()), """<tr><th><label for="id_name">Name:</label></th> <td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr> <tr><th><label for="id_slug">Slug:</label></th> <td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr> <tr><th><label for="id_checkbox">Checkbox:</label></th> <td><input type="checkbox" name="checkbox" id="id_checkbox" required></td></tr>""" ) def test_orderfields_form(self): class OrderFields(forms.ModelForm): class Meta: model = Category fields = ['url', 'name'] self.assertEqual(list(OrderFields.base_fields), ['url', 'name']) self.assertHTMLEqual( str(OrderFields()), """<tr><th><label for="id_url">The URL:</label></th> <td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr> <tr><th><label for="id_name">Name:</label></th> <td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>""" ) def test_orderfields2_form(self): class OrderFields2(forms.ModelForm): class Meta: model = Category fields = ['slug', 'url', 'name'] exclude = ['url'] self.assertEqual(list(OrderFields2.base_fields), ['slug', 'name']) def test_default_populated_on_optional_field(self): class PubForm(forms.ModelForm): mode = forms.CharField(max_length=255, required=False) class Meta: model = PublicationDefaults fields = ('mode',) # Empty data uses the model field default. mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, 'di') self.assertEqual(m1._meta.get_field('mode').get_default(), 'di') # Blank data doesn't use the model field default. mf2 = PubForm({'mode': ''}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.mode, '') def test_default_not_populated_on_non_empty_value_in_cleaned_data(self): class PubForm(forms.ModelForm): mode = forms.CharField(max_length=255, required=False) mocked_mode = None def clean(self): self.cleaned_data['mode'] = self.mocked_mode return self.cleaned_data class Meta: model = PublicationDefaults fields = ('mode',) pub_form = PubForm({}) pub_form.mocked_mode = 'de' pub = pub_form.save(commit=False) self.assertEqual(pub.mode, 'de') # Default should be populated on an empty value in cleaned_data. default_mode = 'di' for empty_value in pub_form.fields['mode'].empty_values: with self.subTest(empty_value=empty_value): pub_form = PubForm({}) pub_form.mocked_mode = empty_value pub = pub_form.save(commit=False) self.assertEqual(pub.mode, default_mode) def test_default_not_populated_on_optional_checkbox_input(self): class PubForm(forms.ModelForm): class Meta: model = PublicationDefaults fields = ('active',) # Empty data doesn't use the model default because CheckboxInput # doesn't have a value in HTML form submission. 
mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertIs(m1.active, False) self.assertIsInstance(mf1.fields['active'].widget, forms.CheckboxInput) self.assertIs(m1._meta.get_field('active').get_default(), True) def test_default_not_populated_on_checkboxselectmultiple(self): class PubForm(forms.ModelForm): mode = forms.CharField(required=False, widget=forms.CheckboxSelectMultiple) class Meta: model = PublicationDefaults fields = ('mode',) # Empty data doesn't use the model default because an unchecked # CheckboxSelectMultiple doesn't have a value in HTML form submission. mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, '') self.assertEqual(m1._meta.get_field('mode').get_default(), 'di') def test_default_not_populated_on_selectmultiple(self): class PubForm(forms.ModelForm): mode = forms.CharField(required=False, widget=forms.SelectMultiple) class Meta: model = PublicationDefaults fields = ('mode',) # Empty data doesn't use the model default because an unselected # SelectMultiple doesn't have a value in HTML form submission. mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, '') self.assertEqual(m1._meta.get_field('mode').get_default(), 'di') def test_prefixed_form_with_default_field(self): class PubForm(forms.ModelForm): prefix = 'form-prefix' class Meta: model = PublicationDefaults fields = ('mode',) mode = 'de' self.assertNotEqual(mode, PublicationDefaults._meta.get_field('mode').get_default()) mf1 = PubForm({'form-prefix-mode': mode}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, mode) def test_renderer_kwarg(self): custom = object() self.assertIs(ProductForm(renderer=custom).renderer, custom) def test_default_splitdatetime_field(self): class PubForm(forms.ModelForm): datetime_published = forms.SplitDateTimeField(required=False) class Meta: model = PublicationDefaults fields = ('datetime_published',) mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.datetime_published, datetime.datetime(2000, 1, 1)) mf2 = PubForm({'datetime_published_0': '2010-01-01', 'datetime_published_1': '0:00:00'}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.datetime_published, datetime.datetime(2010, 1, 1)) def test_default_filefield(self): class PubForm(forms.ModelForm): class Meta: model = PublicationDefaults fields = ('file',) mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.file.name, 'default.txt') mf2 = PubForm({}, {'file': SimpleUploadedFile('name', b'foo')}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.file.name, 'name') def test_default_selectdatewidget(self): class PubForm(forms.ModelForm): date_published = forms.DateField(required=False, widget=forms.SelectDateWidget) class Meta: model = PublicationDefaults fields = ('date_published',) mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.date_published, datetime.date.today()) mf2 = PubForm({'date_published_year': '2010', 'date_published_month': '1', 'date_published_day': '1'}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.date_published, datetime.date(2010, 1, 1)) class FieldOverridesByFormMetaForm(forms.ModelForm): class Meta: model = Category fields = ['name', 'url', 'slug'] widgets = { 'name': forms.Textarea, 'url': 
forms.TextInput(attrs={'class': 'url'}) } labels = { 'name': 'Title', } help_texts = { 'slug': 'Watch out! Letters, numbers, underscores and hyphens only.', } error_messages = { 'slug': { 'invalid': ( "Didn't you read the help text? " "We said letters, numbers, underscores and hyphens only!" ) } } field_classes = { 'url': forms.URLField, } class TestFieldOverridesByFormMeta(SimpleTestCase): def test_widget_overrides(self): form = FieldOverridesByFormMetaForm() self.assertHTMLEqual( str(form['name']), '<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20" required></textarea>', ) self.assertHTMLEqual( str(form['url']), '<input id="id_url" type="text" class="url" name="url" maxlength="40" required>', ) self.assertHTMLEqual( str(form['slug']), '<input id="id_slug" type="text" name="slug" maxlength="20" required>', ) def test_label_overrides(self): form = FieldOverridesByFormMetaForm() self.assertHTMLEqual( str(form['name'].label_tag()), '<label for="id_name">Title:</label>', ) self.assertHTMLEqual( str(form['url'].label_tag()), '<label for="id_url">The URL:</label>', ) self.assertHTMLEqual( str(form['slug'].label_tag()), '<label for="id_slug">Slug:</label>', ) def test_help_text_overrides(self): form = FieldOverridesByFormMetaForm() self.assertEqual( form['slug'].help_text, 'Watch out! Letters, numbers, underscores and hyphens only.', ) def test_error_messages_overrides(self): form = FieldOverridesByFormMetaForm(data={ 'name': 'Category', 'url': 'http://www.example.com/category/', 'slug': '!%#*@', }) form.full_clean() error = [ "Didn't you read the help text? " "We said letters, numbers, underscores and hyphens only!", ] self.assertEqual(form.errors, {'slug': error}) def test_field_type_overrides(self): form = FieldOverridesByFormMetaForm() self.assertIs(Category._meta.get_field('url').__class__, models.CharField) self.assertIsInstance(form.fields['url'], forms.URLField) class IncompleteCategoryFormWithFields(forms.ModelForm): """ A form that replaces the model's url field with a custom one. This should prevent the model field's validation from being called. """ url = forms.CharField(required=False) class Meta: fields = ('name', 'slug') model = Category class IncompleteCategoryFormWithExclude(forms.ModelForm): """ A form that replaces the model's url field with a custom one. This should prevent the model field's validation from being called. """ url = forms.CharField(required=False) class Meta: exclude = ['url'] model = Category class ValidationTest(SimpleTestCase): def test_validates_with_replaced_field_not_specified(self): form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'}) self.assertIs(form.is_valid(), True) def test_validates_with_replaced_field_excluded(self): form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'}) self.assertIs(form.is_valid(), True) def test_notrequired_overrides_notblank(self): form = CustomWriterForm({}) self.assertIs(form.is_valid(), True) class UniqueTest(TestCase): """ unique/unique_together validation. 
""" @classmethod def setUpTestData(cls): cls.writer = Writer.objects.create(name='Mike Royko') def test_simple_unique(self): form = ProductForm({'slug': 'teddy-bear-blue'}) self.assertTrue(form.is_valid()) obj = form.save() form = ProductForm({'slug': 'teddy-bear-blue'}) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.']) form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj) self.assertTrue(form.is_valid()) def test_unique_together(self): """ModelForm test of unique_together constraint""" form = PriceForm({'price': '6.00', 'quantity': '1'}) self.assertTrue(form.is_valid()) form.save() form = PriceForm({'price': '6.00', 'quantity': '1'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.']) def test_unique_together_exclusion(self): """ Forms don't validate unique_together constraints when only part of the constraint is included in the form's fields. This allows using form.save(commit=False) and then assigning the missing field(s) to the model instance. """ class BookForm(forms.ModelForm): class Meta: model = DerivedBook fields = ('isbn', 'suffix1') # The unique_together is on suffix1/suffix2 but only suffix1 is part # of the form. The fields must have defaults, otherwise they'll be # skipped by other logic. self.assertEqual(DerivedBook._meta.unique_together, (('suffix1', 'suffix2'),)) for name in ('suffix1', 'suffix2'): with self.subTest(name=name): field = DerivedBook._meta.get_field(name) self.assertEqual(field.default, 0) # The form fails validation with "Derived book with this Suffix1 and # Suffix2 already exists." if the unique_together validation isn't # skipped. DerivedBook.objects.create(isbn='12345') form = BookForm({'isbn': '56789', 'suffix1': '0'}) self.assertTrue(form.is_valid(), form.errors) def test_multiple_field_unique_together(self): """ When the same field is involved in multiple unique_together constraints, we need to make sure we don't remove the data for it before doing all the validation checking (not just failing after the first one). 
""" class TripleForm(forms.ModelForm): class Meta: model = Triple fields = '__all__' Triple.objects.create(left=1, middle=2, right=3) form = TripleForm({'left': '1', 'middle': '2', 'right': '3'}) self.assertFalse(form.is_valid()) form = TripleForm({'left': '1', 'middle': '3', 'right': '1'}) self.assertTrue(form.is_valid()) @skipUnlessDBFeature('supports_nullable_unique_constraints') def test_unique_null(self): title = 'I May Be Wrong But I Doubt It' form = BookForm({'title': title, 'author': self.writer.pk}) self.assertTrue(form.is_valid()) form.save() form = BookForm({'title': title, 'author': self.writer.pk}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.']) form = BookForm({'title': title}) self.assertTrue(form.is_valid()) form.save() form = BookForm({'title': title}) self.assertTrue(form.is_valid()) def test_inherited_unique(self): title = 'Boss' Book.objects.create(title=title, author=self.writer, special_id=1) form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.']) def test_inherited_unique_together(self): title = 'Boss' form = BookForm({'title': title, 'author': self.writer.pk}) self.assertTrue(form.is_valid()) form.save() form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.']) def test_abstract_inherited_unique(self): title = 'Boss' isbn = '12345' DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn) form = DerivedBookForm({ 'title': 'Other', 'author': self.writer.pk, 'isbn': isbn, 'suffix1': '1', 'suffix2': '2', }) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.']) def test_abstract_inherited_unique_together(self): title = 'Boss' isbn = '12345' DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn) form = DerivedBookForm({ 'title': 'Other', 'author': self.writer.pk, 'isbn': '9876', 'suffix1': '0', 'suffix2': '0' }) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual( form.errors['__all__'], ['Derived book with this Suffix1 and Suffix2 already exists.'], ) def test_explicitpk_unspecified(self): """Test for primary_key being in the form and failing validation.""" form = ExplicitPKForm({'key': '', 'desc': ''}) self.assertFalse(form.is_valid()) def test_explicitpk_unique(self): """Ensure keys and blank character strings are tested for uniqueness.""" form = ExplicitPKForm({'key': 'key1', 'desc': ''}) self.assertTrue(form.is_valid()) form.save() form = ExplicitPKForm({'key': 'key1', 'desc': ''}) self.assertFalse(form.is_valid()) if connection.features.interprets_empty_strings_as_nulls: self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.']) else: self.assertEqual(len(form.errors), 3) self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.']) self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.']) self.assertEqual(form.errors['key'], ['Explicit pk with this Key already 
exists.']) def test_unique_for_date(self): p = Post.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3), ) form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.']) form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'}) self.assertTrue(form.is_valid()) form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'}) self.assertTrue(form.is_valid()) form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.']) form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'}) self.assertFalse(form.is_valid()) self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.']) data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'} form = PostForm(data, instance=p) self.assertTrue(form.is_valid()) form = PostForm({'title': "Django 1.0 is released"}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['posted'], ['This field is required.']) def test_unique_for_date_in_exclude(self): """ If the date for unique_for_* constraints is excluded from the ModelForm (in this case 'posted' has editable=False, then the constraint should be ignored. """ class DateTimePostForm(forms.ModelForm): class Meta: model = DateTimePost fields = '__all__' DateTimePost.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.datetime(2008, 9, 3, 10, 10, 1), ) # 'title' has unique_for_date='posted' form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'}) self.assertTrue(form.is_valid()) # 'slug' has unique_for_year='posted' form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'}) self.assertTrue(form.is_valid()) # 'subtitle' has unique_for_month='posted' form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'}) self.assertTrue(form.is_valid()) def test_inherited_unique_for_date(self): p = Post.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3), ) form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.']) form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'}) self.assertTrue(form.is_valid()) form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'}) self.assertTrue(form.is_valid()) form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.']) form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'}) self.assertFalse(form.is_valid()) self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.']) data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'} form = DerivedPostForm(data, instance=p) 
self.assertTrue(form.is_valid()) def test_unique_for_date_with_nullable_date(self): class FlexDatePostForm(forms.ModelForm): class Meta: model = FlexibleDatePost fields = '__all__' p = FlexibleDatePost.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3), ) form = FlexDatePostForm({'title': "Django 1.0 is released"}) self.assertTrue(form.is_valid()) form = FlexDatePostForm({'slug': "Django 1.0"}) self.assertTrue(form.is_valid()) form = FlexDatePostForm({'subtitle': "Finally"}) self.assertTrue(form.is_valid()) data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0"} form = FlexDatePostForm(data, instance=p) self.assertTrue(form.is_valid()) def test_override_unique_message(self): class CustomProductForm(ProductForm): class Meta(ProductForm.Meta): error_messages = { 'slug': { 'unique': "%(model_name)s's %(field_label)s not unique.", } } Product.objects.create(slug='teddy-bear-blue') form = CustomProductForm({'slug': 'teddy-bear-blue'}) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['slug'], ["Product's Slug not unique."]) def test_override_unique_together_message(self): class CustomPriceForm(PriceForm): class Meta(PriceForm.Meta): error_messages = { NON_FIELD_ERRORS: { 'unique_together': "%(model_name)s's %(field_labels)s not unique.", } } Price.objects.create(price=6.00, quantity=1) form = CustomPriceForm({'price': '6.00', 'quantity': '1'}) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."]) def test_override_unique_for_date_message(self): class CustomPostForm(PostForm): class Meta(PostForm.Meta): error_messages = { 'title': { 'unique_for_date': ( "%(model_name)s's %(field_label)s not unique " "for %(date_field_label)s date." 
), } } Post.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3), ) form = CustomPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'}) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['title'], ["Post's Title not unique for Posted date."]) class ModelFormBasicTests(TestCase): def create_basic_data(self): self.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment') self.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test') self.c3 = Category.objects.create(name='Third test', slug='third-test', url='third') self.w_royko = Writer.objects.create(name='Mike Royko') self.w_woodward = Writer.objects.create(name='Bob Woodward') def test_base_form(self): self.assertEqual(Category.objects.count(), 0) f = BaseCategoryForm() self.assertHTMLEqual( str(f), """<tr><th><label for="id_name">Name:</label></th> <td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr> <tr><th><label for="id_slug">Slug:</label></th> <td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr> <tr><th><label for="id_url">The URL:</label></th> <td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>""" ) self.assertHTMLEqual( str(f.as_ul()), """<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" required></li> <li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" required></li> <li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" required></li>""" ) self.assertHTMLEqual( str(f["name"]), """<input id="id_name" type="text" name="name" maxlength="20" required>""") def test_auto_id(self): f = BaseCategoryForm(auto_id=False) self.assertHTMLEqual( str(f.as_ul()), """<li>Name: <input type="text" name="name" maxlength="20" required></li> <li>Slug: <input type="text" name="slug" maxlength="20" required></li> <li>The URL: <input type="text" name="url" maxlength="40" required></li>""" ) def test_initial_values(self): self.create_basic_data() # Initial values can be provided for model forms f = ArticleForm( auto_id=False, initial={ 'headline': 'Your headline here', 'categories': [str(self.c1.id), str(self.c2.id)] }) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" required></li> <li>Writer: <select name="writer" required> <option value="" selected>---------</option> <option value="%s">Bob Woodward</option> <option value="%s">Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s" selected>Entertainment</option> <option value="%s" selected>It&#x27;s a test</option> <option value="%s">Third test</option> </select></li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) # When the ModelForm is passed an instance, that instance's current values are # inserted as 'initial' data in each Field. 
f = RoykoForm(auto_id=False, instance=self.w_royko) self.assertHTMLEqual( str(f), '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" required><br> <span class="helptext">Use both first and last names.</span></td></tr>''' ) art = Article.objects.create( headline='Test article', slug='test-article', pub_date=datetime.date(1988, 1, 4), writer=self.w_royko, article='Hello.' ) art_id_1 = art.id f = ArticleForm(auto_id=False, instance=art) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li> <li>Writer: <select name="writer" required> <option value="">---------</option> <option value="%s">Bob Woodward</option> <option value="%s" selected>Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s">Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> </select></li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) f = ArticleForm({ 'headline': 'Test headline', 'slug': 'test-headline', 'pub_date': '1984-02-06', 'writer': str(self.w_royko.pk), 'article': 'Hello.' }, instance=art) self.assertEqual(f.errors, {}) self.assertTrue(f.is_valid()) test_art = f.save() self.assertEqual(test_art.id, art_id_1) test_art = Article.objects.get(id=art_id_1) self.assertEqual(test_art.headline, 'Test headline') def test_m2m_initial_callable(self): """ Regression for #10349: A callable can be provided as the initial value for an m2m field """ self.maxDiff = 1200 self.create_basic_data() # Set up a callable initial value def formfield_for_dbfield(db_field, **kwargs): if db_field.name == 'categories': kwargs['initial'] = lambda: Category.objects.all().order_by('name')[:2] return db_field.formfield(**kwargs) # Create a ModelForm, instantiate it, and check that the output is as expected ModelForm = modelform_factory( Article, fields=['headline', 'categories'], formfield_callback=formfield_for_dbfield, ) form = ModelForm() self.assertHTMLEqual( form.as_ul(), """<li><label for="id_headline">Headline:</label> <input id="id_headline" type="text" name="headline" maxlength="50" required></li> <li><label for="id_categories">Categories:</label> <select multiple name="categories" id="id_categories"> <option value="%d" selected>Entertainment</option> <option value="%d" selected>It&#x27;s a test</option> <option value="%d">Third test</option> </select></li>""" % (self.c1.pk, self.c2.pk, self.c3.pk)) def test_basic_creation(self): self.assertEqual(Category.objects.count(), 0) f = BaseCategoryForm({ 'name': 'Entertainment', 'slug': 'entertainment', 'url': 'entertainment', }) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['name'], 'Entertainment') self.assertEqual(f.cleaned_data['slug'], 'entertainment') self.assertEqual(f.cleaned_data['url'], 'entertainment') c1 = f.save() # Testing whether the same object is returned from the # ORM... not the fastest way... 
self.assertEqual(Category.objects.count(), 1) self.assertEqual(c1, Category.objects.all()[0]) self.assertEqual(c1.name, "Entertainment") def test_save_commit_false(self): # If you call save() with commit=False, then it will return an object that # hasn't yet been saved to the database. In this case, it's up to you to call # save() on the resulting model instance. f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'}) self.assertTrue(f.is_valid()) c1 = f.save(commit=False) self.assertEqual(c1.name, "Third test") self.assertEqual(Category.objects.count(), 0) c1.save() self.assertEqual(Category.objects.count(), 1) def test_save_with_data_errors(self): # If you call save() with invalid data, you'll get a ValueError. f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'}) self.assertEqual(f.errors['name'], ['This field is required.']) self.assertEqual( f.errors['slug'], ['Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.'] ) self.assertEqual(f.cleaned_data, {'url': 'foo'}) msg = "The Category could not be created because the data didn't validate." with self.assertRaisesMessage(ValueError, msg): f.save() f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'}) with self.assertRaisesMessage(ValueError, msg): f.save() def test_multi_fields(self): self.create_basic_data() self.maxDiff = None # ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any # fields with the 'choices' attribute are represented by a ChoiceField. f = ArticleForm(auto_id=False) self.assertHTMLEqual( str(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr> <tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" required></td></tr> <tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr> <tr><th>Writer:</th><td><select name="writer" required> <option value="" selected>---------</option> <option value="%s">Bob Woodward</option> <option value="%s">Mike Royko</option> </select></td></tr> <tr><th>Article:</th><td><textarea rows="10" cols="40" name="article" required></textarea></td></tr> <tr><th>Categories:</th><td><select multiple name="categories"> <option value="%s">Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> </select></td></tr> <tr><th>Status:</th><td><select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></td></tr>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) # Add some categories and test the many-to-many form output. 
new_art = Article.objects.create( article="Hello.", headline="New headline", slug="new-headline", pub_date=datetime.date(1988, 1, 4), writer=self.w_royko) new_art.categories.add(Category.objects.get(name='Entertainment')) self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"]) f = ArticleForm(auto_id=False, instance=new_art) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li> <li>Writer: <select name="writer" required> <option value="">---------</option> <option value="%s">Bob Woodward</option> <option value="%s" selected>Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s" selected>Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> </select></li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) def test_subset_fields(self): # You can restrict a form to a subset of the complete list of fields # by providing a 'fields' argument. If you try to save a # model created with such a form, you need to ensure that the fields # that are _not_ on the form have default values, or are allowed to have # a value of None. If a field isn't specified on a form, the object created # from the form can't provide a value for that field! class PartialArticleForm(forms.ModelForm): class Meta: model = Article fields = ('headline', 'pub_date') f = PartialArticleForm(auto_id=False) self.assertHTMLEqual( str(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr> <tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr>''') class PartialArticleFormWithSlug(forms.ModelForm): class Meta: model = Article fields = ('headline', 'slug', 'pub_date') w_royko = Writer.objects.create(name='Mike Royko') art = Article.objects.create( article="Hello.", headline="New headline", slug="new-headline", pub_date=datetime.date(1988, 1, 4), writer=w_royko) f = PartialArticleFormWithSlug({ 'headline': 'New headline', 'slug': 'new-headline', 'pub_date': '1988-01-04' }, auto_id=False, instance=art) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>''' ) self.assertTrue(f.is_valid()) new_art = f.save() self.assertEqual(new_art.id, art.id) new_art = Article.objects.get(id=art.id) self.assertEqual(new_art.headline, 'New headline') def test_m2m_editing(self): self.create_basic_data() form_data = { 'headline': 'New headline', 'slug': 'new-headline', 'pub_date': '1988-01-04', 'writer': str(self.w_royko.pk), 'article': 'Hello.', 'categories': [str(self.c1.id), str(self.c2.id)] } # Create a new article, with categories, via the form. 
f = ArticleForm(form_data) new_art = f.save() new_art = Article.objects.get(id=new_art.id) art_id_1 = new_art.id self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"]) # Now, submit form data with no categories. This deletes the existing categories. form_data['categories'] = [] f = ArticleForm(form_data, instance=new_art) new_art = f.save() self.assertEqual(new_art.id, art_id_1) new_art = Article.objects.get(id=art_id_1) self.assertQuerysetEqual(new_art.categories.all(), []) # Create a new article, with no categories, via the form. f = ArticleForm(form_data) new_art = f.save() art_id_2 = new_art.id self.assertNotIn(art_id_2, (None, art_id_1)) new_art = Article.objects.get(id=art_id_2) self.assertQuerysetEqual(new_art.categories.all(), []) # Create a new article, with categories, via the form, but use commit=False. # The m2m data won't be saved until save_m2m() is invoked on the form. form_data['categories'] = [str(self.c1.id), str(self.c2.id)] f = ArticleForm(form_data) new_art = f.save(commit=False) # Manually save the instance new_art.save() art_id_3 = new_art.id self.assertNotIn(art_id_3, (None, art_id_1, art_id_2)) # The instance doesn't have m2m data yet new_art = Article.objects.get(id=art_id_3) self.assertQuerysetEqual(new_art.categories.all(), []) # Save the m2m data on the form f.save_m2m() self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"]) def test_custom_form_fields(self): # Here, we define a custom ModelForm. Because it happens to have the same fields as # the Category model, we can just call the form's save() to apply its changes to an # existing Category instance. class ShortCategory(forms.ModelForm): name = forms.CharField(max_length=5) slug = forms.CharField(max_length=5) url = forms.CharField(max_length=3) class Meta: model = Category fields = '__all__' cat = Category.objects.create(name='Third test') form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat) self.assertEqual(form.save().name, 'Third') self.assertEqual(Category.objects.get(id=cat.id).name, 'Third') def test_runtime_choicefield_populated(self): self.maxDiff = None # Here, we demonstrate that choices for a ForeignKey ChoiceField are determined # at runtime, based on the data in the database when the form is displayed, not # the data in the database when the form is instantiated. 
self.create_basic_data() f = ArticleForm(auto_id=False) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" required></li> <li>Writer: <select name="writer" required> <option value="" selected>---------</option> <option value="%s">Bob Woodward</option> <option value="%s">Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s">Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> </select> </li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) c4 = Category.objects.create(name='Fourth', url='4th') w_bernstein = Writer.objects.create(name='Carl Bernstein') self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" required></li> <li>Writer: <select name="writer" required> <option value="" selected>---------</option> <option value="%s">Bob Woodward</option> <option value="%s">Carl Bernstein</option> <option value="%s">Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s">Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> <option value="%s">Fourth</option> </select></li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, w_bernstein.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk, c4.pk)) def test_recleaning_model_form_instance(self): """ Re-cleaning an instance that was added via a ModelForm shouldn't raise a pk uniqueness error. """ class AuthorForm(forms.ModelForm): class Meta: model = Author fields = '__all__' form = AuthorForm({'full_name': 'Bob'}) self.assertTrue(form.is_valid()) obj = form.save() obj.name = 'Alice' obj.full_clean() def test_validate_foreign_key_uses_default_manager(self): class MyForm(forms.ModelForm): class Meta: model = Article fields = '__all__' # Archived writers are filtered out by the default manager. w = Writer.objects.create(name='Randy', archived=True) data = { 'headline': 'My Article', 'slug': 'my-article', 'pub_date': datetime.date.today(), 'writer': w.pk, 'article': 'lorem ipsum', } form = MyForm(data) self.assertIs(form.is_valid(), False) self.assertEqual( form.errors, {'writer': ['Select a valid choice. That choice is not one of the available choices.']}, ) def test_validate_foreign_key_to_model_with_overridden_manager(self): class MyForm(forms.ModelForm): class Meta: model = Article fields = '__all__' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Allow archived authors. 
self.fields['writer'].queryset = Writer._base_manager.all() w = Writer.objects.create(name='Randy', archived=True) data = { 'headline': 'My Article', 'slug': 'my-article', 'pub_date': datetime.date.today(), 'writer': w.pk, 'article': 'lorem ipsum', } form = MyForm(data) self.assertIs(form.is_valid(), True) article = form.save() self.assertEqual(article.writer, w) class ModelMultipleChoiceFieldTests(TestCase): @classmethod def setUpTestData(cls): cls.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment') cls.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test') cls.c3 = Category.objects.create(name='Third', slug='third-test', url='third') def test_model_multiple_choice_field(self): f = forms.ModelMultipleChoiceField(Category.objects.all()) self.assertEqual(list(f.choices), [ (self.c1.pk, 'Entertainment'), (self.c2.pk, "It's a test"), (self.c3.pk, 'Third')]) with self.assertRaises(ValidationError): f.clean(None) with self.assertRaises(ValidationError): f.clean([]) self.assertQuerysetEqual(f.clean([self.c1.id]), ["Entertainment"]) self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"]) self.assertQuerysetEqual(f.clean([str(self.c1.id)]), ["Entertainment"]) self.assertQuerysetEqual( f.clean([str(self.c1.id), str(self.c2.id)]), ["Entertainment", "It's a test"], ordered=False ) self.assertQuerysetEqual( f.clean([self.c1.id, str(self.c2.id)]), ["Entertainment", "It's a test"], ordered=False ) self.assertQuerysetEqual( f.clean((self.c1.id, str(self.c2.id))), ["Entertainment", "It's a test"], ordered=False ) with self.assertRaises(ValidationError): f.clean(['100']) with self.assertRaises(ValidationError): f.clean('hello') with self.assertRaises(ValidationError): f.clean(['fail']) # Invalid types that require TypeError to be caught (#22808). with self.assertRaises(ValidationError): f.clean([['fail']]) with self.assertRaises(ValidationError): f.clean([{'foo': 'bar'}]) # Add a Category object *after* the ModelMultipleChoiceField has already been # instantiated. This proves clean() checks the database during clean() rather # than caching it at time of instantiation. # Note, we are using an id of 1006 here since tests that run before # this may create categories with primary keys up to 6. Use # a number that will not conflict. c6 = Category.objects.create(id=1006, name='Sixth', url='6th') self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"]) # Delete a Category object *after* the ModelMultipleChoiceField has already been # instantiated. This proves clean() checks the database during clean() rather # than caching it at time of instantiation. Category.objects.get(url='6th').delete() with self.assertRaises(ValidationError): f.clean([c6.id]) def test_model_multiple_choice_required_false(self): f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False) self.assertIsInstance(f.clean([]), EmptyQuerySet) self.assertIsInstance(f.clean(()), EmptyQuerySet) with self.assertRaises(ValidationError): f.clean(['0']) with self.assertRaises(ValidationError): f.clean([str(self.c3.id), '0']) with self.assertRaises(ValidationError): f.clean([str(self.c1.id), '0']) # queryset can be changed after the field is created. 
f.queryset = Category.objects.exclude(name='Third') self.assertEqual(list(f.choices), [ (self.c1.pk, 'Entertainment'), (self.c2.pk, "It's a test")]) self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"]) with self.assertRaises(ValidationError): f.clean([self.c3.id]) with self.assertRaises(ValidationError): f.clean([str(self.c2.id), str(self.c3.id)]) f.queryset = Category.objects.all() f.label_from_instance = lambda obj: "multicategory " + str(obj) self.assertEqual(list(f.choices), [ (self.c1.pk, 'multicategory Entertainment'), (self.c2.pk, "multicategory It's a test"), (self.c3.pk, 'multicategory Third')]) def test_model_multiple_choice_number_of_queries(self): """ ModelMultipleChoiceField does O(1) queries instead of O(n) (#10156). """ persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)] f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all()) self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]]) def test_model_multiple_choice_run_validators(self): """ ModelMultipleChoiceField run given validators (#14144). """ for i in range(30): Writer.objects.create(name="Person %s" % i) self._validator_run = False def my_validator(value): self._validator_run = True f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all(), validators=[my_validator]) f.clean([p.pk for p in Writer.objects.all()[8:9]]) self.assertTrue(self._validator_run) def test_model_multiple_choice_show_hidden_initial(self): """ Test support of show_hidden_initial by ModelMultipleChoiceField. """ class WriterForm(forms.Form): persons = forms.ModelMultipleChoiceField(show_hidden_initial=True, queryset=Writer.objects.all()) person1 = Writer.objects.create(name="Person 1") person2 = Writer.objects.create(name="Person 2") form = WriterForm( initial={'persons': [person1, person2]}, data={ 'initial-persons': [str(person1.pk), str(person2.pk)], 'persons': [str(person1.pk), str(person2.pk)], }, ) self.assertTrue(form.is_valid()) self.assertFalse(form.has_changed()) form = WriterForm( initial={'persons': [person1, person2]}, data={ 'initial-persons': [str(person1.pk), str(person2.pk)], 'persons': [str(person2.pk)], }, ) self.assertTrue(form.is_valid()) self.assertTrue(form.has_changed()) def test_model_multiple_choice_field_22745(self): """ #22745 -- Make sure that ModelMultipleChoiceField with CheckboxSelectMultiple widget doesn't produce unnecessary db queries when accessing its BoundField's attrs. 
""" class ModelMultipleChoiceForm(forms.Form): categories = forms.ModelMultipleChoiceField(Category.objects.all(), widget=forms.CheckboxSelectMultiple) form = ModelMultipleChoiceForm() field = form['categories'] # BoundField template = Template('{{ field.name }}{{ field }}{{ field.help_text }}') with self.assertNumQueries(1): template.render(Context({'field': field})) def test_show_hidden_initial_changed_queries_efficiently(self): class WriterForm(forms.Form): persons = forms.ModelMultipleChoiceField( show_hidden_initial=True, queryset=Writer.objects.all()) writers = (Writer.objects.create(name=str(x)) for x in range(0, 50)) writer_pks = tuple(x.pk for x in writers) form = WriterForm(data={'initial-persons': writer_pks}) with self.assertNumQueries(1): self.assertTrue(form.has_changed()) def test_clean_does_deduplicate_values(self): class PersonForm(forms.Form): persons = forms.ModelMultipleChoiceField(queryset=Person.objects.all()) person1 = Person.objects.create(name='Person 1') form = PersonForm(data={}) queryset = form.fields['persons'].clean([str(person1.pk)] * 50) sql, params = queryset.query.sql_with_params() self.assertEqual(len(params), 1) def test_to_field_name_with_initial_data(self): class ArticleCategoriesForm(forms.ModelForm): categories = forms.ModelMultipleChoiceField(Category.objects.all(), to_field_name='slug') class Meta: model = Article fields = ['categories'] article = Article.objects.create( headline='Test article', slug='test-article', pub_date=datetime.date(1988, 1, 4), writer=Writer.objects.create(name='Test writer'), article='Hello.', ) article.categories.add(self.c2, self.c3) form = ArticleCategoriesForm(instance=article) self.assertCountEqual(form['categories'].value(), [self.c2.slug, self.c3.slug]) class ModelOneToOneFieldTests(TestCase): def test_modelform_onetoonefield(self): class ImprovedArticleForm(forms.ModelForm): class Meta: model = ImprovedArticle fields = '__all__' class ImprovedArticleWithParentLinkForm(forms.ModelForm): class Meta: model = ImprovedArticleWithParentLink fields = '__all__' self.assertEqual(list(ImprovedArticleForm.base_fields), ['article']) self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), []) def test_modelform_subclassed_model(self): class BetterWriterForm(forms.ModelForm): class Meta: # BetterWriter model is a subclass of Writer with an additional `score` field model = BetterWriter fields = '__all__' bw = BetterWriter.objects.create(name='Joe Better', score=10) self.assertEqual(sorted(model_to_dict(bw)), ['id', 'name', 'score', 'writer_ptr']) self.assertEqual(sorted(model_to_dict(bw, fields=[])), []) self.assertEqual(sorted(model_to_dict(bw, fields=['id', 'name'])), ['id', 'name']) self.assertEqual(sorted(model_to_dict(bw, exclude=[])), ['id', 'name', 'score', 'writer_ptr']) self.assertEqual(sorted(model_to_dict(bw, exclude=['id', 'name'])), ['score', 'writer_ptr']) form = BetterWriterForm({'name': 'Some Name', 'score': 12}) self.assertTrue(form.is_valid()) bw2 = form.save() self.assertEqual(bw2.score, 12) def test_onetoonefield(self): class WriterProfileForm(forms.ModelForm): class Meta: # WriterProfile has a OneToOneField to Writer model = WriterProfile fields = '__all__' self.w_royko = Writer.objects.create(name='Mike Royko') self.w_woodward = Writer.objects.create(name='Bob Woodward') form = WriterProfileForm() self.assertHTMLEqual( form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required> <option value="" selected>---------</option> <option value="%s">Bob 
Woodward</option> <option value="%s">Mike Royko</option> </select></p> <p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" required></p>''' % ( self.w_woodward.pk, self.w_royko.pk, ) ) data = { 'writer': str(self.w_woodward.pk), 'age': '65', } form = WriterProfileForm(data) instance = form.save() self.assertEqual(str(instance), 'Bob Woodward is 65') form = WriterProfileForm(instance=instance) self.assertHTMLEqual( form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required> <option value="">---------</option> <option value="%s" selected>Bob Woodward</option> <option value="%s">Mike Royko</option> </select></p> <p><label for="id_age">Age:</label> <input type="number" name="age" value="65" id="id_age" min="0" required></p>''' % ( self.w_woodward.pk, self.w_royko.pk, ) ) def test_assignment_of_none(self): class AuthorForm(forms.ModelForm): class Meta: model = Author fields = ['publication', 'full_name'] publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22)) author = Author.objects.create(publication=publication, full_name='John Doe') form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author) self.assertTrue(form.is_valid()) self.assertIsNone(form.cleaned_data['publication']) author = form.save() # author object returned from form still retains original publication object # that's why we need to retrieve it from database again new_author = Author.objects.get(pk=author.pk) self.assertIsNone(new_author.publication) def test_assignment_of_none_null_false(self): class AuthorForm(forms.ModelForm): class Meta: model = Author1 fields = ['publication', 'full_name'] publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22)) author = Author1.objects.create(publication=publication, full_name='John Doe') form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author) self.assertFalse(form.is_valid()) class FileAndImageFieldTests(TestCase): def test_clean_false(self): """ If the ``clean`` method on a non-required FileField receives False as the data (meaning clear the field value), it returns False, regardless of the value of ``initial``. """ f = forms.FileField(required=False) self.assertIs(f.clean(False), False) self.assertIs(f.clean(False, 'initial'), False) def test_clean_false_required(self): """ If the ``clean`` method on a required FileField receives False as the data, it has the same effect as None: initial is returned if non-empty, otherwise the validation catches the lack of a required value. """ f = forms.FileField(required=True) self.assertEqual(f.clean(False, 'initial'), 'initial') with self.assertRaises(ValidationError): f.clean(False) def test_full_clear(self): """ Integration happy-path test that a model FileField can actually be set and cleared via a ModelForm. 
""" class DocumentForm(forms.ModelForm): class Meta: model = Document fields = '__all__' form = DocumentForm() self.assertIn('name="myfile"', str(form)) self.assertNotIn('myfile-clear', str(form)) form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')}) self.assertTrue(form.is_valid()) doc = form.save(commit=False) self.assertEqual(doc.myfile.name, 'something.txt') form = DocumentForm(instance=doc) self.assertIn('myfile-clear', str(form)) form = DocumentForm(instance=doc, data={'myfile-clear': 'true'}) doc = form.save(commit=False) self.assertFalse(doc.myfile) def test_clear_and_file_contradiction(self): """ If the user submits a new file upload AND checks the clear checkbox, they get a validation error, and the bound redisplay of the form still includes the current file and the clear checkbox. """ class DocumentForm(forms.ModelForm): class Meta: model = Document fields = '__all__' form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')}) self.assertTrue(form.is_valid()) doc = form.save(commit=False) form = DocumentForm( instance=doc, files={'myfile': SimpleUploadedFile('something.txt', b'content')}, data={'myfile-clear': 'true'}, ) self.assertTrue(not form.is_valid()) self.assertEqual(form.errors['myfile'], ['Please either submit a file or check the clear checkbox, not both.']) rendered = str(form) self.assertIn('something.txt', rendered) self.assertIn('myfile-clear', rendered) def test_render_empty_file_field(self): class DocumentForm(forms.ModelForm): class Meta: model = Document fields = '__all__' doc = Document.objects.create() form = DocumentForm(instance=doc) self.assertHTMLEqual( str(form['myfile']), '<input id="id_myfile" name="myfile" type="file">' ) def test_file_field_data(self): # Test conditions when files is either not given or empty. f = TextFileForm(data={'description': 'Assistance'}) self.assertFalse(f.is_valid()) f = TextFileForm(data={'description': 'Assistance'}, files={}) self.assertFalse(f.is_valid()) # Upload a file and ensure it all works as expected. f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', b'hello world')}, ) self.assertTrue(f.is_valid()) self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile) instance = f.save() self.assertEqual(instance.file.name, 'tests/test1.txt') instance.file.delete() # If the previous file has been deleted, the file name can be reused f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', b'hello world')}, ) self.assertTrue(f.is_valid()) self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile) instance = f.save() self.assertEqual(instance.file.name, 'tests/test1.txt') # Check if the max_length attribute has been inherited from the model. f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')}, ) self.assertFalse(f.is_valid()) # Edit an instance that already has the file defined in the model. This will not # save the file again, but leave it exactly as it is. f = TextFileForm({'description': 'Assistance'}, instance=instance) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt') instance = f.save() self.assertEqual(instance.file.name, 'tests/test1.txt') # Delete the current file since this is not done by Django. instance.file.delete() # Override the file by uploading a new one. 
f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.file.name, 'tests/test2.txt') # Delete the current file since this is not done by Django. instance.file.delete() instance.delete() def test_filefield_required_false(self): # Test the non-required FileField f = TextFileForm(data={'description': 'Assistance'}) f.fields['file'].required = False self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.file.name, '') f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.file.name, 'tests/test3.txt') # Instance can be edited w/out re-uploading the file and existing file should be preserved. f = TextFileForm({'description': 'New Description'}, instance=instance) f.fields['file'].required = False self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.description, 'New Description') self.assertEqual(instance.file.name, 'tests/test3.txt') # Delete the current file since this is not done by Django. instance.file.delete() instance.delete() def test_custom_file_field_save(self): """ Regression for #11149: save_form_data should be called only once """ class CFFForm(forms.ModelForm): class Meta: model = CustomFF fields = '__all__' # It's enough that the form saves without error -- the custom save routine will # generate an AssertionError if it is called more than once during save. form = CFFForm(data={'f': None}) form.save() def test_file_field_multiple_save(self): """ Simulate a file upload and check how many times Model.save() gets called. Test for bug #639. """ class PhotoForm(forms.ModelForm): class Meta: model = Photo fields = '__all__' # Grab an image for testing. filename = os.path.join(os.path.dirname(__file__), 'test.png') with open(filename, "rb") as fp: img = fp.read() # Fake a POST QueryDict and FILES MultiValueDict. data = {'title': 'Testing'} files = {"image": SimpleUploadedFile('test.png', img, 'image/png')} form = PhotoForm(data=data, files=files) p = form.save() try: # Check the savecount stored on the object (see the model). self.assertEqual(p._savecount, 1) finally: # Delete the "uploaded" file to avoid clogging /tmp. p = Photo.objects.get() p.image.delete(save=False) def test_file_path_field_blank(self): """FilePathField(blank=True) includes the empty option.""" class FPForm(forms.ModelForm): class Meta: model = FilePathModel fields = '__all__' form = FPForm() self.assertEqual([name for _, name in form['path'].field.choices], ['---------', 'models.py']) @skipUnless(test_images, "Pillow not installed") def test_image_field(self): # ImageField and FileField are nearly identical, but they differ slightly when # it comes to validation. This specifically tests that #6302 is fixed for # both file fields and image fields. 
with open(os.path.join(os.path.dirname(__file__), 'test.png'), 'rb') as fp: image_data = fp.read() with open(os.path.join(os.path.dirname(__file__), 'test2.png'), 'rb') as fp: image_data2 = fp.read() f = ImageFileForm( data={'description': 'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)}, ) self.assertTrue(f.is_valid()) self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile) instance = f.save() self.assertEqual(instance.image.name, 'tests/test.png') self.assertEqual(instance.width, 16) self.assertEqual(instance.height, 16) # Delete the current file since this is not done by Django, but don't save # because the dimension fields are not null=True. instance.image.delete(save=False) f = ImageFileForm( data={'description': 'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)}, ) self.assertTrue(f.is_valid()) self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile) instance = f.save() self.assertEqual(instance.image.name, 'tests/test.png') self.assertEqual(instance.width, 16) self.assertEqual(instance.height, 16) # Edit an instance that already has the (required) image defined in the model. This will not # save the image again, but leave it exactly as it is. f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png') instance = f.save() self.assertEqual(instance.image.name, 'tests/test.png') self.assertEqual(instance.height, 16) self.assertEqual(instance.width, 16) # Delete the current file since this is not done by Django, but don't save # because the dimension fields are not null=True. instance.image.delete(save=False) # Override the file by uploading a new one. f = ImageFileForm( data={'description': 'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/test2.png') self.assertEqual(instance.height, 32) self.assertEqual(instance.width, 48) # Delete the current file since this is not done by Django, but don't save # because the dimension fields are not null=True. instance.image.delete(save=False) instance.delete() f = ImageFileForm( data={'description': 'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data2)}, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/test2.png') self.assertEqual(instance.height, 32) self.assertEqual(instance.width, 48) # Delete the current file since this is not done by Django, but don't save # because the dimension fields are not null=True. instance.image.delete(save=False) instance.delete() # Test the non-required ImageField # Note: In Oracle, we expect a null ImageField to return '' instead of # None. 
if connection.features.interprets_empty_strings_as_nulls: expected_null_imagefield_repr = '' else: expected_null_imagefield_repr = None f = OptionalImageFileForm(data={'description': 'Test'}) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, expected_null_imagefield_repr) self.assertIsNone(instance.width) self.assertIsNone(instance.height) f = OptionalImageFileForm( data={'description': 'And a final one'}, files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/test3.png') self.assertEqual(instance.width, 16) self.assertEqual(instance.height, 16) # Editing the instance without re-uploading the image should not affect # the image or its width/height properties. f = OptionalImageFileForm({'description': 'New Description'}, instance=instance) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.description, 'New Description') self.assertEqual(instance.image.name, 'tests/test3.png') self.assertEqual(instance.width, 16) self.assertEqual(instance.height, 16) # Delete the current file since this is not done by Django. instance.image.delete() instance.delete() f = OptionalImageFileForm( data={'description': 'And a final one'}, files={'image': SimpleUploadedFile('test4.png', image_data2)} ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/test4.png') self.assertEqual(instance.width, 48) self.assertEqual(instance.height, 32) instance.delete() # Test callable upload_to behavior that's dependent on the value of another field in the model f = ImageFileForm( data={'description': 'And a final one', 'path': 'foo'}, files={'image': SimpleUploadedFile('test4.png', image_data)}, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'foo/test4.png') instance.delete() # Editing an instance that has an image without an extension shouldn't # fail validation. 
First create: f = NoExtensionImageFileForm( data={'description': 'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)}, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/no_extension') # Then edit: f = NoExtensionImageFileForm(data={'description': 'Edited image'}, instance=instance) self.assertTrue(f.is_valid()) class ModelOtherFieldTests(SimpleTestCase): def test_big_integer_field(self): bif = BigIntForm({'biggie': '-9223372036854775808'}) self.assertTrue(bif.is_valid()) bif = BigIntForm({'biggie': '-9223372036854775809'}) self.assertFalse(bif.is_valid()) self.assertEqual( bif.errors, {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']} ) bif = BigIntForm({'biggie': '9223372036854775807'}) self.assertTrue(bif.is_valid()) bif = BigIntForm({'biggie': '9223372036854775808'}) self.assertFalse(bif.is_valid()) self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']}) def test_url_on_modelform(self): "Check basic URL field validation on model forms" class HomepageForm(forms.ModelForm): class Meta: model = Homepage fields = '__all__' self.assertFalse(HomepageForm({'url': 'foo'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid()) def test_modelform_non_editable_field(self): """ When explicitly including a non-editable field in a ModelForm, the error message should be explicit. """ # 'created', non-editable, is excluded by default self.assertNotIn('created', ArticleForm().fields) msg = "'created' cannot be specified for Article model form as it is a non-editable field" with self.assertRaisesMessage(FieldError, msg): class InvalidArticleForm(forms.ModelForm): class Meta: model = Article fields = ('headline', 'created') def test_http_prefixing(self): """ If the http:// prefix is omitted on form input, the field adds it again. (Refs #13613) """ class HomepageForm(forms.ModelForm): class Meta: model = Homepage fields = '__all__' form = HomepageForm({'url': 'example.com'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['url'], 'http://example.com') form = HomepageForm({'url': 'example.com/test'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['url'], 'http://example.com/test') class OtherModelFormTests(TestCase): def test_media_on_modelform(self): # Similar to a regular Form class you can define custom media to be used on # the ModelForm. 
f = ModelFormWithMedia() self.assertHTMLEqual( str(f.media), '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet"> <script src="/some/form/javascript"></script>''' ) def test_choices_type(self): # Choices on CharField and IntegerField f = ArticleForm() with self.assertRaises(ValidationError): f.fields['status'].clean('42') f = ArticleStatusForm() with self.assertRaises(ValidationError): f.fields['status'].clean('z') def test_prefetch_related_queryset(self): """ ModelChoiceField should respect a prefetch_related() on its queryset. """ blue = Colour.objects.create(name='blue') red = Colour.objects.create(name='red') multicolor_item = ColourfulItem.objects.create() multicolor_item.colours.add(blue, red) red_item = ColourfulItem.objects.create() red_item.colours.add(red) class ColorModelChoiceField(forms.ModelChoiceField): def label_from_instance(self, obj): return ', '.join(c.name for c in obj.colours.all()) field = ColorModelChoiceField(ColourfulItem.objects.prefetch_related('colours')) with self.assertNumQueries(3): # would be 4 if prefetch is ignored self.assertEqual(tuple(field.choices), ( ('', '---------'), (multicolor_item.pk, 'blue, red'), (red_item.pk, 'red'), )) def test_foreignkeys_which_use_to_field(self): apple = Inventory.objects.create(barcode=86, name='Apple') Inventory.objects.create(barcode=22, name='Pear') core = Inventory.objects.create(barcode=87, name='Core', parent=apple) field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode') self.assertEqual(tuple(field.choices), ( ('', '---------'), (86, 'Apple'), (87, 'Core'), (22, 'Pear'))) form = InventoryForm(instance=core) self.assertHTMLEqual(str(form['parent']), '''<select name="parent" id="id_parent"> <option value="">---------</option> <option value="86" selected>Apple</option> <option value="87">Core</option> <option value="22">Pear</option> </select>''') data = model_to_dict(core) data['parent'] = '22' form = InventoryForm(data=data, instance=core) core = form.save() self.assertEqual(core.parent.name, 'Pear') class CategoryForm(forms.ModelForm): description = forms.CharField() class Meta: model = Category fields = ['description', 'url'] self.assertEqual(list(CategoryForm.base_fields), ['description', 'url']) self.assertHTMLEqual( str(CategoryForm()), '''<tr><th><label for="id_description">Description:</label></th> <td><input type="text" name="description" id="id_description" required></td></tr> <tr><th><label for="id_url">The URL:</label></th> <td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>''' ) # to_field_name should also work on ModelMultipleChoiceField ################## field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode') self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear'))) self.assertQuerysetEqual(field.clean([86]), ['Apple']) form = SelectInventoryForm({'items': [87, 22]}) self.assertTrue(form.is_valid()) self.assertEqual(len(form.cleaned_data), 1) self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear']) def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self): self.assertEqual(list(CustomFieldForExclusionForm.base_fields), ['name']) self.assertHTMLEqual( str(CustomFieldForExclusionForm()), '''<tr><th><label for="id_name">Name:</label></th> <td><input id="id_name" type="text" name="name" maxlength="10" required></td></tr>''' ) def test_iterable_model_m2m(self): class ColourfulItemForm(forms.ModelForm): class Meta: model = 
ColourfulItem fields = '__all__' colour = Colour.objects.create(name='Blue') form = ColourfulItemForm() self.maxDiff = 1024 self.assertHTMLEqual( form.as_p(), """<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" required></p> <p><label for="id_colours">Colours:</label> <select multiple name="colours" id="id_colours" required> <option value="%(blue_pk)s">Blue</option> </select></p>""" % {'blue_pk': colour.pk}) def test_callable_field_default(self): class PublicationDefaultsForm(forms.ModelForm): class Meta: model = PublicationDefaults fields = ('title', 'date_published', 'mode', 'category') self.maxDiff = 2000 form = PublicationDefaultsForm() today_str = str(datetime.date.today()) self.assertHTMLEqual( form.as_p(), """ <p><label for="id_title">Title:</label> <input id="id_title" maxlength="30" name="title" type="text" required></p> <p><label for="id_date_published">Date published:</label> <input id="id_date_published" name="date_published" type="text" value="{0}" required> <input id="initial-id_date_published" name="initial-date_published" type="hidden" value="{0}"></p> <p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode"> <option value="di" selected>direct</option> <option value="de">delayed</option></select> <input id="initial-id_mode" name="initial-mode" type="hidden" value="di"></p> <p><label for="id_category">Category:</label> <select id="id_category" name="category"> <option value="1">Games</option> <option value="2">Comics</option> <option value="3" selected>Novel</option></select> <input id="initial-id_category" name="initial-category" type="hidden" value="3"> """.format(today_str) ) empty_data = { 'title': '', 'date_published': today_str, 'initial-date_published': today_str, 'mode': 'di', 'initial-mode': 'di', 'category': '3', 'initial-category': '3', } bound_form = PublicationDefaultsForm(empty_data) self.assertFalse(bound_form.has_changed()) class ModelFormCustomErrorTests(SimpleTestCase): def test_custom_error_messages(self): data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'} errors = CustomErrorMessageForm(data).errors self.assertHTMLEqual( str(errors['name1']), '<ul class="errorlist"><li>Form custom error message.</li></ul>' ) self.assertHTMLEqual( str(errors['name2']), '<ul class="errorlist"><li>Model custom error message.</li></ul>' ) def test_model_clean_error_messages(self): data = {'name1': 'FORBIDDEN_VALUE', 'name2': 'ABC'} form = CustomErrorMessageForm(data) self.assertFalse(form.is_valid()) self.assertHTMLEqual( str(form.errors['name1']), '<ul class="errorlist"><li>Model.clean() error messages.</li></ul>' ) data = {'name1': 'FORBIDDEN_VALUE2', 'name2': 'ABC'} form = CustomErrorMessageForm(data) self.assertFalse(form.is_valid()) self.assertHTMLEqual( str(form.errors['name1']), '<ul class="errorlist"><li>Model.clean() error messages (simpler syntax).</li></ul>' ) data = {'name1': 'GLOBAL_ERROR', 'name2': 'ABC'} form = CustomErrorMessageForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors['__all__'], ['Global error message.']) class CustomCleanTests(TestCase): def test_override_clean(self): """ Regression for #12596: Calling super from ModelForm.clean() should be optional. 
""" class TripleFormWithCleanOverride(forms.ModelForm): class Meta: model = Triple fields = '__all__' def clean(self): if not self.cleaned_data['left'] == self.cleaned_data['right']: raise ValidationError('Left and right should be equal') return self.cleaned_data form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1}) self.assertTrue(form.is_valid()) # form.instance.left will be None if the instance was not constructed # by form.full_clean(). self.assertEqual(form.instance.left, 1) def test_model_form_clean_applies_to_model(self): """ Regression test for #12960. Make sure the cleaned_data returned from ModelForm.clean() is applied to the model instance. """ class CategoryForm(forms.ModelForm): class Meta: model = Category fields = '__all__' def clean(self): self.cleaned_data['name'] = self.cleaned_data['name'].upper() return self.cleaned_data data = {'name': 'Test', 'slug': 'test', 'url': '/test'} form = CategoryForm(data) category = form.save() self.assertEqual(category.name, 'TEST') class ModelFormInheritanceTests(SimpleTestCase): def test_form_subclass_inheritance(self): class Form(forms.Form): age = forms.IntegerField() class ModelForm(forms.ModelForm, Form): class Meta: model = Writer fields = '__all__' self.assertEqual(list(ModelForm().fields), ['name', 'age']) def test_field_removal(self): class ModelForm(forms.ModelForm): class Meta: model = Writer fields = '__all__' class Mixin: age = None class Form(forms.Form): age = forms.IntegerField() class Form2(forms.Form): foo = forms.IntegerField() self.assertEqual(list(ModelForm().fields), ['name']) self.assertEqual(list(type('NewForm', (Mixin, Form), {})().fields), []) self.assertEqual(list(type('NewForm', (Form2, Mixin, Form), {})().fields), ['foo']) self.assertEqual(list(type('NewForm', (Mixin, ModelForm, Form), {})().fields), ['name']) self.assertEqual(list(type('NewForm', (ModelForm, Mixin, Form), {})().fields), ['name']) self.assertEqual(list(type('NewForm', (ModelForm, Form, Mixin), {})().fields), ['name', 'age']) self.assertEqual(list(type('NewForm', (ModelForm, Form), {'age': None})().fields), ['name']) def test_field_removal_name_clashes(self): """ Form fields can be removed in subclasses by setting them to None (#22510). """ class MyForm(forms.ModelForm): media = forms.CharField() class Meta: model = Writer fields = '__all__' class SubForm(MyForm): media = None self.assertIn('media', MyForm().fields) self.assertNotIn('media', SubForm().fields) self.assertTrue(hasattr(MyForm, 'media')) self.assertTrue(hasattr(SubForm, 'media')) class StumpJokeForm(forms.ModelForm): class Meta: model = StumpJoke fields = '__all__' class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field): queryset = 42 class StumpJokeWithCustomFieldForm(forms.ModelForm): custom = CustomFieldWithQuerysetButNoLimitChoicesTo() class Meta: model = StumpJoke fields = () class LimitChoicesToTests(TestCase): """ Tests the functionality of ``limit_choices_to``. """ @classmethod def setUpTestData(cls): cls.threepwood = Character.objects.create( username='threepwood', last_action=datetime.datetime.today() + datetime.timedelta(days=1), ) cls.marley = Character.objects.create( username='marley', last_action=datetime.datetime.today() - datetime.timedelta(days=1), ) def test_limit_choices_to_callable_for_fk_rel(self): """ A ForeignKey can use limit_choices_to as a callable (#2554). 
""" stumpjokeform = StumpJokeForm() self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood]) def test_limit_choices_to_callable_for_m2m_rel(self): """ A ManyToManyField can use limit_choices_to as a callable (#2554). """ stumpjokeform = StumpJokeForm() self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood]) def test_custom_field_with_queryset_but_no_limit_choices_to(self): """ A custom field with a `queryset` attribute but no `limit_choices_to` works (#23795). """ f = StumpJokeWithCustomFieldForm() self.assertEqual(f.fields['custom'].queryset, 42) def test_fields_for_model_applies_limit_choices_to(self): fields = fields_for_model(StumpJoke, ['has_fooled_today']) self.assertSequenceEqual(fields['has_fooled_today'].queryset, [self.threepwood]) def test_callable_called_each_time_form_is_instantiated(self): field = StumpJokeForm.base_fields['most_recently_fooled'] with mock.patch.object(field, 'limit_choices_to') as today_callable_dict: StumpJokeForm() self.assertEqual(today_callable_dict.call_count, 1) StumpJokeForm() self.assertEqual(today_callable_dict.call_count, 2) StumpJokeForm() self.assertEqual(today_callable_dict.call_count, 3) class FormFieldCallbackTests(SimpleTestCase): def test_baseform_with_widgets_in_meta(self): """Regression for #13095: Using base forms with widgets defined in Meta should not raise errors.""" widget = forms.Textarea() class BaseForm(forms.ModelForm): class Meta: model = Person widgets = {'name': widget} fields = "__all__" Form = modelform_factory(Person, form=BaseForm) self.assertIsInstance(Form.base_fields['name'].widget, forms.Textarea) def test_factory_with_widget_argument(self): """ Regression for #15315: modelform_factory should accept widgets argument """ widget = forms.Textarea() # Without a widget should not set the widget to textarea Form = modelform_factory(Person, fields="__all__") self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea) # With a widget should not set the widget to textarea Form = modelform_factory(Person, fields="__all__", widgets={'name': widget}) self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea) def test_modelform_factory_without_fields(self): """ Regression for #19733 """ message = ( "Calling modelform_factory without defining 'fields' or 'exclude' " "explicitly is prohibited." 
) with self.assertRaisesMessage(ImproperlyConfigured, message): modelform_factory(Person) def test_modelform_factory_with_all_fields(self): """ Regression for #19733 """ form = modelform_factory(Person, fields="__all__") self.assertEqual(list(form.base_fields), ["name"]) def test_custom_callback(self): """A custom formfield_callback is used if provided""" callback_args = [] def callback(db_field, **kwargs): callback_args.append((db_field, kwargs)) return db_field.formfield(**kwargs) widget = forms.Textarea() class BaseForm(forms.ModelForm): class Meta: model = Person widgets = {'name': widget} fields = "__all__" modelform_factory(Person, form=BaseForm, formfield_callback=callback) id_field, name_field = Person._meta.fields self.assertEqual(callback_args, [(id_field, {}), (name_field, {'widget': widget})]) def test_bad_callback(self): # A bad callback provided by user still gives an error with self.assertRaises(TypeError): modelform_factory(Person, fields="__all__", formfield_callback='not a function or callable') def test_inherit_after_custom_callback(self): def callback(db_field, **kwargs): if isinstance(db_field, models.CharField): return forms.CharField(widget=forms.Textarea) return db_field.formfield(**kwargs) class BaseForm(forms.ModelForm): class Meta: model = Person fields = '__all__' NewForm = modelform_factory(Person, form=BaseForm, formfield_callback=callback) class InheritedForm(NewForm): pass for name in NewForm.base_fields: self.assertEqual( type(InheritedForm.base_fields[name].widget), type(NewForm.base_fields[name].widget) ) class LocalizedModelFormTest(TestCase): def test_model_form_applies_localize_to_some_fields(self): class PartiallyLocalizedTripleForm(forms.ModelForm): class Meta: model = Triple localized_fields = ('left', 'right',) fields = '__all__' f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10}) self.assertTrue(f.is_valid()) self.assertTrue(f.fields['left'].localize) self.assertFalse(f.fields['middle'].localize) self.assertTrue(f.fields['right'].localize) def test_model_form_applies_localize_to_all_fields(self): class FullyLocalizedTripleForm(forms.ModelForm): class Meta: model = Triple localized_fields = '__all__' fields = '__all__' f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10}) self.assertTrue(f.is_valid()) self.assertTrue(f.fields['left'].localize) self.assertTrue(f.fields['middle'].localize) self.assertTrue(f.fields['right'].localize) def test_model_form_refuses_arbitrary_string(self): msg = ( "BrokenLocalizedTripleForm.Meta.localized_fields " "cannot be a string. Did you mean to type: ('foo',)?" ) with self.assertRaisesMessage(TypeError, msg): class BrokenLocalizedTripleForm(forms.ModelForm): class Meta: model = Triple localized_fields = "foo" class CustomMetaclass(ModelFormMetaclass): def __new__(cls, name, bases, attrs): new = super().__new__(cls, name, bases, attrs) new.base_fields = {} return new class CustomMetaclassForm(forms.ModelForm, metaclass=CustomMetaclass): pass class CustomMetaclassTestCase(SimpleTestCase): def test_modelform_factory_metaclass(self): new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm) self.assertEqual(new_cls.base_fields, {}) class StrictAssignmentTests(SimpleTestCase): """ Should a model do anything special with __setattr__() or descriptors which raise a ValidationError, a model form should catch the error (#24706). 
""" def test_setattr_raises_validation_error_field_specific(self): """ A model ValidationError using the dict form should put the error message into the correct key of form.errors. """ form_class = modelform_factory(model=StrictAssignmentFieldSpecific, fields=['title']) form = form_class(data={'title': 'testing setattr'}, files=None) # This line turns on the ValidationError; it avoids the model erroring # when its own __init__() is called when creating form.instance. form.instance._should_error = True self.assertFalse(form.is_valid()) self.assertEqual(form.errors, { 'title': ['Cannot set attribute', 'This field cannot be blank.'] }) def test_setattr_raises_validation_error_non_field(self): """ A model ValidationError not using the dict form should put the error message into __all__ (i.e. non-field errors) on the form. """ form_class = modelform_factory(model=StrictAssignmentAll, fields=['title']) form = form_class(data={'title': 'testing setattr'}, files=None) # This line turns on the ValidationError; it avoids the model erroring # when its own __init__() is called when creating form.instance. form.instance._should_error = True self.assertFalse(form.is_valid()) self.assertEqual(form.errors, { '__all__': ['Cannot set attribute'], 'title': ['This field cannot be blank.'] }) class ModelToDictTests(TestCase): def test_many_to_many(self): """Data for a ManyToManyField is a list rather than a lazy QuerySet.""" blue = Colour.objects.create(name='blue') red = Colour.objects.create(name='red') item = ColourfulItem.objects.create() item.colours.set([blue]) data = model_to_dict(item)['colours'] self.assertEqual(data, [blue]) item.colours.set([red]) # If data were a QuerySet, it would be reevaluated here and give "red" # instead of the original value. self.assertEqual(data, [blue])
f7c75c7e5cd45bb0204756bc20509839214c2aed67c983efed5d3447b17105c1
import datetime import os import tempfile import uuid from django.core import validators from django.core.exceptions import ValidationError from django.core.files.storage import FileSystemStorage from django.db import models temp_storage_dir = tempfile.mkdtemp() temp_storage = FileSystemStorage(temp_storage_dir) class Person(models.Model): name = models.CharField(max_length=100) class Category(models.Model): name = models.CharField(max_length=20) slug = models.SlugField(max_length=20) url = models.CharField('The URL', max_length=40) def __str__(self): return self.name def __repr__(self): return self.__str__() class WriterManager(models.Manager): def get_queryset(self): qs = super().get_queryset() return qs.filter(archived=False) class Writer(models.Model): name = models.CharField(max_length=50, help_text='Use both first and last names.') archived = models.BooleanField(default=False, editable=False) objects = WriterManager() class Meta: ordering = ('name',) def __str__(self): return self.name class Article(models.Model): ARTICLE_STATUS = ( (1, 'Draft'), (2, 'Pending'), (3, 'Live'), ) headline = models.CharField(max_length=50) slug = models.SlugField() pub_date = models.DateField() created = models.DateField(editable=False) writer = models.ForeignKey(Writer, models.CASCADE) article = models.TextField() categories = models.ManyToManyField(Category, blank=True) status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True) def save(self, *args, **kwargs): if not self.id: self.created = datetime.date.today() return super().save(*args, **kwargs) def __str__(self): return self.headline class ImprovedArticle(models.Model): article = models.OneToOneField(Article, models.CASCADE) class ImprovedArticleWithParentLink(models.Model): article = models.OneToOneField(Article, models.CASCADE, parent_link=True) class BetterWriter(Writer): score = models.IntegerField() class Publication(models.Model): title = models.CharField(max_length=30) date_published = models.DateField() def __str__(self): return self.title def default_mode(): return 'di' def default_category(): return 3 class PublicationDefaults(models.Model): MODE_CHOICES = (('di', 'direct'), ('de', 'delayed')) CATEGORY_CHOICES = ((1, 'Games'), (2, 'Comics'), (3, 'Novel')) title = models.CharField(max_length=30) date_published = models.DateField(default=datetime.date.today) datetime_published = models.DateTimeField(default=datetime.datetime(2000, 1, 1)) mode = models.CharField(max_length=2, choices=MODE_CHOICES, default=default_mode) category = models.IntegerField(choices=CATEGORY_CHOICES, default=default_category) active = models.BooleanField(default=True) file = models.FileField(default='default.txt') class Author(models.Model): publication = models.OneToOneField(Publication, models.SET_NULL, null=True, blank=True) full_name = models.CharField(max_length=255) class Author1(models.Model): publication = models.OneToOneField(Publication, models.CASCADE, null=False) full_name = models.CharField(max_length=255) class WriterProfile(models.Model): writer = models.OneToOneField(Writer, models.CASCADE, primary_key=True) age = models.PositiveIntegerField() def __str__(self): return "%s is %s" % (self.writer, self.age) class Document(models.Model): myfile = models.FileField(upload_to='unused', blank=True) class TextFile(models.Model): description = models.CharField(max_length=20) file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15) def __str__(self): return self.description class CustomFileField(models.FileField): 
def save_form_data(self, instance, data): been_here = getattr(self, 'been_saved', False) assert not been_here, "save_form_data called more than once" setattr(self, 'been_saved', True) class CustomFF(models.Model): f = CustomFileField(upload_to='unused', blank=True) class FilePathModel(models.Model): path = models.FilePathField(path=os.path.dirname(__file__), match='models.py', blank=True) try: from PIL import Image # NOQA: detect if Pillow is installed test_images = True class ImageFile(models.Model): def custom_upload_path(self, filename): path = self.path or 'tests' return '%s/%s' % (path, filename) description = models.CharField(max_length=20) # Deliberately put the image field *after* the width/height fields to # trigger the bug in #10404 with width/height not getting assigned. width = models.IntegerField(editable=False) height = models.IntegerField(editable=False) image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path, width_field='width', height_field='height') path = models.CharField(max_length=16, blank=True, default='') def __str__(self): return self.description class OptionalImageFile(models.Model): def custom_upload_path(self, filename): path = self.path or 'tests' return '%s/%s' % (path, filename) description = models.CharField(max_length=20) image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path, width_field='width', height_field='height', blank=True, null=True) width = models.IntegerField(editable=False, null=True) height = models.IntegerField(editable=False, null=True) path = models.CharField(max_length=16, blank=True, default='') def __str__(self): return self.description class NoExtensionImageFile(models.Model): def upload_to(self, filename): return 'tests/no_extension' description = models.CharField(max_length=20) image = models.ImageField(storage=temp_storage, upload_to=upload_to) def __str__(self): return self.description except ImportError: test_images = False class Homepage(models.Model): url = models.URLField() class Product(models.Model): slug = models.SlugField(unique=True) def __str__(self): return self.slug class Price(models.Model): price = models.DecimalField(max_digits=10, decimal_places=2) quantity = models.PositiveIntegerField() class Meta: unique_together = (('price', 'quantity'),) def __str__(self): return "%s for %s" % (self.quantity, self.price) class Triple(models.Model): left = models.IntegerField() middle = models.IntegerField() right = models.IntegerField() class Meta: unique_together = (('left', 'middle'), ('middle', 'right')) class ArticleStatus(models.Model): ARTICLE_STATUS_CHAR = ( ('d', 'Draft'), ('p', 'Pending'), ('l', 'Live'), ) status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True) class Inventory(models.Model): barcode = models.PositiveIntegerField(unique=True) parent = models.ForeignKey('self', models.SET_NULL, to_field='barcode', blank=True, null=True) name = models.CharField(blank=False, max_length=20) class Meta: ordering = ('name',) def __str__(self): return self.name def __repr__(self): return self.__str__() class Book(models.Model): title = models.CharField(max_length=40) author = models.ForeignKey(Writer, models.SET_NULL, blank=True, null=True) special_id = models.IntegerField(blank=True, null=True, unique=True) class Meta: unique_together = ('title', 'author') class BookXtra(models.Model): isbn = models.CharField(max_length=16, unique=True) suffix1 = models.IntegerField(blank=True, default=0) suffix2 = models.IntegerField(blank=True, default=0) class 
Meta: unique_together = (('suffix1', 'suffix2')) abstract = True class DerivedBook(Book, BookXtra): pass class ExplicitPK(models.Model): key = models.CharField(max_length=20, primary_key=True) desc = models.CharField(max_length=20, blank=True, unique=True) class Meta: unique_together = ('key', 'desc') def __str__(self): return self.key class Post(models.Model): title = models.CharField(max_length=50, unique_for_date='posted', blank=True) slug = models.CharField(max_length=50, unique_for_year='posted', blank=True) subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True) posted = models.DateField() def __str__(self): return self.title class DateTimePost(models.Model): title = models.CharField(max_length=50, unique_for_date='posted', blank=True) slug = models.CharField(max_length=50, unique_for_year='posted', blank=True) subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True) posted = models.DateTimeField(editable=False) def __str__(self): return self.title class DerivedPost(Post): pass class BigInt(models.Model): biggie = models.BigIntegerField() def __str__(self): return str(self.biggie) class MarkupField(models.CharField): def __init__(self, *args, **kwargs): kwargs["max_length"] = 20 super().__init__(*args, **kwargs) def formfield(self, **kwargs): # don't allow this field to be used in form (real use-case might be # that you know the markup will always be X, but it is among an app # that allows the user to say it could be something else) # regressed at r10062 return None class CustomFieldForExclusionModel(models.Model): name = models.CharField(max_length=10) markup = MarkupField() class FlexibleDatePost(models.Model): title = models.CharField(max_length=50, unique_for_date='posted', blank=True) slug = models.CharField(max_length=50, unique_for_year='posted', blank=True) subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True) posted = models.DateField(blank=True, null=True) class Colour(models.Model): name = models.CharField(max_length=50) def __iter__(self): yield from range(5) def __str__(self): return self.name class ColourfulItem(models.Model): name = models.CharField(max_length=50) colours = models.ManyToManyField(Colour) class CustomErrorMessage(models.Model): name1 = models.CharField( max_length=50, validators=[validators.validate_slug], error_messages={'invalid': 'Model custom error message.'}, ) name2 = models.CharField( max_length=50, validators=[validators.validate_slug], error_messages={'invalid': 'Model custom error message.'}, ) def clean(self): if self.name1 == 'FORBIDDEN_VALUE': raise ValidationError({'name1': [ValidationError('Model.clean() error messages.')]}) elif self.name1 == 'FORBIDDEN_VALUE2': raise ValidationError({'name1': 'Model.clean() error messages (simpler syntax).'}) elif self.name1 == 'GLOBAL_ERROR': raise ValidationError("Global error message.") def today_callable_dict(): return {"last_action__gte": datetime.datetime.today()} def today_callable_q(): return models.Q(last_action__gte=datetime.datetime.today()) class Character(models.Model): username = models.CharField(max_length=100) last_action = models.DateTimeField() def __str__(self): return self.username class StumpJoke(models.Model): most_recently_fooled = models.ForeignKey( Character, models.CASCADE, limit_choices_to=today_callable_dict, related_name="+", ) has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+") # Model for #13776 class Student(models.Model): character = 
models.ForeignKey(Character, models.CASCADE) study = models.CharField(max_length=30) # Model for #639 class Photo(models.Model): title = models.CharField(max_length=30) image = models.FileField(storage=temp_storage, upload_to='tests') # Support code for the tests; this keeps track of how many times save() # gets called on each instance. def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._savecount = 0 def save(self, force_insert=False, force_update=False): super().save(force_insert, force_update) self._savecount += 1 class UUIDPK(models.Model): uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) name = models.CharField(max_length=30) # Models for #24706 class StrictAssignmentFieldSpecific(models.Model): title = models.CharField(max_length=30) _should_error = False def __setattr__(self, key, value): if self._should_error is True: raise ValidationError(message={key: "Cannot set attribute"}, code='invalid') super().__setattr__(key, value) class StrictAssignmentAll(models.Model): title = models.CharField(max_length=30) _should_error = False def __setattr__(self, key, value): if self._should_error is True: raise ValidationError(message="Cannot set attribute", code='invalid') super().__setattr__(key, value) # A model with ForeignKey(blank=False, null=True) class Award(models.Model): name = models.CharField(max_length=30) character = models.ForeignKey(Character, models.SET_NULL, blank=False, null=True) class NullableUniqueCharFieldModel(models.Model): codename = models.CharField(max_length=50, blank=True, null=True, unique=True) email = models.EmailField(blank=True, null=True) slug = models.SlugField(blank=True, null=True) url = models.URLField(blank=True, null=True)
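# --- Editor's note: illustrative sketch, not part of the original module. ---
# How the callable limit_choices_to values defined above are typically
# consumed: the form layer re-evaluates the callable each time a form is
# instantiated and applies the result with QuerySet.complex_filter(), which
# accepts either the dict returned by today_callable_dict() or the Q object
# returned by today_callable_q(). The helper below is invented purely for
# illustration.


def _sketch_characters_fooled_recently():
    """Return the queryset a freshly built StumpJoke form field would offer
    for most_recently_fooled, assuming today_callable_dict() as defined above."""
    return Character.objects.complex_filter(today_callable_dict())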
c07c4998a53dfd717e97f7e63451f009cdbc6d048255e7431d2886bc6d0fdc89
import re from django.db import connection from django.test import TestCase, skipUnlessDBFeature from django.utils.functional import cached_property from .utils import oracle, postgis, spatialite test_srs = ({ 'srid': 4326, 'auth_name': ('EPSG', True), 'auth_srid': 4326, # Only the beginning, because there are differences depending on installed libs 'srtext': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"', # +ellps=WGS84 has been removed in the 4326 proj string in proj-4.8 'proj_re': r'\+proj=longlat (\+ellps=WGS84 )?(\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ?', 'spheroid': 'WGS 84', 'name': 'WGS 84', 'geographic': True, 'projected': False, 'spatialite': True, # From proj's "cs2cs -le" and Wikipedia (semi-minor only) 'ellipsoid': (6378137.0, 6356752.3, 298.257223563), 'eprec': (1, 1, 9), 'wkt': re.sub(r'[\s+]', '', """ GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0.01745329251994328, AUTHORITY["EPSG","9122"]], AUTHORITY["EPSG","4326"]] """) }, { 'srid': 32140, 'auth_name': ('EPSG', False), 'auth_srid': 32140, 'srtext': ( 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",' 'DATUM["North_American_Datum_1983",SPHEROID["GRS 1980"' ), 'proj_re': r'\+proj=lcc (\+lat_1=30.28333333333333? |\+lat_2=28.38333333333333? |\+lat_0=27.83333333333333? |' r'\+lon_0=-99 ){4}\+x_0=600000 \+y_0=4000000 (\+ellps=GRS80 )?' r'(\+datum=NAD83 |\+towgs84=0,0,0,0,0,0,0 )?\+units=m \+no_defs ?', 'spheroid': 'GRS 1980', 'name': 'NAD83 / Texas South Central', 'geographic': False, 'projected': True, 'spatialite': False, # From proj's "cs2cs -le" and Wikipedia (semi-minor only) 'ellipsoid': (6378137.0, 6356752.31414, 298.257222101), 'eprec': (1, 5, 10), }) @skipUnlessDBFeature("has_spatialrefsys_table") class SpatialRefSysTest(TestCase): @cached_property def SpatialRefSys(self): return connection.ops.connection.ops.spatial_ref_sys() def test_get_units(self): epsg_4326 = next(f for f in test_srs if f['srid'] == 4326) unit, unit_name = self.SpatialRefSys().get_units(epsg_4326['wkt']) self.assertEqual(unit_name, 'degree') self.assertAlmostEqual(unit, 0.01745329251994328) def test_retrieve(self): """ Test retrieval of SpatialRefSys model objects. """ for sd in test_srs: srs = self.SpatialRefSys.objects.get(srid=sd['srid']) self.assertEqual(sd['srid'], srs.srid) # Some of the authority names are borked on Oracle, e.g., SRID=32140. # also, Oracle Spatial seems to add extraneous info to fields, hence the # the testing with the 'startswith' flag. auth_name, oracle_flag = sd['auth_name'] if postgis or (oracle and oracle_flag): self.assertTrue(srs.auth_name.startswith(auth_name)) self.assertEqual(sd['auth_srid'], srs.auth_srid) # No PROJ and different srtext on oracle backends :( if postgis: self.assertTrue(srs.wkt.startswith(sd['srtext'])) self.assertRegex(srs.proj4text, sd['proj_re']) def test_osr(self): """ Test getting OSR objects from SpatialRefSys model objects. """ for sd in test_srs: sr = self.SpatialRefSys.objects.get(srid=sd['srid']) self.assertTrue(sr.spheroid.startswith(sd['spheroid'])) self.assertEqual(sd['geographic'], sr.geographic) self.assertEqual(sd['projected'], sr.projected) if not (spatialite and not sd['spatialite']): # Can't get 'NAD83 / Texas South Central' from PROJ string # on SpatiaLite self.assertTrue(sr.name.startswith(sd['name'])) # Testing the SpatialReference object directly. 
if postgis or spatialite: srs = sr.srs self.assertRegex(srs.proj, sd['proj_re']) self.assertTrue(srs.wkt.startswith(sd['srtext'])) def test_ellipsoid(self): """ Test the ellipsoid property. """ for sd in test_srs: # Getting the ellipsoid and precision parameters. ellps1 = sd['ellipsoid'] prec = sd['eprec'] # Getting our spatial reference and its ellipsoid srs = self.SpatialRefSys.objects.get(srid=sd['srid']) ellps2 = srs.ellipsoid for i in range(3): self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i]) @skipUnlessDBFeature('supports_add_srs_entry') def test_add_entry(self): """ Test adding a new entry in the SpatialRefSys model using the add_srs_entry utility. """ from django.contrib.gis.utils import add_srs_entry add_srs_entry(3857) self.assertTrue( self.SpatialRefSys.objects.filter(srid=3857).exists() ) srs = self.SpatialRefSys.objects.get(srid=3857) self.assertTrue( self.SpatialRefSys.get_spheroid(srs.wkt).startswith('SPHEROID[') )
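# --- Editor's note: illustrative sketch, not part of the original test module. ---
# Minimal usage of the pieces exercised above, assuming a GIS-enabled backend
# whose DatabaseOperations exposes spatial_ref_sys(). Kept as comments because
# it only runs against a spatial database.
#
# from django.contrib.gis.utils import add_srs_entry
# from django.db import connection
#
# SpatialRefSys = connection.ops.spatial_ref_sys()
# wgs84 = SpatialRefSys.objects.get(srid=4326)
# unit, unit_name = SpatialRefSys().get_units(wgs84.wkt)  # e.g. (0.01745..., 'degree')
# add_srs_entry(3857)  # register Web Mercator when supports_add_srs_entry is True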
2e9b01ba8c014bf610bc8df582f997b34bff84983331734d26f2494cd6a97385
import copy import unittest from functools import wraps from unittest import mock from django.conf import settings from django.db import DEFAULT_DB_ALIAS, connection from django.db.models import Func def skipUnlessGISLookup(*gis_lookups): """ Skip a test unless a database supports all of gis_lookups. """ def decorator(test_func): @wraps(test_func) def skip_wrapper(*args, **kwargs): if any(key not in connection.ops.gis_operators for key in gis_lookups): raise unittest.SkipTest( "Database doesn't support all the lookups: %s" % ", ".join(gis_lookups) ) return test_func(*args, **kwargs) return skip_wrapper return decorator # Shortcut booleans to omit only portions of tests. _default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] oracle = _default_db == 'oracle' postgis = _default_db == 'postgis' mysql = _default_db == 'mysql' mariadb = mysql and connection.mysql_is_mariadb spatialite = _default_db == 'spatialite' # MySQL spatial indices can't handle NULL geometries. gisfield_may_be_null = not mysql class FuncTestMixin: """Assert that Func expressions aren't mutated during their as_sql().""" def setUp(self): def as_sql_wrapper(original_as_sql): def inner(*args, **kwargs): func = original_as_sql.__self__ # Resolve output_field before as_sql() so touching it in # as_sql() won't change __dict__. func.output_field __dict__original = copy.deepcopy(func.__dict__) result = original_as_sql(*args, **kwargs) msg = '%s Func was mutated during compilation.' % func.__class__.__name__ self.assertEqual(func.__dict__, __dict__original, msg) return result return inner def __getattribute__(self, name): if name != vendor_impl: return __getattribute__original(self, name) try: as_sql = __getattribute__original(self, vendor_impl) except AttributeError: as_sql = __getattribute__original(self, 'as_sql') return as_sql_wrapper(as_sql) vendor_impl = 'as_' + connection.vendor __getattribute__original = Func.__getattribute__ self.func_patcher = mock.patch.object(Func, '__getattribute__', __getattribute__) self.func_patcher.start() super().setUp() def tearDown(self): super().tearDown() self.func_patcher.stop()
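# --- Editor's note: illustrative sketch, not part of the original module. ---
# Hypothetical usage of the helpers above: gate a test on backend lookup
# support with skipUnlessGISLookup and opt into the Func-mutation assertion by
# mixing FuncTestMixin into the TestCase. The class and method names below are
# invented for illustration.
#
# from django.test import TestCase
#
# class DistanceFunctionTests(FuncTestMixin, TestCase):
#     @skipUnlessGISLookup('bbcontains')
#     def test_bbcontains_lookup(self):
#         ...  # every Func.as_sql()/as_<vendor>() call in here is checked for mutation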
c6dd098929b6c32d88fc2c66b07a075efb5cfbe515b87837b3670067bca5f165
from math import ceil from django.db import connection, models from django.db.models import ProtectedError, RestrictedError from django.db.models.deletion import Collector from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from .models import ( B1, B2, B3, MR, A, Avatar, B, Base, Child, DeleteBottom, DeleteTop, GenericB1, GenericB2, GenericDeleteBottom, HiddenUser, HiddenUserProfile, M, M2MFrom, M2MTo, MRNull, Origin, P, Parent, R, RChild, RChildChild, Referrer, S, T, User, create_a, get_default_r, ) class OnDeleteTests(TestCase): def setUp(self): self.DEFAULT = get_default_r() def test_auto(self): a = create_a('auto') a.auto.delete() self.assertFalse(A.objects.filter(name='auto').exists()) def test_non_callable(self): msg = 'on_delete must be callable.' with self.assertRaisesMessage(TypeError, msg): models.ForeignKey('self', on_delete=None) with self.assertRaisesMessage(TypeError, msg): models.OneToOneField('self', on_delete=None) def test_auto_nullable(self): a = create_a('auto_nullable') a.auto_nullable.delete() self.assertFalse(A.objects.filter(name='auto_nullable').exists()) def test_setvalue(self): a = create_a('setvalue') a.setvalue.delete() a = A.objects.get(pk=a.pk) self.assertEqual(self.DEFAULT, a.setvalue.pk) def test_setnull(self): a = create_a('setnull') a.setnull.delete() a = A.objects.get(pk=a.pk) self.assertIsNone(a.setnull) def test_setdefault(self): a = create_a('setdefault') a.setdefault.delete() a = A.objects.get(pk=a.pk) self.assertEqual(self.DEFAULT, a.setdefault.pk) def test_setdefault_none(self): a = create_a('setdefault_none') a.setdefault_none.delete() a = A.objects.get(pk=a.pk) self.assertIsNone(a.setdefault_none) def test_cascade(self): a = create_a('cascade') a.cascade.delete() self.assertFalse(A.objects.filter(name='cascade').exists()) def test_cascade_nullable(self): a = create_a('cascade_nullable') a.cascade_nullable.delete() self.assertFalse(A.objects.filter(name='cascade_nullable').exists()) def test_protect(self): a = create_a('protect') msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through protected foreign keys: 'A.protect'." ) with self.assertRaisesMessage(ProtectedError, msg) as cm: a.protect.delete() self.assertEqual(cm.exception.protected_objects, {a}) def test_protect_multiple(self): a = create_a('protect') b = B.objects.create(protect=a.protect) msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through protected foreign keys: 'A.protect', " "'B.protect'." ) with self.assertRaisesMessage(ProtectedError, msg) as cm: a.protect.delete() self.assertEqual(cm.exception.protected_objects, {a, b}) def test_protect_path(self): a = create_a('protect') a.protect.p = P.objects.create() a.protect.save() msg = ( "Cannot delete some instances of model 'P' because they are " "referenced through protected foreign keys: 'R.p'." ) with self.assertRaisesMessage(ProtectedError, msg) as cm: a.protect.p.delete() self.assertEqual(cm.exception.protected_objects, {a}) def test_do_nothing(self): # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model, # so we connect to pre_delete and set the fk to a known value. 
replacement_r = R.objects.create() def check_do_nothing(sender, **kwargs): obj = kwargs['instance'] obj.donothing_set.update(donothing=replacement_r) models.signals.pre_delete.connect(check_do_nothing) a = create_a('do_nothing') a.donothing.delete() a = A.objects.get(pk=a.pk) self.assertEqual(replacement_r, a.donothing) models.signals.pre_delete.disconnect(check_do_nothing) def test_do_nothing_qscount(self): """ A models.DO_NOTHING relation doesn't trigger a query. """ b = Base.objects.create() with self.assertNumQueries(1): # RelToBase should not be queried. b.delete() self.assertEqual(Base.objects.count(), 0) def test_inheritance_cascade_up(self): child = RChild.objects.create() child.delete() self.assertFalse(R.objects.filter(pk=child.pk).exists()) def test_inheritance_cascade_down(self): child = RChild.objects.create() parent = child.r_ptr parent.delete() self.assertFalse(RChild.objects.filter(pk=child.pk).exists()) def test_cascade_from_child(self): a = create_a('child') a.child.delete() self.assertFalse(A.objects.filter(name='child').exists()) self.assertFalse(R.objects.filter(pk=a.child_id).exists()) def test_cascade_from_parent(self): a = create_a('child') R.objects.get(pk=a.child_id).delete() self.assertFalse(A.objects.filter(name='child').exists()) self.assertFalse(RChild.objects.filter(pk=a.child_id).exists()) def test_setnull_from_child(self): a = create_a('child_setnull') a.child_setnull.delete() self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists()) a = A.objects.get(pk=a.pk) self.assertIsNone(a.child_setnull) def test_setnull_from_parent(self): a = create_a('child_setnull') R.objects.get(pk=a.child_setnull_id).delete() self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists()) a = A.objects.get(pk=a.pk) self.assertIsNone(a.child_setnull) def test_o2o_setnull(self): a = create_a('o2o_setnull') a.o2o_setnull.delete() a = A.objects.get(pk=a.pk) self.assertIsNone(a.o2o_setnull) def test_restrict(self): a = create_a('restrict') msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through restricted foreign keys: 'A.restrict'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: a.restrict.delete() self.assertEqual(cm.exception.restricted_objects, {a}) def test_restrict_multiple(self): a = create_a('restrict') b3 = B3.objects.create(restrict=a.restrict) msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through restricted foreign keys: 'A.restrict', " "'B3.restrict'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: a.restrict.delete() self.assertEqual(cm.exception.restricted_objects, {a, b3}) def test_restrict_path_cascade_indirect(self): a = create_a('restrict') a.restrict.p = P.objects.create() a.restrict.save() msg = ( "Cannot delete some instances of model 'P' because they are " "referenced through restricted foreign keys: 'A.restrict'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: a.restrict.p.delete() self.assertEqual(cm.exception.restricted_objects, {a}) # Object referenced also with CASCADE relationship can be deleted. 
a.cascade.p = a.restrict.p a.cascade.save() a.restrict.p.delete() self.assertFalse(A.objects.filter(name='restrict').exists()) self.assertFalse(R.objects.filter(pk=a.restrict_id).exists()) def test_restrict_path_cascade_direct(self): a = create_a('restrict') a.restrict.p = P.objects.create() a.restrict.save() a.cascade_p = a.restrict.p a.save() a.restrict.p.delete() self.assertFalse(A.objects.filter(name='restrict').exists()) self.assertFalse(R.objects.filter(pk=a.restrict_id).exists()) def test_restrict_path_cascade_indirect_diamond(self): delete_top = DeleteTop.objects.create() b1 = B1.objects.create(delete_top=delete_top) b2 = B2.objects.create(delete_top=delete_top) delete_bottom = DeleteBottom.objects.create(b1=b1, b2=b2) msg = ( "Cannot delete some instances of model 'B1' because they are " "referenced through restricted foreign keys: 'DeleteBottom.b1'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: b1.delete() self.assertEqual(cm.exception.restricted_objects, {delete_bottom}) self.assertTrue(DeleteTop.objects.exists()) self.assertTrue(B1.objects.exists()) self.assertTrue(B2.objects.exists()) self.assertTrue(DeleteBottom.objects.exists()) # Object referenced also with CASCADE relationship can be deleted. delete_top.delete() self.assertFalse(DeleteTop.objects.exists()) self.assertFalse(B1.objects.exists()) self.assertFalse(B2.objects.exists()) self.assertFalse(DeleteBottom.objects.exists()) def test_restrict_gfk_no_fast_delete(self): delete_top = DeleteTop.objects.create() generic_b1 = GenericB1.objects.create(generic_delete_top=delete_top) generic_b2 = GenericB2.objects.create(generic_delete_top=delete_top) generic_delete_bottom = GenericDeleteBottom.objects.create( generic_b1=generic_b1, generic_b2=generic_b2, ) msg = ( "Cannot delete some instances of model 'GenericB1' because they " "are referenced through restricted foreign keys: " "'GenericDeleteBottom.generic_b1'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: generic_b1.delete() self.assertEqual(cm.exception.restricted_objects, {generic_delete_bottom}) self.assertTrue(DeleteTop.objects.exists()) self.assertTrue(GenericB1.objects.exists()) self.assertTrue(GenericB2.objects.exists()) self.assertTrue(GenericDeleteBottom.objects.exists()) # Object referenced also with CASCADE relationship can be deleted. 
delete_top.delete() self.assertFalse(DeleteTop.objects.exists()) self.assertFalse(GenericB1.objects.exists()) self.assertFalse(GenericB2.objects.exists()) self.assertFalse(GenericDeleteBottom.objects.exists()) class DeletionTests(TestCase): def test_m2m(self): m = M.objects.create() r = R.objects.create() MR.objects.create(m=m, r=r) r.delete() self.assertFalse(MR.objects.exists()) r = R.objects.create() MR.objects.create(m=m, r=r) m.delete() self.assertFalse(MR.objects.exists()) m = M.objects.create() r = R.objects.create() m.m2m.add(r) r.delete() through = M._meta.get_field('m2m').remote_field.through self.assertFalse(through.objects.exists()) r = R.objects.create() m.m2m.add(r) m.delete() self.assertFalse(through.objects.exists()) m = M.objects.create() r = R.objects.create() MRNull.objects.create(m=m, r=r) r.delete() self.assertFalse(not MRNull.objects.exists()) self.assertFalse(m.m2m_through_null.exists()) def test_bulk(self): s = S.objects.create(r=R.objects.create()) for i in range(2 * GET_ITERATOR_CHUNK_SIZE): T.objects.create(s=s) # 1 (select related `T` instances) # + 1 (select related `U` instances) # + 2 (delete `T` instances in batches) # + 1 (delete `s`) self.assertNumQueries(5, s.delete) self.assertFalse(S.objects.exists()) def test_instance_update(self): deleted = [] related_setnull_sets = [] def pre_delete(sender, **kwargs): obj = kwargs['instance'] deleted.append(obj) if isinstance(obj, R): related_setnull_sets.append([a.pk for a in obj.setnull_set.all()]) models.signals.pre_delete.connect(pre_delete) a = create_a('update_setnull') a.setnull.delete() a = create_a('update_cascade') a.cascade.delete() for obj in deleted: self.assertIsNone(obj.pk) for pk_list in related_setnull_sets: for a in A.objects.filter(id__in=pk_list): self.assertIsNone(a.setnull) models.signals.pre_delete.disconnect(pre_delete) def test_deletion_order(self): pre_delete_order = [] post_delete_order = [] def log_post_delete(sender, **kwargs): pre_delete_order.append((sender, kwargs['instance'].pk)) def log_pre_delete(sender, **kwargs): post_delete_order.append((sender, kwargs['instance'].pk)) models.signals.post_delete.connect(log_post_delete) models.signals.pre_delete.connect(log_pre_delete) r = R.objects.create(pk=1) s1 = S.objects.create(pk=1, r=r) s2 = S.objects.create(pk=2, r=r) T.objects.create(pk=1, s=s1) T.objects.create(pk=2, s=s2) RChild.objects.create(r_ptr=r) r.delete() self.assertEqual( pre_delete_order, [(T, 2), (T, 1), (RChild, 1), (S, 2), (S, 1), (R, 1)] ) self.assertEqual( post_delete_order, [(T, 1), (T, 2), (RChild, 1), (S, 1), (S, 2), (R, 1)] ) models.signals.post_delete.disconnect(log_post_delete) models.signals.pre_delete.disconnect(log_pre_delete) def test_relational_post_delete_signals_happen_before_parent_object(self): deletions = [] def log_post_delete(instance, **kwargs): self.assertTrue(R.objects.filter(pk=instance.r_id)) self.assertIs(type(instance), S) deletions.append(instance.id) r = R.objects.create(pk=1) S.objects.create(pk=1, r=r) models.signals.post_delete.connect(log_post_delete, sender=S) try: r.delete() finally: models.signals.post_delete.disconnect(log_post_delete) self.assertEqual(len(deletions), 1) self.assertEqual(deletions[0], 1) @skipUnlessDBFeature("can_defer_constraint_checks") def test_can_defer_constraint_checks(self): u = User.objects.create( avatar=Avatar.objects.create() ) a = Avatar.objects.get(pk=u.avatar_id) # 1 query to find the users for the avatar. 
# 1 query to delete the user # 1 query to delete the avatar # The important thing is that when we can defer constraint checks there # is no need to do an UPDATE on User.avatar to null it out. # Attach a signal to make sure we will not do fast_deletes. calls = [] def noop(*args, **kwargs): calls.append('') models.signals.post_delete.connect(noop, sender=User) self.assertNumQueries(3, a.delete) self.assertFalse(User.objects.exists()) self.assertFalse(Avatar.objects.exists()) self.assertEqual(len(calls), 1) models.signals.post_delete.disconnect(noop, sender=User) @skipIfDBFeature("can_defer_constraint_checks") def test_cannot_defer_constraint_checks(self): u = User.objects.create( avatar=Avatar.objects.create() ) # Attach a signal to make sure we will not do fast_deletes. calls = [] def noop(*args, **kwargs): calls.append('') models.signals.post_delete.connect(noop, sender=User) a = Avatar.objects.get(pk=u.avatar_id) # The below doesn't make sense... Why do we need to null out # user.avatar if we are going to delete the user immediately after it, # and there are no more cascades. # 1 query to find the users for the avatar. # 1 query to delete the user # 1 query to null out user.avatar, because we can't defer the constraint # 1 query to delete the avatar self.assertNumQueries(4, a.delete) self.assertFalse(User.objects.exists()) self.assertFalse(Avatar.objects.exists()) self.assertEqual(len(calls), 1) models.signals.post_delete.disconnect(noop, sender=User) def test_hidden_related(self): r = R.objects.create() h = HiddenUser.objects.create(r=r) HiddenUserProfile.objects.create(user=h) r.delete() self.assertEqual(HiddenUserProfile.objects.count(), 0) def test_large_delete(self): TEST_SIZE = 2000 objs = [Avatar() for i in range(0, TEST_SIZE)] Avatar.objects.bulk_create(objs) # Calculate the number of queries needed. batch_size = connection.ops.bulk_batch_size(['pk'], objs) # The related fetches are done in batches. batches = ceil(len(objs) / batch_size) # One query for Avatar.objects.all() and then one related fast delete for # each batch. 
fetches_to_mem = 1 + batches # The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE self.assertNumQueries(queries, Avatar.objects.all().delete) self.assertFalse(Avatar.objects.exists()) def test_large_delete_related(self): TEST_SIZE = 2000 s = S.objects.create(r=R.objects.create()) for i in range(TEST_SIZE): T.objects.create(s=s) batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1) # TEST_SIZE / batch_size (select related `T` instances) # + 1 (select related `U` instances) # + TEST_SIZE / GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches) # + 1 (delete `s`) expected_num_queries = ceil(TEST_SIZE / batch_size) expected_num_queries += ceil(TEST_SIZE / GET_ITERATOR_CHUNK_SIZE) + 2 self.assertNumQueries(expected_num_queries, s.delete) self.assertFalse(S.objects.exists()) self.assertFalse(T.objects.exists()) def test_delete_with_keeping_parents(self): child = RChild.objects.create() parent_id = child.r_ptr_id child.delete(keep_parents=True) self.assertFalse(RChild.objects.filter(id=child.id).exists()) self.assertTrue(R.objects.filter(id=parent_id).exists()) def test_delete_with_keeping_parents_relationships(self): child = RChild.objects.create() parent_id = child.r_ptr_id parent_referent_id = S.objects.create(r=child.r_ptr).pk child.delete(keep_parents=True) self.assertFalse(RChild.objects.filter(id=child.id).exists()) self.assertTrue(R.objects.filter(id=parent_id).exists()) self.assertTrue(S.objects.filter(pk=parent_referent_id).exists()) childchild = RChildChild.objects.create() parent_id = childchild.rchild_ptr.r_ptr_id child_id = childchild.rchild_ptr_id parent_referent_id = S.objects.create(r=childchild.rchild_ptr.r_ptr).pk childchild.delete(keep_parents=True) self.assertFalse(RChildChild.objects.filter(id=childchild.id).exists()) self.assertTrue(RChild.objects.filter(id=child_id).exists()) self.assertTrue(R.objects.filter(id=parent_id).exists()) self.assertTrue(S.objects.filter(pk=parent_referent_id).exists()) def test_queryset_delete_returns_num_rows(self): """ QuerySet.delete() should return the number of deleted rows and a dictionary with the number of deletions for each object type. """ Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')]) avatars_count = Avatar.objects.count() deleted, rows_count = Avatar.objects.all().delete() self.assertEqual(deleted, avatars_count) # more complex example with multiple object types r = R.objects.create() h1 = HiddenUser.objects.create(r=r) HiddenUser.objects.create(r=r) HiddenUserProfile.objects.create(user=h1) existed_objs = { R._meta.label: R.objects.count(), HiddenUser._meta.label: HiddenUser.objects.count(), HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(), } deleted, deleted_objs = R.objects.all().delete() self.assertCountEqual(deleted_objs.keys(), existed_objs.keys()) for k, v in existed_objs.items(): self.assertEqual(deleted_objs[k], v) def test_model_delete_returns_num_rows(self): """ Model.delete() should return the number of deleted rows and a dictionary with the number of deletions for each object type. 
""" r = R.objects.create() h1 = HiddenUser.objects.create(r=r) h2 = HiddenUser.objects.create(r=r) HiddenUser.objects.create(r=r) HiddenUserProfile.objects.create(user=h1) HiddenUserProfile.objects.create(user=h2) m1 = M.objects.create() m2 = M.objects.create() MR.objects.create(r=r, m=m1) r.m_set.add(m1) r.m_set.add(m2) r.save() existed_objs = { R._meta.label: R.objects.count(), HiddenUser._meta.label: HiddenUser.objects.count(), MR._meta.label: MR.objects.count(), HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(), M.m2m.through._meta.label: M.m2m.through.objects.count(), } deleted, deleted_objs = r.delete() self.assertEqual(deleted, sum(existed_objs.values())) self.assertCountEqual(deleted_objs.keys(), existed_objs.keys()) for k, v in existed_objs.items(): self.assertEqual(deleted_objs[k], v) def test_proxied_model_duplicate_queries(self): """ #25685 - Deleting instances of a model with existing proxy classes should not issue multiple queries during cascade deletion of referring models. """ avatar = Avatar.objects.create() # One query for the Avatar table and a second for the User one. with self.assertNumQueries(2): avatar.delete() def test_only_referenced_fields_selected(self): """ Only referenced fields are selected during cascade deletion SELECT unless deletion signals are connected. """ origin = Origin.objects.create() expected_sql = str( Referrer.objects.only( # Both fields are referenced by SecondReferrer. 'id', 'unique_field', ).filter(origin__in=[origin]).query ) with self.assertNumQueries(2) as ctx: origin.delete() self.assertEqual(ctx.captured_queries[0]['sql'], expected_sql) def receiver(instance, **kwargs): pass # All fields are selected if deletion signals are connected. for signal_name in ('pre_delete', 'post_delete'): with self.subTest(signal=signal_name): origin = Origin.objects.create() signal = getattr(models.signals, signal_name) signal.connect(receiver, sender=Referrer) with self.assertNumQueries(2) as ctx: origin.delete() self.assertIn( connection.ops.quote_name('large_field'), ctx.captured_queries[0]['sql'], ) signal.disconnect(receiver, sender=Referrer) class FastDeleteTests(TestCase): def test_fast_delete_all(self): with self.assertNumQueries(1) as ctx: User.objects.all().delete() sql = ctx.captured_queries[0]['sql'] # No subqueries is used when performing a full delete. self.assertNotIn('SELECT', sql) def test_fast_delete_fk(self): u = User.objects.create( avatar=Avatar.objects.create() ) a = Avatar.objects.get(pk=u.avatar_id) # 1 query to fast-delete the user # 1 query to delete the avatar self.assertNumQueries(2, a.delete) self.assertFalse(User.objects.exists()) self.assertFalse(Avatar.objects.exists()) def test_fast_delete_m2m(self): t = M2MTo.objects.create() f = M2MFrom.objects.create() f.m2m.add(t) # 1 to delete f, 1 to fast-delete m2m for f self.assertNumQueries(2, f.delete) def test_fast_delete_revm2m(self): t = M2MTo.objects.create() f = M2MFrom.objects.create() f.m2m.add(t) # 1 to delete t, 1 to fast-delete t's m_set self.assertNumQueries(2, f.delete) def test_fast_delete_qs(self): u1 = User.objects.create() u2 = User.objects.create() self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete) self.assertEqual(User.objects.count(), 1) self.assertTrue(User.objects.filter(pk=u2.pk).exists()) def test_fast_delete_instance_set_pk_none(self): u = User.objects.create() # User can be fast-deleted. 
collector = Collector(using='default') self.assertTrue(collector.can_fast_delete(u)) u.delete() self.assertIsNone(u.pk) def test_fast_delete_joined_qs(self): a = Avatar.objects.create(desc='a') User.objects.create(avatar=a) u2 = User.objects.create() self.assertNumQueries(1, User.objects.filter(avatar__desc='a').delete) self.assertEqual(User.objects.count(), 1) self.assertTrue(User.objects.filter(pk=u2.pk).exists()) def test_fast_delete_inheritance(self): c = Child.objects.create() p = Parent.objects.create() # 1 for self, 1 for parent self.assertNumQueries(2, c.delete) self.assertFalse(Child.objects.exists()) self.assertEqual(Parent.objects.count(), 1) self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1) # 1 for self delete, 1 for fast delete of empty "child" qs. self.assertNumQueries(2, p.delete) self.assertFalse(Parent.objects.exists()) # 1 for self delete, 1 for fast delete of empty "child" qs. c = Child.objects.create() p = c.parent_ptr self.assertNumQueries(2, p.delete) self.assertFalse(Parent.objects.exists()) self.assertFalse(Child.objects.exists()) def test_fast_delete_large_batch(self): User.objects.bulk_create(User() for i in range(0, 2000)) # No problems here - we aren't going to cascade, so we will fast # delete the objects in a single query. self.assertNumQueries(1, User.objects.all().delete) a = Avatar.objects.create(desc='a') User.objects.bulk_create(User(avatar=a) for i in range(0, 2000)) # We don't hit parameter amount limits for a, so just one query for # that + fast delete of the related objs. self.assertNumQueries(2, a.delete) self.assertEqual(User.objects.count(), 0) def test_fast_delete_empty_no_update_can_self_select(self): """ #25932 - Fast deleting on backends that don't have the `no_update_can_self_select` feature should work even if the specified filter doesn't match any row. """ with self.assertNumQueries(1): self.assertEqual( User.objects.filter(avatar__desc='missing').delete(), (0, {}), ) def test_fast_delete_combined_relationships(self): # The cascading fast-delete of SecondReferrer should be combined # in a single DELETE WHERE referrer_id OR unique_field. origin = Origin.objects.create() referer = Referrer.objects.create(origin=origin, unique_field=42) with self.assertNumQueries(2): referer.delete() def test_fast_delete_aggregation(self): # Fast-deleting when filtering against an aggregation result in # a single query containing a subquery. Base.objects.create() with self.assertNumQueries(1): self.assertEqual( Base.objects.annotate( rels_count=models.Count('rels'), ).filter(rels_count=0).delete(), (1, {'delete.Base': 1}), ) self.assertIs(Base.objects.exists(), False)
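# --- Exploratory sketch (not one of the original tests) ---
# test_fast_delete_instance_set_pk_none uses Collector directly; the helper
# below shows the same API used to preview a cascade without deleting
# anything: collect() walks the relations, collector.data maps each model to
# the instances the cascade would remove, and fast-deletable querysets end up
# in collector.fast_deletes instead. Names here are illustrative only.
def preview_cascade(obj, using='default'):
    collector = Collector(using=using)
    collector.collect([obj])
    return {
        model._meta.label: len(instances)
        for model, instances in collector.data.items()
    }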
98b6b8d7e15a16e5afeb3a96038d7b633da25f40e320dd41cad9c15330986b49
from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.db import models class P(models.Model): pass class R(models.Model): is_default = models.BooleanField(default=False) p = models.ForeignKey(P, models.CASCADE, null=True) def get_default_r(): return R.objects.get_or_create(is_default=True)[0].pk class S(models.Model): r = models.ForeignKey(R, models.CASCADE) class T(models.Model): s = models.ForeignKey(S, models.CASCADE) class U(models.Model): t = models.ForeignKey(T, models.CASCADE) class RChild(R): pass class RChildChild(RChild): pass class A(models.Model): name = models.CharField(max_length=30) auto = models.ForeignKey(R, models.CASCADE, related_name="auto_set") auto_nullable = models.ForeignKey(R, models.CASCADE, null=True, related_name='auto_nullable_set') setvalue = models.ForeignKey(R, models.SET(get_default_r), related_name='setvalue') setnull = models.ForeignKey(R, models.SET_NULL, null=True, related_name='setnull_set') setdefault = models.ForeignKey(R, models.SET_DEFAULT, default=get_default_r, related_name='setdefault_set') setdefault_none = models.ForeignKey( R, models.SET_DEFAULT, default=None, null=True, related_name='setnull_nullable_set', ) cascade = models.ForeignKey(R, models.CASCADE, related_name='cascade_set') cascade_nullable = models.ForeignKey(R, models.CASCADE, null=True, related_name='cascade_nullable_set') protect = models.ForeignKey(R, models.PROTECT, null=True, related_name='protect_set') restrict = models.ForeignKey(R, models.RESTRICT, null=True, related_name='restrict_set') donothing = models.ForeignKey(R, models.DO_NOTHING, null=True, related_name='donothing_set') child = models.ForeignKey(RChild, models.CASCADE, related_name="child") child_setnull = models.ForeignKey(RChild, models.SET_NULL, null=True, related_name="child_setnull") cascade_p = models.ForeignKey(P, models.CASCADE, related_name='cascade_p_set', null=True) # A OneToOneField is just a ForeignKey unique=True, so we don't duplicate # all the tests; just one smoke test to ensure on_delete works for it as # well. 
o2o_setnull = models.ForeignKey(R, models.SET_NULL, null=True, related_name="o2o_nullable_set") class B(models.Model): protect = models.ForeignKey(R, models.PROTECT) def create_a(name): a = A(name=name) for name in ('auto', 'auto_nullable', 'setvalue', 'setnull', 'setdefault', 'setdefault_none', 'cascade', 'cascade_nullable', 'protect', 'restrict', 'donothing', 'o2o_setnull'): r = R.objects.create() setattr(a, name, r) a.child = RChild.objects.create() a.child_setnull = RChild.objects.create() a.save() return a class M(models.Model): m2m = models.ManyToManyField(R, related_name="m_set") m2m_through = models.ManyToManyField(R, through="MR", related_name="m_through_set") m2m_through_null = models.ManyToManyField(R, through="MRNull", related_name="m_through_null_set") class MR(models.Model): m = models.ForeignKey(M, models.CASCADE) r = models.ForeignKey(R, models.CASCADE) class MRNull(models.Model): m = models.ForeignKey(M, models.CASCADE) r = models.ForeignKey(R, models.SET_NULL, null=True) class Avatar(models.Model): desc = models.TextField(null=True) # This model is used to test a duplicate query regression (#25685) class AvatarProxy(Avatar): class Meta: proxy = True class User(models.Model): avatar = models.ForeignKey(Avatar, models.CASCADE, null=True) class HiddenUser(models.Model): r = models.ForeignKey(R, models.CASCADE, related_name="+") class HiddenUserProfile(models.Model): user = models.ForeignKey(HiddenUser, models.CASCADE) class M2MTo(models.Model): pass class M2MFrom(models.Model): m2m = models.ManyToManyField(M2MTo) class Parent(models.Model): pass class Child(Parent): pass class Base(models.Model): pass class RelToBase(models.Model): base = models.ForeignKey(Base, models.DO_NOTHING, related_name='rels') class Origin(models.Model): pass class Referrer(models.Model): origin = models.ForeignKey(Origin, models.CASCADE) unique_field = models.IntegerField(unique=True) large_field = models.TextField() class SecondReferrer(models.Model): referrer = models.ForeignKey(Referrer, models.CASCADE) other_referrer = models.ForeignKey( Referrer, models.CASCADE, to_field='unique_field', related_name='+' ) class DeleteTop(models.Model): b1 = GenericRelation('GenericB1') b2 = GenericRelation('GenericB2') class B1(models.Model): delete_top = models.ForeignKey(DeleteTop, models.CASCADE) class B2(models.Model): delete_top = models.ForeignKey(DeleteTop, models.CASCADE) class B3(models.Model): restrict = models.ForeignKey(R, models.RESTRICT) class DeleteBottom(models.Model): b1 = models.ForeignKey(B1, models.RESTRICT) b2 = models.ForeignKey(B2, models.CASCADE) class GenericB1(models.Model): content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE) object_id = models.PositiveIntegerField() generic_delete_top = GenericForeignKey('content_type', 'object_id') class GenericB2(models.Model): content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE) object_id = models.PositiveIntegerField() generic_delete_top = GenericForeignKey('content_type', 'object_id') generic_delete_bottom = GenericRelation('GenericDeleteBottom') class GenericDeleteBottom(models.Model): generic_b1 = models.ForeignKey(GenericB1, models.RESTRICT) content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE) object_id = models.PositiveIntegerField() generic_b2 = GenericForeignKey() class GenericDeleteBottomParent(models.Model): generic_delete_bottom = models.ForeignKey(GenericDeleteBottom, on_delete=models.CASCADE)
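# --- Usage sketch (assumption, not part of the original models module) ---
# create_a() above wires one fresh R instance into every on_delete variant of
# A; deleting one of those R rows then exercises the matching behaviour. For
# example, SET_DEFAULT repoints A.setdefault at the row whose pk
# get_default_r() returns. Illustrative only; the delete tests cover this.
def demo_set_default_behaviour():
    a = create_a('demo_set_default')
    a.setdefault.delete()
    a.refresh_from_db()
    # True when the FK now points at the shared default R row.
    return a.setdefault_id == get_default_r()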
f402780df1f1ea95051389a2d5bbe5f93caae4e5d969293bc5fad2b9faf86499
import unittest import uuid from django.core.checks import Error, Warning as DjangoWarning from django.db import connection, models from django.test import ( SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import isolate_apps, override_settings from django.utils.functional import lazy from django.utils.timezone import now from django.utils.translation import gettext_lazy as _ from django.utils.version import get_docs_version @isolate_apps('invalid_models_tests') class AutoFieldTests(SimpleTestCase): def test_valid_case(self): class Model(models.Model): id = models.AutoField(primary_key=True) field = Model._meta.get_field('id') self.assertEqual(field.check(), []) def test_primary_key(self): # primary_key must be True. Refs #12467. class Model(models.Model): field = models.AutoField(primary_key=False) # Prevent Django from autocreating `id` AutoField, which would # result in an error, because a model must have exactly one # AutoField. another = models.IntegerField(primary_key=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( 'AutoFields must set primary_key=True.', obj=field, id='fields.E100', ), ]) def test_max_length_warning(self): class Model(models.Model): auto = models.AutoField(primary_key=True, max_length=2) field = Model._meta.get_field('auto') self.assertEqual(field.check(), [ DjangoWarning( "'max_length' is ignored when used with %s." % field.__class__.__name__, hint="Remove 'max_length' from field", obj=field, id='fields.W122', ), ]) @isolate_apps('invalid_models_tests') class BinaryFieldTests(SimpleTestCase): def test_valid_default_value(self): class Model(models.Model): field1 = models.BinaryField(default=b'test') field2 = models.BinaryField(default=None) for field_name in ('field1', 'field2'): field = Model._meta.get_field(field_name) self.assertEqual(field.check(), []) def test_str_default_value(self): class Model(models.Model): field = models.BinaryField(default='test') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "BinaryField's default cannot be a string. 
Use bytes content " "instead.", obj=field, id='fields.E170', ), ]) @isolate_apps('invalid_models_tests') class CharFieldTests(TestCase): def test_valid_field(self): class Model(models.Model): field = models.CharField( max_length=255, choices=[ ('1', 'item1'), ('2', 'item2'), ], db_index=True, ) field = Model._meta.get_field('field') self.assertEqual(field.check(), []) def test_missing_max_length(self): class Model(models.Model): field = models.CharField() field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "CharFields must define a 'max_length' attribute.", obj=field, id='fields.E120', ), ]) def test_negative_max_length(self): class Model(models.Model): field = models.CharField(max_length=-1) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121', ), ]) def test_bad_max_length_value(self): class Model(models.Model): field = models.CharField(max_length="bad") field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121', ), ]) def test_str_max_length_value(self): class Model(models.Model): field = models.CharField(max_length='20') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121', ), ]) def test_str_max_length_type(self): class Model(models.Model): field = models.CharField(max_length=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_length' must be a positive integer.", obj=field, id='fields.E121' ), ]) def test_non_iterable_choices(self): class Model(models.Model): field = models.CharField(max_length=10, choices='bad') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=field, id='fields.E004', ), ]) def test_non_iterable_choices_two_letters(self): """Two letters isn't a valid choice pair.""" class Model(models.Model): field = models.CharField(max_length=10, choices=['ab']) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual value, " "human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_iterable_of_iterable_choices(self): class ThingItem: def __init__(self, value, display): self.value = value self.display = display def __iter__(self): return iter((self.value, self.display)) def __len__(self): return 2 class Things: def __iter__(self): return iter((ThingItem(1, 2), ThingItem(3, 4))) class ThingWithIterableChoices(models.Model): thing = models.CharField(max_length=100, blank=True, choices=Things()) self.assertEqual(ThingWithIterableChoices._meta.get_field('thing').check(), []) def test_choices_containing_non_pairs(self): class Model(models.Model): field = models.CharField(max_length=10, choices=[(1, 2, 3), (1, 2, 3)]) class Model2(models.Model): field = models.IntegerField(choices=[0]) for model in (Model, Model2): with self.subTest(model.__name__): field = model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual " "value, human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_choices_containing_lazy(self): class Model(models.Model): field = models.CharField(max_length=10, choices=[['1', _('1')], ['2', _('2')]]) self.assertEqual(Model._meta.get_field('field').check(), 
[]) def test_lazy_choices(self): class Model(models.Model): field = models.CharField(max_length=10, choices=lazy(lambda: [[1, '1'], [2, '2']], tuple)()) self.assertEqual(Model._meta.get_field('field').check(), []) def test_choices_named_group(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[ ['knights', [['L', 'Lancelot'], ['G', 'Galahad']]], ['wizards', [['T', 'Tim the Enchanter']]], ['R', 'Random character'], ], ) self.assertEqual(Model._meta.get_field('field').check(), []) def test_choices_named_group_non_pairs(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[['knights', [['L', 'Lancelot', 'Du Lac']]]], ) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual value, " "human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_choices_named_group_bad_structure(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[ ['knights', [ ['Noble', [['G', 'Galahad']]], ['Combative', [['L', 'Lancelot']]], ]], ], ) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'choices' must be an iterable containing (actual value, " "human readable name) tuples.", obj=field, id='fields.E005', ), ]) def test_choices_named_group_lazy(self): class Model(models.Model): field = models.CharField( max_length=10, choices=[ [_('knights'), [['L', _('Lancelot')], ['G', _('Galahad')]]], ['R', _('Random character')], ], ) self.assertEqual(Model._meta.get_field('field').check(), []) def test_choices_in_max_length(self): class Model(models.Model): field = models.CharField( max_length=2, choices=[ ('ABC', 'Value Too Long!'), ('OK', 'Good') ], ) group = models.CharField( max_length=2, choices=[ ('Nested', [('OK', 'Good'), ('Longer', 'Longer')]), ('Grouped', [('Bad', 'Bad')]), ], ) for name, choice_max_length in (('field', 3), ('group', 6)): with self.subTest(name): field = Model._meta.get_field(name) self.assertEqual(field.check(), [ Error( "'max_length' is too small to fit the longest value " "in 'choices' (%d characters)." % choice_max_length, obj=field, id='fields.E009', ), ]) def test_bad_db_index_value(self): class Model(models.Model): field = models.CharField(max_length=10, db_index='bad') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'db_index' must be None, True or False.", obj=field, id='fields.E006', ), ]) def test_bad_validators(self): class Model(models.Model): field = models.CharField(max_length=10, validators=[True]) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "All 'validators' must be callable.", hint=( "validators[0] (True) isn't a function or instance of a " "validator class." ), obj=field, id='fields.E008', ), ]) @unittest.skipUnless(connection.vendor == 'mysql', "Test valid only for MySQL") def test_too_long_char_field_under_mysql(self): from django.db.backends.mysql.validation import DatabaseValidation class Model(models.Model): field = models.CharField(unique=True, max_length=256) field = Model._meta.get_field('field') validator = DatabaseValidation(connection=connection) self.assertEqual(validator.check_field(field), [ DjangoWarning( '%s may not allow unique CharFields to have a max_length > ' '255.' 
% connection.display_name, hint=( 'See: https://docs.djangoproject.com/en/%s/ref/databases/' '#mysql-character-fields' % get_docs_version() ), obj=field, id='mysql.W003', ) ]) def test_db_collation(self): class Model(models.Model): field = models.CharField(max_length=100, db_collation='anything') field = Model._meta.get_field('field') error = Error( '%s does not support a database collation on CharFields.' % connection.display_name, id='fields.E190', obj=field, ) expected = [] if connection.features.supports_collation_on_charfield else [error] self.assertEqual(field.check(databases=self.databases), expected) def test_db_collation_required_db_features(self): class Model(models.Model): field = models.CharField(max_length=100, db_collation='anything') class Meta: required_db_features = {'supports_collation_on_charfield'} field = Model._meta.get_field('field') self.assertEqual(field.check(databases=self.databases), []) @isolate_apps('invalid_models_tests') class DateFieldTests(SimpleTestCase): maxDiff = None def test_auto_now_and_auto_now_add_raise_error(self): class Model(models.Model): field0 = models.DateTimeField(auto_now=True, auto_now_add=True, default=now) field1 = models.DateTimeField(auto_now=True, auto_now_add=False, default=now) field2 = models.DateTimeField(auto_now=False, auto_now_add=True, default=now) field3 = models.DateTimeField(auto_now=True, auto_now_add=True, default=None) expected = [] checks = [] for i in range(4): field = Model._meta.get_field('field%d' % i) expected.append(Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=field, id='fields.E160', )) checks.extend(field.check()) self.assertEqual(checks, expected) def test_fix_default_value(self): class Model(models.Model): field_dt = models.DateField(default=now()) field_d = models.DateField(default=now().date()) field_now = models.DateField(default=now) field_dt = Model._meta.get_field('field_dt') field_d = Model._meta.get_field('field_d') field_now = Model._meta.get_field('field_now') errors = field_dt.check() errors.extend(field_d.check()) errors.extend(field_now.check()) # doesn't raise a warning self.assertEqual(errors, [ DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_dt, id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. 
If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_d, id='fields.W161', ) ]) @override_settings(USE_TZ=True) def test_fix_default_value_tz(self): self.test_fix_default_value() @isolate_apps('invalid_models_tests') class DateTimeFieldTests(SimpleTestCase): maxDiff = None def test_fix_default_value(self): class Model(models.Model): field_dt = models.DateTimeField(default=now()) field_d = models.DateTimeField(default=now().date()) field_now = models.DateTimeField(default=now) field_dt = Model._meta.get_field('field_dt') field_d = Model._meta.get_field('field_d') field_now = Model._meta.get_field('field_now') errors = field_dt.check() errors.extend(field_d.check()) errors.extend(field_now.check()) # doesn't raise a warning self.assertEqual(errors, [ DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_dt, id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_d, id='fields.W161', ) ]) @override_settings(USE_TZ=True) def test_fix_default_value_tz(self): self.test_fix_default_value() @isolate_apps('invalid_models_tests') class DecimalFieldTests(SimpleTestCase): def test_required_attributes(self): class Model(models.Model): field = models.DecimalField() field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "DecimalFields must define a 'decimal_places' attribute.", obj=field, id='fields.E130', ), Error( "DecimalFields must define a 'max_digits' attribute.", obj=field, id='fields.E132', ), ]) def test_negative_max_digits_and_decimal_places(self): class Model(models.Model): field = models.DecimalField(max_digits=-1, decimal_places=-1) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'decimal_places' must be a non-negative integer.", obj=field, id='fields.E131', ), Error( "'max_digits' must be a positive integer.", obj=field, id='fields.E133', ), ]) def test_bad_values_of_max_digits_and_decimal_places(self): class Model(models.Model): field = models.DecimalField(max_digits="bad", decimal_places="bad") field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'decimal_places' must be a non-negative integer.", obj=field, id='fields.E131', ), Error( "'max_digits' must be a positive integer.", obj=field, id='fields.E133', ), ]) def test_decimal_places_greater_than_max_digits(self): class Model(models.Model): field = models.DecimalField(max_digits=9, decimal_places=10) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=field, id='fields.E134', ), ]) def test_valid_field(self): class Model(models.Model): field = models.DecimalField(max_digits=10, decimal_places=10) field = Model._meta.get_field('field') self.assertEqual(field.check(), []) @isolate_apps('invalid_models_tests') class FileFieldTests(SimpleTestCase): def test_valid_default_case(self): class Model(models.Model): field = models.FileField() self.assertEqual(Model._meta.get_field('field').check(), []) def test_valid_case(self): class Model(models.Model): field = 
models.FileField(upload_to='somewhere') field = Model._meta.get_field('field') self.assertEqual(field.check(), []) def test_primary_key(self): class Model(models.Model): field = models.FileField(primary_key=False, upload_to='somewhere') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'primary_key' is not a valid argument for a FileField.", obj=field, id='fields.E201', ) ]) def test_upload_to_starts_with_slash(self): class Model(models.Model): field = models.FileField(upload_to='/somewhere') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "FileField's 'upload_to' argument must be a relative path, not " "an absolute path.", obj=field, id='fields.E202', hint='Remove the leading slash.', ) ]) def test_upload_to_callable_not_checked(self): def callable(instance, filename): return '/' + filename class Model(models.Model): field = models.FileField(upload_to=callable) field = Model._meta.get_field('field') self.assertEqual(field.check(), []) @isolate_apps('invalid_models_tests') class FilePathFieldTests(SimpleTestCase): def test_forbidden_files_and_folders(self): class Model(models.Model): field = models.FilePathField(allow_files=False, allow_folders=False) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.", obj=field, id='fields.E140', ), ]) @isolate_apps('invalid_models_tests') class GenericIPAddressFieldTests(SimpleTestCase): def test_non_nullable_blank(self): class Model(models.Model): field = models.GenericIPAddressField(null=False, blank=True) field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( ('GenericIPAddressFields cannot have blank=True if null=False, ' 'as blank values are stored as nulls.'), obj=field, id='fields.E150', ), ]) @isolate_apps('invalid_models_tests') class ImageFieldTests(SimpleTestCase): def test_pillow_installed(self): try: from PIL import Image # NOQA except ImportError: pillow_installed = False else: pillow_installed = True class Model(models.Model): field = models.ImageField(upload_to='somewhere') field = Model._meta.get_field('field') errors = field.check() expected = [] if pillow_installed else [ Error( 'Cannot use ImageField because Pillow is not installed.', hint=('Get Pillow at https://pypi.org/project/Pillow/ ' 'or run command "python -m pip install Pillow".'), obj=field, id='fields.E210', ), ] self.assertEqual(errors, expected) @isolate_apps('invalid_models_tests') class IntegerFieldTests(SimpleTestCase): def test_max_length_warning(self): class Model(models.Model): integer = models.IntegerField(max_length=2) biginteger = models.BigIntegerField(max_length=2) smallinteger = models.SmallIntegerField(max_length=2) positiveinteger = models.PositiveIntegerField(max_length=2) positivebiginteger = models.PositiveBigIntegerField(max_length=2) positivesmallinteger = models.PositiveSmallIntegerField(max_length=2) for field in Model._meta.get_fields(): if field.auto_created: continue with self.subTest(name=field.name): self.assertEqual(field.check(), [ DjangoWarning( "'max_length' is ignored when used with %s." 
% field.__class__.__name__, hint="Remove 'max_length' from field", obj=field, id='fields.W122', ) ]) @isolate_apps('invalid_models_tests') class TimeFieldTests(SimpleTestCase): maxDiff = None def test_fix_default_value(self): class Model(models.Model): field_dt = models.TimeField(default=now()) field_t = models.TimeField(default=now().time()) field_now = models.DateField(default=now) field_dt = Model._meta.get_field('field_dt') field_t = Model._meta.get_field('field_t') field_now = Model._meta.get_field('field_now') errors = field_dt.check() errors.extend(field_t.check()) errors.extend(field_now.check()) # doesn't raise a warning self.assertEqual(errors, [ DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_dt, id='fields.W161', ), DjangoWarning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=field_t, id='fields.W161', ) ]) @override_settings(USE_TZ=True) def test_fix_default_value_tz(self): self.test_fix_default_value() @isolate_apps('invalid_models_tests') class TextFieldTests(TestCase): @skipIfDBFeature('supports_index_on_text_field') def test_max_length_warning(self): class Model(models.Model): value = models.TextField(db_index=True) field = Model._meta.get_field('value') field_type = field.db_type(connection) self.assertEqual(field.check(databases=self.databases), [ DjangoWarning( '%s does not support a database index on %s columns.' % (connection.display_name, field_type), hint=( "An index won't be created. Silence this warning if you " "don't care about it." ), obj=field, id='fields.W162', ) ]) def test_db_collation(self): class Model(models.Model): field = models.TextField(db_collation='anything') field = Model._meta.get_field('field') error = Error( '%s does not support a database collation on TextFields.' % connection.display_name, id='fields.E190', obj=field, ) expected = [] if connection.features.supports_collation_on_textfield else [error] self.assertEqual(field.check(databases=self.databases), expected) def test_db_collation_required_db_features(self): class Model(models.Model): field = models.TextField(db_collation='anything') class Meta: required_db_features = {'supports_collation_on_textfield'} field = Model._meta.get_field('field') self.assertEqual(field.check(databases=self.databases), []) @isolate_apps('invalid_models_tests') class UUIDFieldTests(TestCase): def test_choices_named_group(self): class Model(models.Model): field = models.UUIDField( choices=[ ['knights', [ [uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'), 'Lancelot'], [uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2'), 'Galahad'], ]], [uuid.UUID('25d405be-4895-4d50-9b2e-d6695359ce47'), 'Other'], ], ) self.assertEqual(Model._meta.get_field('field').check(), []) @isolate_apps('invalid_models_tests') @skipUnlessDBFeature('supports_json_field') class JSONFieldTests(TestCase): def test_invalid_default(self): class Model(models.Model): field = models.JSONField(default={}) self.assertEqual(Model._meta.get_field('field').check(), [ DjangoWarning( msg=( "JSONField default should be a callable instead of an " "instance so that it's not shared between all field " "instances." 
), hint=( 'Use a callable instead, e.g., use `dict` instead of `{}`.' ), obj=Model._meta.get_field('field'), id='fields.E010', ) ]) def test_valid_default(self): class Model(models.Model): field = models.JSONField(default=dict) self.assertEqual(Model._meta.get_field('field').check(), []) def test_valid_default_none(self): class Model(models.Model): field = models.JSONField(default=None) self.assertEqual(Model._meta.get_field('field').check(), []) def test_valid_callable_default(self): def callable_default(): return {'it': 'works'} class Model(models.Model): field = models.JSONField(default=callable_default) self.assertEqual(Model._meta.get_field('field').check(), [])
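# --- Illustrative sketch (not one of the original test classes) ---
# The assertions above all boil down to calling Field.check() and comparing
# the returned Error/Warning instances. This minimal example shows that
# pattern end to end for a hypothetical model; isolate_apps keeps the model
# out of the real app registry, exactly as in the classes above.
@isolate_apps('invalid_models_tests')
class ManualFieldCheckExample(SimpleTestCase):
    def test_missing_max_length_error_id(self):
        class DemoModel(models.Model):
            name = models.CharField()  # no max_length -> fields.E120

        errors = DemoModel._meta.get_field('name').check()
        self.assertEqual([error.id for error in errors], ['fields.E120'])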
3c7530dd4db91690de59fc555caf712ce9257c1a7a1cb69626db70a771cc1b46
from unittest import skipIf from django import forms from django.db import connection, models from django.test import SimpleTestCase, TestCase from .models import Post class TextFieldTests(TestCase): def test_max_length_passed_to_formfield(self): """ TextField passes its max_length attribute to form fields created using their formfield() method. """ tf1 = models.TextField() tf2 = models.TextField(max_length=2345) self.assertIsNone(tf1.formfield().max_length) self.assertEqual(2345, tf2.formfield().max_length) def test_choices_generates_select_widget(self): """A TextField with choices uses a Select widget.""" f = models.TextField(choices=[('A', 'A'), ('B', 'B')]) self.assertIsInstance(f.formfield().widget, forms.Select) def test_to_python(self): """TextField.to_python() should return a string.""" f = models.TextField() self.assertEqual(f.to_python(1), '1') def test_lookup_integer_in_textfield(self): self.assertEqual(Post.objects.filter(body=24).count(), 0) @skipIf(connection.vendor == 'mysql', 'Running on MySQL requires utf8mb4 encoding (#18392)') def test_emoji(self): p = Post.objects.create(title='Whatever', body='Smile 😀.') p.refresh_from_db() self.assertEqual(p.body, 'Smile 😀.') class TestMethods(SimpleTestCase): def test_deconstruct(self): field = models.TextField() *_, kwargs = field.deconstruct() self.assertEqual(kwargs, {}) field = models.TextField(db_collation='utf8_esperanto_ci') *_, kwargs = field.deconstruct() self.assertEqual(kwargs, {'db_collation': 'utf8_esperanto_ci'})
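# --- Small sketch (assumption, not an original test) ---
# max_length on a TextField only constrains the generated form field; the
# database column stays an unconstrained text type. This restates what
# test_max_length_passed_to_formfield asserts, from the form side.
class TextFieldFormLimitExample(SimpleTestCase):
    def test_form_field_gets_the_limit(self):
        field = models.TextField(max_length=10)
        form_field = field.formfield()
        self.assertIsInstance(form_field, forms.CharField)
        self.assertEqual(form_field.max_length, 10)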
9c8277fcb5059b52d6626bb5473f6c2cec26bad519d9dc537aea06284c0d07c0
import pickle from django import forms from django.core.exceptions import ValidationError from django.db import models from django.test import SimpleTestCase, TestCase from django.utils.functional import lazy from .models import ( Bar, Choiceful, Foo, RenamedField, VerboseNameField, Whiz, WhizDelayed, WhizIter, WhizIterEmpty, ) class Nested: class Field(models.Field): pass class BasicFieldTests(SimpleTestCase): def test_show_hidden_initial(self): """ Fields with choices respect show_hidden_initial as a kwarg to formfield(). """ choices = [(0, 0), (1, 1)] model_field = models.Field(choices=choices) form_field = model_field.formfield(show_hidden_initial=True) self.assertTrue(form_field.show_hidden_initial) form_field = model_field.formfield(show_hidden_initial=False) self.assertFalse(form_field.show_hidden_initial) def test_field_repr(self): """ __repr__() of a field displays its name. """ f = Foo._meta.get_field('a') self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>') f = models.fields.CharField() self.assertEqual(repr(f), '<django.db.models.fields.CharField>') def test_field_repr_nested(self): """__repr__() uses __qualname__ for nested class support.""" self.assertEqual(repr(Nested.Field()), '<model_fields.tests.Nested.Field>') def test_field_name(self): """ A defined field name (name="fieldname") is used instead of the model model's attribute name (modelname). """ instance = RenamedField() self.assertTrue(hasattr(instance, 'get_fieldname_display')) self.assertFalse(hasattr(instance, 'get_modelname_display')) def test_field_verbose_name(self): m = VerboseNameField for i in range(1, 23): self.assertEqual(m._meta.get_field('field%d' % i).verbose_name, 'verbose field%d' % i) self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk') def test_choices_form_class(self): """Can supply a custom choices form class to Field.formfield()""" choices = [('a', 'a')] field = models.CharField(choices=choices) klass = forms.TypedMultipleChoiceField self.assertIsInstance(field.formfield(choices_form_class=klass), klass) def test_formfield_disabled(self): """Field.formfield() sets disabled for fields with choices.""" field = models.CharField(choices=[('a', 'b')]) form_field = field.formfield(disabled=True) self.assertIs(form_field.disabled, True) def test_field_str(self): f = models.Field() self.assertEqual(str(f), '<django.db.models.fields.Field>') f = Foo._meta.get_field('a') self.assertEqual(str(f), 'model_fields.Foo.a') def test_field_ordering(self): """Fields are ordered based on their creation.""" f1 = models.Field() f2 = models.Field(auto_created=True) f3 = models.Field() self.assertLess(f2, f1) self.assertGreater(f3, f1) self.assertIsNotNone(f1) self.assertNotIn(f2, (None, 1, '')) def test_field_instance_is_picklable(self): """Field instances can be pickled.""" field = models.Field(max_length=100, default='a string') # Must be picklable with this cached property populated (#28188). 
field._get_default pickle.dumps(field) def test_deconstruct_nested_field(self): """deconstruct() uses __qualname__ for nested class support.""" name, path, args, kwargs = Nested.Field().deconstruct() self.assertEqual(path, 'model_fields.tests.Nested.Field') def test_abstract_inherited_fields(self): """Field instances from abstract models are not equal.""" class AbstractModel(models.Model): field = models.IntegerField() class Meta: abstract = True class InheritAbstractModel1(AbstractModel): pass class InheritAbstractModel2(AbstractModel): pass abstract_model_field = AbstractModel._meta.get_field('field') inherit1_model_field = InheritAbstractModel1._meta.get_field('field') inherit2_model_field = InheritAbstractModel2._meta.get_field('field') self.assertNotEqual(abstract_model_field, inherit1_model_field) self.assertNotEqual(abstract_model_field, inherit2_model_field) self.assertNotEqual(inherit1_model_field, inherit2_model_field) self.assertLess(abstract_model_field, inherit1_model_field) self.assertLess(abstract_model_field, inherit2_model_field) self.assertLess(inherit1_model_field, inherit2_model_field) self.assertNotEqual(hash(abstract_model_field), hash(inherit1_model_field)) self.assertNotEqual(hash(abstract_model_field), hash(inherit2_model_field)) self.assertNotEqual(hash(inherit1_model_field), hash(inherit2_model_field)) class ChoicesTests(SimpleTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.no_choices = Choiceful._meta.get_field('no_choices') cls.empty_choices = Choiceful._meta.get_field('empty_choices') cls.empty_choices_bool = Choiceful._meta.get_field('empty_choices_bool') cls.empty_choices_text = Choiceful._meta.get_field('empty_choices_text') cls.with_choices = Choiceful._meta.get_field('with_choices') def test_choices(self): self.assertIsNone(self.no_choices.choices) self.assertEqual(self.empty_choices.choices, ()) self.assertEqual(self.with_choices.choices, [(1, 'A')]) def test_flatchoices(self): self.assertEqual(self.no_choices.flatchoices, []) self.assertEqual(self.empty_choices.flatchoices, []) self.assertEqual(self.with_choices.flatchoices, [(1, 'A')]) def test_check(self): self.assertEqual(Choiceful.check(), []) def test_invalid_choice(self): model_instance = None # Actual model instance not needed. self.no_choices.validate(0, model_instance) msg = "['Value 99 is not a valid choice.']" with self.assertRaisesMessage(ValidationError, msg): self.empty_choices.validate(99, model_instance) with self.assertRaisesMessage(ValidationError, msg): self.with_choices.validate(99, model_instance) def test_formfield(self): no_choices_formfield = self.no_choices.formfield() self.assertIsInstance(no_choices_formfield, forms.IntegerField) fields = ( self.empty_choices, self.with_choices, self.empty_choices_bool, self.empty_choices_text, ) for field in fields: with self.subTest(field=field): self.assertIsInstance(field.formfield(), forms.ChoiceField) class GetFieldDisplayTests(SimpleTestCase): def test_choices_and_field_display(self): """ get_choices() interacts with get_FIELD_display() to return the expected values. 
""" self.assertEqual(Whiz(c=1).get_c_display(), 'First') # A nested value self.assertEqual(Whiz(c=0).get_c_display(), 'Other') # A top level value self.assertEqual(Whiz(c=9).get_c_display(), 9) # Invalid value self.assertIsNone(Whiz(c=None).get_c_display()) # Blank value self.assertEqual(Whiz(c='').get_c_display(), '') # Empty value self.assertEqual(WhizDelayed(c=0).get_c_display(), 'Other') # Delayed choices def test_get_FIELD_display_translated(self): """A translated display value is coerced to str.""" val = Whiz(c=5).get_c_display() self.assertIsInstance(val, str) self.assertEqual(val, 'translated') def test_overriding_FIELD_display(self): class FooBar(models.Model): foo_bar = models.IntegerField(choices=[(1, 'foo'), (2, 'bar')]) def get_foo_bar_display(self): return 'something' f = FooBar(foo_bar=1) self.assertEqual(f.get_foo_bar_display(), 'something') def test_overriding_inherited_FIELD_display(self): class Base(models.Model): foo = models.CharField(max_length=254, choices=[('A', 'Base A')]) class Meta: abstract = True class Child(Base): foo = models.CharField(max_length=254, choices=[('A', 'Child A'), ('B', 'Child B')]) self.assertEqual(Child(foo='A').get_foo_display(), 'Child A') self.assertEqual(Child(foo='B').get_foo_display(), 'Child B') def test_iterator_choices(self): """ get_choices() works with Iterators. """ self.assertEqual(WhizIter(c=1).c, 1) # A nested value self.assertEqual(WhizIter(c=9).c, 9) # Invalid value self.assertIsNone(WhizIter(c=None).c) # Blank value self.assertEqual(WhizIter(c='').c, '') # Empty value def test_empty_iterator_choices(self): """ get_choices() works with empty iterators. """ self.assertEqual(WhizIterEmpty(c="a").c, "a") # A nested value self.assertEqual(WhizIterEmpty(c="b").c, "b") # Invalid value self.assertIsNone(WhizIterEmpty(c=None).c) # Blank value self.assertEqual(WhizIterEmpty(c='').c, '') # Empty value class GetChoicesTests(SimpleTestCase): def test_empty_choices(self): choices = [] f = models.CharField(choices=choices) self.assertEqual(f.get_choices(include_blank=False), choices) def test_blank_in_choices(self): choices = [('', '<><>'), ('a', 'A')] f = models.CharField(choices=choices) self.assertEqual(f.get_choices(include_blank=True), choices) def test_blank_in_grouped_choices(self): choices = [ ('f', 'Foo'), ('b', 'Bar'), ('Group', ( ('', 'No Preference'), ('fg', 'Foo'), ('bg', 'Bar'), )), ] f = models.CharField(choices=choices) self.assertEqual(f.get_choices(include_blank=True), choices) def test_lazy_strings_not_evaluated(self): lazy_func = lazy(lambda x: 0 / 0, int) # raises ZeroDivisionError if evaluated. 
f = models.CharField(choices=[(lazy_func('group'), (('a', 'A'), ('b', 'B')))]) self.assertEqual(f.get_choices(include_blank=True)[0], ('', '---------')) class GetChoicesOrderingTests(TestCase): @classmethod def setUpTestData(cls): cls.foo1 = Foo.objects.create(a='a', d='12.35') cls.foo2 = Foo.objects.create(a='b', d='12.34') cls.bar1 = Bar.objects.create(a=cls.foo1, b='b') cls.bar2 = Bar.objects.create(a=cls.foo2, b='a') cls.field = Bar._meta.get_field('a') def assertChoicesEqual(self, choices, objs): self.assertEqual(choices, [(obj.pk, str(obj)) for obj in objs]) def test_get_choices(self): self.assertChoicesEqual( self.field.get_choices(include_blank=False, ordering=('a',)), [self.foo1, self.foo2] ) self.assertChoicesEqual( self.field.get_choices(include_blank=False, ordering=('-a',)), [self.foo2, self.foo1] ) def test_get_choices_default_ordering(self): self.addCleanup(setattr, Foo._meta, 'ordering', Foo._meta.ordering) Foo._meta.ordering = ('d',) self.assertChoicesEqual( self.field.get_choices(include_blank=False), [self.foo2, self.foo1] ) def test_get_choices_reverse_related_field(self): self.assertChoicesEqual( self.field.remote_field.get_choices(include_blank=False, ordering=('a',)), [self.bar1, self.bar2] ) self.assertChoicesEqual( self.field.remote_field.get_choices(include_blank=False, ordering=('-a',)), [self.bar2, self.bar1] ) def test_get_choices_reverse_related_field_default_ordering(self): self.addCleanup(setattr, Bar._meta, 'ordering', Bar._meta.ordering) Bar._meta.ordering = ('b',) self.assertChoicesEqual( self.field.remote_field.get_choices(include_blank=False), [self.bar2, self.bar1] ) class GetChoicesLimitChoicesToTests(TestCase): @classmethod def setUpTestData(cls): cls.foo1 = Foo.objects.create(a='a', d='12.34') cls.foo2 = Foo.objects.create(a='b', d='12.34') cls.bar1 = Bar.objects.create(a=cls.foo1, b='b') cls.bar2 = Bar.objects.create(a=cls.foo2, b='a') cls.field = Bar._meta.get_field('a') def assertChoicesEqual(self, choices, objs): self.assertEqual(choices, [(obj.pk, str(obj)) for obj in objs]) def test_get_choices(self): self.assertChoicesEqual( self.field.get_choices(include_blank=False, limit_choices_to={'a': 'a'}), [self.foo1], ) self.assertChoicesEqual( self.field.get_choices(include_blank=False, limit_choices_to={}), [self.foo1, self.foo2], ) def test_get_choices_reverse_related_field(self): field = self.field.remote_field self.assertChoicesEqual( field.get_choices(include_blank=False, limit_choices_to={'b': 'b'}), [self.bar1], ) self.assertChoicesEqual( field.get_choices(include_blank=False, limit_choices_to={}), [self.bar1, self.bar2], )
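# Illustrative sketch (added for clarity; not part of the original test
# module). It summarizes what the GetChoices* tests above exercise:
# Field.get_choices() returns (value, label) pairs and, unless a blank
# choice is already defined, prepends ('', '---------') when
# include_blank=True. A bare field needs no model or database.
def _get_choices_sketch():
    field = models.CharField(choices=[('a', 'Apple'), ('b', 'Banana')])
    assert field.get_choices(include_blank=False) == [('a', 'Apple'), ('b', 'Banana')]
    assert field.get_choices(include_blank=True)[0] == ('', '---------')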
import operator import uuid from unittest import mock, skipIf from django import forms from django.core import serializers from django.core.exceptions import ValidationError from django.core.serializers.json import DjangoJSONEncoder from django.db import ( DataError, IntegrityError, NotSupportedError, OperationalError, connection, models, ) from django.db.models import ( Count, ExpressionWrapper, F, IntegerField, OuterRef, Q, Subquery, Transform, Value, ) from django.db.models.expressions import RawSQL from django.db.models.fields.json import ( KeyTextTransform, KeyTransform, KeyTransformFactory, KeyTransformTextLookupMixin, ) from django.db.models.functions import Cast from django.test import ( SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import CaptureQueriesContext from .models import CustomJSONDecoder, JSONModel, NullableJSONModel @skipUnlessDBFeature('supports_json_field') class JSONFieldTests(TestCase): def test_invalid_value(self): msg = 'is not JSON serializable' with self.assertRaisesMessage(TypeError, msg): NullableJSONModel.objects.create(value={ 'uuid': uuid.UUID('d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475'), }) def test_custom_encoder_decoder(self): value = {'uuid': uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}')} obj = NullableJSONModel(value_custom=value) obj.clean_fields() obj.save() obj.refresh_from_db() self.assertEqual(obj.value_custom, value) def test_db_check_constraints(self): value = '{@!invalid json value 123 $!@#' with mock.patch.object(DjangoJSONEncoder, 'encode', return_value=value): with self.assertRaises((IntegrityError, DataError, OperationalError)): NullableJSONModel.objects.create(value_custom=value) class TestMethods(SimpleTestCase): def test_deconstruct(self): field = models.JSONField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 'django.db.models.JSONField') self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_deconstruct_custom_encoder_decoder(self): field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder) name, path, args, kwargs = field.deconstruct() self.assertEqual(kwargs['encoder'], DjangoJSONEncoder) self.assertEqual(kwargs['decoder'], CustomJSONDecoder) def test_get_transforms(self): @models.JSONField.register_lookup class MyTransform(Transform): lookup_name = 'my_transform' field = models.JSONField() transform = field.get_transform('my_transform') self.assertIs(transform, MyTransform) models.JSONField._unregister_lookup(MyTransform) models.JSONField._clear_cached_lookups() transform = field.get_transform('my_transform') self.assertIsInstance(transform, KeyTransformFactory) def test_key_transform_text_lookup_mixin_non_key_transform(self): transform = Transform('test') msg = ( 'Transform should be an instance of KeyTransform in order to use ' 'this lookup.' ) with self.assertRaisesMessage(TypeError, msg): KeyTransformTextLookupMixin(transform) class TestValidation(SimpleTestCase): def test_invalid_encoder(self): msg = 'The encoder parameter must be a callable object.' with self.assertRaisesMessage(ValueError, msg): models.JSONField(encoder=DjangoJSONEncoder()) def test_invalid_decoder(self): msg = 'The decoder parameter must be a callable object.' with self.assertRaisesMessage(ValueError, msg): models.JSONField(decoder=CustomJSONDecoder()) def test_validation_error(self): field = models.JSONField() msg = 'Value must be valid JSON.' 
value = uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}') with self.assertRaisesMessage(ValidationError, msg): field.clean({'uuid': value}, None) def test_custom_encoder(self): field = models.JSONField(encoder=DjangoJSONEncoder) value = uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}') field.clean({'uuid': value}, None) class TestFormField(SimpleTestCase): def test_formfield(self): model_field = models.JSONField() form_field = model_field.formfield() self.assertIsInstance(form_field, forms.JSONField) def test_formfield_custom_encoder_decoder(self): model_field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder) form_field = model_field.formfield() self.assertIs(form_field.encoder, DjangoJSONEncoder) self.assertIs(form_field.decoder, CustomJSONDecoder) class TestSerialization(SimpleTestCase): test_data = ( '[{"fields": {"value": %s}, ' '"model": "model_fields.jsonmodel", "pk": null}]' ) test_values = ( # (Python value, serialized value), ({'a': 'b', 'c': None}, '{"a": "b", "c": null}'), ('abc', '"abc"'), ('{"a": "a"}', '"{\\"a\\": \\"a\\"}"'), ) def test_dumping(self): for value, serialized in self.test_values: with self.subTest(value=value): instance = JSONModel(value=value) data = serializers.serialize('json', [instance]) self.assertJSONEqual(data, self.test_data % serialized) def test_loading(self): for value, serialized in self.test_values: with self.subTest(value=value): instance = list( serializers.deserialize('json', self.test_data % serialized) )[0].object self.assertEqual(instance.value, value) def test_xml_serialization(self): test_xml_data = ( '<django-objects version="1.0">' '<object model="model_fields.nullablejsonmodel">' '<field name="value" type="JSONField">%s' '</field></object></django-objects>' ) for value, serialized in self.test_values: with self.subTest(value=value): instance = NullableJSONModel(value=value) data = serializers.serialize('xml', [instance], fields=['value']) self.assertXMLEqual(data, test_xml_data % serialized) new_instance = list(serializers.deserialize('xml', data))[0].object self.assertEqual(new_instance.value, instance.value) @skipUnlessDBFeature('supports_json_field') class TestSaveLoad(TestCase): def test_null(self): obj = NullableJSONModel(value=None) obj.save() obj.refresh_from_db() self.assertIsNone(obj.value) @skipUnlessDBFeature('supports_primitives_in_json_field') def test_json_null_different_from_sql_null(self): json_null = NullableJSONModel.objects.create(value=Value('null')) json_null.refresh_from_db() sql_null = NullableJSONModel.objects.create(value=None) sql_null.refresh_from_db() # 'null' is not equal to NULL in the database. self.assertSequenceEqual( NullableJSONModel.objects.filter(value=Value('null')), [json_null], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value=None), [json_null], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__isnull=True), [sql_null], ) # 'null' is equal to NULL in Python (None). 
self.assertEqual(json_null.value, sql_null.value) @skipUnlessDBFeature('supports_primitives_in_json_field') def test_primitives(self): values = [ True, 1, 1.45, 'String', '', ] for value in values: with self.subTest(value=value): obj = JSONModel(value=value) obj.save() obj.refresh_from_db() self.assertEqual(obj.value, value) def test_dict(self): values = [ {}, {'name': 'John', 'age': 20, 'height': 180.3}, {'a': True, 'b': {'b1': False, 'b2': None}}, ] for value in values: with self.subTest(value=value): obj = JSONModel.objects.create(value=value) obj.refresh_from_db() self.assertEqual(obj.value, value) def test_list(self): values = [ [], ['John', 20, 180.3], [True, [False, None]], ] for value in values: with self.subTest(value=value): obj = JSONModel.objects.create(value=value) obj.refresh_from_db() self.assertEqual(obj.value, value) def test_realistic_object(self): value = { 'name': 'John', 'age': 20, 'pets': [ {'name': 'Kit', 'type': 'cat', 'age': 2}, {'name': 'Max', 'type': 'dog', 'age': 1}, ], 'courses': [ ['A1', 'A2', 'A3'], ['B1', 'B2'], ['C1'], ], } obj = JSONModel.objects.create(value=value) obj.refresh_from_db() self.assertEqual(obj.value, value) @skipUnlessDBFeature('supports_json_field') class TestQuerying(TestCase): @classmethod def setUpTestData(cls): cls.primitives = [True, False, 'yes', 7, 9.6] values = [ None, [], {}, {'a': 'b', 'c': 14}, { 'a': 'b', 'c': 14, 'd': ['e', {'f': 'g'}], 'h': True, 'i': False, 'j': None, 'k': {'l': 'm'}, 'n': [None], }, [1, [2]], {'k': True, 'l': False, 'foo': 'bax'}, { 'foo': 'bar', 'baz': {'a': 'b', 'c': 'd'}, 'bar': ['foo', 'bar'], 'bax': {'foo': 'bar'}, }, ] cls.objs = [ NullableJSONModel.objects.create(value=value) for value in values ] if connection.features.supports_primitives_in_json_field: cls.objs.extend([ NullableJSONModel.objects.create(value=value) for value in cls.primitives ]) cls.raw_sql = '%s::jsonb' if connection.vendor == 'postgresql' else '%s' def test_exact(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__exact={}), [self.objs[2]], ) def test_exact_complex(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__exact={'a': 'b', 'c': 14}), [self.objs[3]], ) def test_isnull(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__isnull=True), [self.objs[0]], ) def test_ordering_by_transform(self): mariadb = connection.vendor == 'mysql' and connection.mysql_is_mariadb values = [ {'ord': 93, 'name': 'bar'}, {'ord': 22.1, 'name': 'foo'}, {'ord': -1, 'name': 'baz'}, {'ord': 21.931902, 'name': 'spam'}, {'ord': -100291029, 'name': 'eggs'}, ] for field_name in ['value', 'value_custom']: with self.subTest(field=field_name): objs = [ NullableJSONModel.objects.create(**{field_name: value}) for value in values ] query = NullableJSONModel.objects.filter( **{'%s__name__isnull' % field_name: False}, ).order_by('%s__ord' % field_name) expected = [objs[4], objs[2], objs[3], objs[1], objs[0]] if mariadb or connection.vendor == 'oracle': # MariaDB and Oracle return JSON values as strings. 
expected = [objs[2], objs[4], objs[3], objs[1], objs[0]] self.assertSequenceEqual(query, expected) def test_ordering_grouping_by_key_transform(self): base_qs = NullableJSONModel.objects.filter(value__d__0__isnull=False) for qs in ( base_qs.order_by('value__d__0'), base_qs.annotate(key=KeyTransform('0', KeyTransform('d', 'value'))).order_by('key'), ): self.assertSequenceEqual(qs, [self.objs[4]]) qs = NullableJSONModel.objects.filter(value__isnull=False) self.assertQuerysetEqual( qs.filter(value__isnull=False).annotate( key=KeyTextTransform('f', KeyTransform('1', KeyTransform('d', 'value'))), ).values('key').annotate(count=Count('key')).order_by('count'), [(None, 0), ('g', 1)], operator.itemgetter('key', 'count'), ) @skipUnlessDBFeature('allows_group_by_lob') def test_ordering_grouping_by_count(self): qs = NullableJSONModel.objects.filter( value__isnull=False, ).values('value__d__0').annotate(count=Count('value__d__0')).order_by('count') self.assertQuerysetEqual(qs, [1, 11], operator.itemgetter('count')) def test_order_grouping_custom_decoder(self): NullableJSONModel.objects.create(value_custom={'a': 'b'}) qs = NullableJSONModel.objects.filter(value_custom__isnull=False) self.assertSequenceEqual( qs.values( 'value_custom__a', ).annotate( count=Count('id'), ).order_by('value_custom__a'), [{'value_custom__a': 'b', 'count': 1}], ) def test_key_transform_raw_expression(self): expr = RawSQL(self.raw_sql, ['{"x": "bar"}']) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__foo=KeyTransform('x', expr)), [self.objs[7]], ) def test_nested_key_transform_raw_expression(self): expr = RawSQL(self.raw_sql, ['{"x": {"y": "bar"}}']) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__foo=KeyTransform('y', KeyTransform('x', expr))), [self.objs[7]], ) def test_key_transform_expression(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate( key=KeyTransform('d', 'value'), chain=KeyTransform('0', 'key'), expr=KeyTransform('0', Cast('key', models.JSONField())), ).filter(chain=F('expr')), [self.objs[4]], ) def test_nested_key_transform_expression(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate( key=KeyTransform('d', 'value'), chain=KeyTransform('f', KeyTransform('1', 'key')), expr=KeyTransform('f', KeyTransform('1', Cast('key', models.JSONField()))), ).filter(chain=F('expr')), [self.objs[4]], ) def test_expression_wrapper_key_transform(self): self.assertSequenceEqual( NullableJSONModel.objects.annotate( expr=ExpressionWrapper( KeyTransform('c', 'value'), output_field=IntegerField(), ), ).filter(expr__isnull=False), self.objs[3:5], ) def test_has_key(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__has_key='a'), [self.objs[3], self.objs[4]], ) def test_has_key_null_value(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__has_key='j'), [self.objs[4]], ) def test_has_key_deep(self): tests = [ (Q(value__baz__has_key='a'), self.objs[7]), (Q(value__has_key=KeyTransform('a', KeyTransform('baz', 'value'))), self.objs[7]), (Q(value__has_key=KeyTransform('c', KeyTransform('baz', 'value'))), self.objs[7]), (Q(value__d__1__has_key='f'), self.objs[4]), ( Q(value__has_key=KeyTransform('f', KeyTransform('1', KeyTransform('d', 'value')))), self.objs[4], ) ] for condition, expected in tests: with self.subTest(condition=condition): self.assertSequenceEqual( NullableJSONModel.objects.filter(condition), [expected], ) def test_has_key_list(self): obj = 
NullableJSONModel.objects.create(value=[{'a': 1}, {'b': 'x'}]) tests = [ Q(value__1__has_key='b'), Q(value__has_key=KeyTransform('b', KeyTransform(1, 'value'))), Q(value__has_key=KeyTransform('b', KeyTransform('1', 'value'))), ] for condition in tests: with self.subTest(condition=condition): self.assertSequenceEqual( NullableJSONModel.objects.filter(condition), [obj], ) def test_has_keys(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__has_keys=['a', 'c', 'h']), [self.objs[4]], ) def test_has_any_keys(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__has_any_keys=['c', 'l']), [self.objs[3], self.objs[4], self.objs[6]], ) @skipUnlessDBFeature('supports_json_field_contains') def test_contains(self): tests = [ ({}, self.objs[2:5] + self.objs[6:8]), ({'baz': {'a': 'b', 'c': 'd'}}, [self.objs[7]]), ({'baz': {'a': 'b'}}, [self.objs[7]]), ({'baz': {'c': 'd'}}, [self.objs[7]]), ({'k': True, 'l': False}, [self.objs[6]]), ({'d': ['e', {'f': 'g'}]}, [self.objs[4]]), ({'d': ['e']}, [self.objs[4]]), ({'d': [{'f': 'g'}]}, [self.objs[4]]), ([1, [2]], [self.objs[5]]), ([1], [self.objs[5]]), ([[2]], [self.objs[5]]), ({'n': [None]}, [self.objs[4]]), ({'j': None}, [self.objs[4]]), ] for value, expected in tests: with self.subTest(value=value): qs = NullableJSONModel.objects.filter(value__contains=value) self.assertSequenceEqual(qs, expected) @skipIfDBFeature('supports_json_field_contains') def test_contains_unsupported(self): msg = 'contains lookup is not supported on this database backend.' with self.assertRaisesMessage(NotSupportedError, msg): NullableJSONModel.objects.filter( value__contains={'baz': {'a': 'b', 'c': 'd'}}, ).get() @skipUnlessDBFeature( 'supports_primitives_in_json_field', 'supports_json_field_contains', ) def test_contains_primitives(self): for value in self.primitives: with self.subTest(value=value): qs = NullableJSONModel.objects.filter(value__contains=value) self.assertIs(qs.exists(), True) @skipUnlessDBFeature('supports_json_field_contains') def test_contained_by(self): qs = NullableJSONModel.objects.filter(value__contained_by={'a': 'b', 'c': 14, 'h': True}) self.assertSequenceEqual(qs, self.objs[2:4]) @skipIfDBFeature('supports_json_field_contains') def test_contained_by_unsupported(self): msg = 'contained_by lookup is not supported on this database backend.' with self.assertRaisesMessage(NotSupportedError, msg): NullableJSONModel.objects.filter(value__contained_by={'a': 'b'}).get() def test_deep_values(self): qs = NullableJSONModel.objects.values_list('value__k__l') expected_objs = [(None,)] * len(self.objs) expected_objs[4] = ('m',) self.assertSequenceEqual(qs, expected_objs) @skipUnlessDBFeature('can_distinct_on_fields') def test_deep_distinct(self): query = NullableJSONModel.objects.distinct('value__k__l').values_list('value__k__l') self.assertSequenceEqual(query, [('m',), (None,)]) def test_isnull_key(self): # key__isnull=False works the same as has_key='key'. 
self.assertSequenceEqual( NullableJSONModel.objects.filter(value__a__isnull=True), self.objs[:3] + self.objs[5:], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__a__isnull=False), [self.objs[3], self.objs[4]], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__j__isnull=False), [self.objs[4]], ) def test_isnull_key_or_none(self): obj = NullableJSONModel.objects.create(value={'a': None}) self.assertSequenceEqual( NullableJSONModel.objects.filter(Q(value__a__isnull=True) | Q(value__a=None)), self.objs[:3] + self.objs[5:] + [obj], ) def test_none_key(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__j=None), [self.objs[4]], ) def test_none_key_exclude(self): obj = NullableJSONModel.objects.create(value={'j': 1}) if connection.vendor == 'oracle': # Oracle supports filtering JSON objects with NULL keys, but the # current implementation doesn't support it. self.assertSequenceEqual( NullableJSONModel.objects.exclude(value__j=None), self.objs[1:4] + self.objs[5:] + [obj], ) else: self.assertSequenceEqual(NullableJSONModel.objects.exclude(value__j=None), [obj]) def test_shallow_list_lookup(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__0=1), [self.objs[5]], ) def test_shallow_obj_lookup(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__a='b'), [self.objs[3], self.objs[4]], ) def test_obj_subquery_lookup(self): qs = NullableJSONModel.objects.annotate( field=Subquery(NullableJSONModel.objects.filter(pk=OuterRef('pk')).values('value')), ).filter(field__a='b') self.assertSequenceEqual(qs, [self.objs[3], self.objs[4]]) def test_deep_lookup_objs(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__k__l='m'), [self.objs[4]], ) def test_shallow_lookup_obj_target(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__k={'l': 'm'}), [self.objs[4]], ) def test_deep_lookup_array(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__1__0=2), [self.objs[5]], ) def test_deep_lookup_mixed(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__d__1__f='g'), [self.objs[4]], ) def test_deep_lookup_transform(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__c__gt=2), [self.objs[3], self.objs[4]], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__c__gt=2.33), [self.objs[3], self.objs[4]], ) self.assertIs(NullableJSONModel.objects.filter(value__c__lt=5).exists(), False) def test_lookup_exclude(self): tests = [ (Q(value__a='b'), [self.objs[0]]), (Q(value__foo='bax'), [self.objs[0], self.objs[7]]), ] for condition, expected in tests: self.assertSequenceEqual( NullableJSONModel.objects.exclude(condition), expected, ) self.assertSequenceEqual( NullableJSONModel.objects.filter(~condition), expected, ) def test_lookup_exclude_nonexistent_key(self): # Values without the key are ignored. condition = Q(value__foo='bax') objs_with_value = [self.objs[6]] objs_with_different_value = [self.objs[0], self.objs[7]] self.assertSequenceEqual( NullableJSONModel.objects.exclude(condition), objs_with_different_value, ) self.assertSequenceEqual( NullableJSONModel.objects.exclude(~condition), objs_with_value, ) self.assertCountEqual( NullableJSONModel.objects.filter(condition | ~condition), objs_with_value + objs_with_different_value, ) self.assertCountEqual( NullableJSONModel.objects.exclude(condition & ~condition), objs_with_value + objs_with_different_value, ) # Add the __isnull lookup to get an exhaustive set. 
self.assertSequenceEqual( NullableJSONModel.objects.exclude(condition & Q(value__foo__isnull=False)), self.objs[0:6] + self.objs[7:], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(condition & Q(value__foo__isnull=False)), objs_with_value, ) @skipIf( connection.vendor == 'oracle', 'Raises ORA-00600: internal error code on Oracle 18.', ) def test_usage_in_subquery(self): self.assertSequenceEqual( NullableJSONModel.objects.filter( id__in=NullableJSONModel.objects.filter(value__c=14), ), self.objs[3:5], ) @skipUnlessDBFeature('supports_json_field_contains') def test_array_key_contains(self): tests = [ ([], [self.objs[7]]), ('bar', [self.objs[7]]), (['bar'], [self.objs[7]]), ('ar', []), ] for value, expected in tests: with self.subTest(value=value): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__bar__contains=value), expected, ) def test_key_iexact(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__iexact='BaR').exists(), True) self.assertIs(NullableJSONModel.objects.filter(value__foo__iexact='"BaR"').exists(), False) def test_key_in(self): tests = [ ('value__c__in', [14], self.objs[3:5]), ('value__c__in', [14, 15], self.objs[3:5]), ('value__0__in', [1], [self.objs[5]]), ('value__0__in', [1, 3], [self.objs[5]]), ('value__foo__in', ['bar'], [self.objs[7]]), ( 'value__foo__in', [KeyTransform('foo', KeyTransform('bax', 'value'))], [self.objs[7]], ), ( 'value__foo__in', [KeyTransform('foo', KeyTransform('bax', 'value')), 'baz'], [self.objs[7]], ), ('value__foo__in', ['bar', 'baz'], [self.objs[7]]), ('value__bar__in', [['foo', 'bar']], [self.objs[7]]), ('value__bar__in', [['foo', 'bar'], ['a']], [self.objs[7]]), ('value__bax__in', [{'foo': 'bar'}, {'a': 'b'}], [self.objs[7]]), ] for lookup, value, expected in tests: with self.subTest(lookup=lookup, value=value): self.assertSequenceEqual( NullableJSONModel.objects.filter(**{lookup: value}), expected, ) @skipUnlessDBFeature('supports_json_field_contains') def test_key_contains(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__contains='ar').exists(), False) self.assertIs(NullableJSONModel.objects.filter(value__foo__contains='bar').exists(), True) def test_key_icontains(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__icontains='Ar').exists(), True) def test_key_startswith(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__startswith='b').exists(), True) def test_key_istartswith(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__istartswith='B').exists(), True) def test_key_endswith(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__endswith='r').exists(), True) def test_key_iendswith(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__iendswith='R').exists(), True) def test_key_regex(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__regex=r'^bar$').exists(), True) def test_key_iregex(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__iregex=r'^bAr$').exists(), True) @skipUnlessDBFeature('has_json_operators') def test_key_sql_injection(self): with CaptureQueriesContext(connection) as queries: self.assertIs( NullableJSONModel.objects.filter(**{ """value__test' = '"a"') OR 1 = 1 OR ('d""": 'x', }).exists(), False, ) self.assertIn( """."value" -> 'test'' = ''"a"'') OR 1 = 1 OR (''d') = '"x"' """, queries[0]['sql'], ) @skipIfDBFeature('has_json_operators') def test_key_sql_injection_escape(self): query = str(JSONModel.objects.filter(**{ """value__test") = '"a"' OR 1 = 1 OR ("d""": 'x', 
}).query) self.assertIn('"test\\"', query) self.assertIn('\\"d', query) def test_key_escape(self): obj = NullableJSONModel.objects.create(value={'%total': 10}) self.assertEqual(NullableJSONModel.objects.filter(**{'value__%total': 10}).get(), obj) def test_none_key_and_exact_lookup(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__a='b', value__j=None), [self.objs[4]], ) def test_lookups_with_key_transform(self): tests = ( ('value__baz__has_key', 'c'), ('value__baz__has_keys', ['a', 'c']), ('value__baz__has_any_keys', ['a', 'x']), ('value__has_key', KeyTextTransform('foo', 'value')), ) for lookup, value in tests: with self.subTest(lookup=lookup): self.assertIs(NullableJSONModel.objects.filter( **{lookup: value}, ).exists(), True) @skipUnlessDBFeature('supports_json_field_contains') def test_contains_contained_by_with_key_transform(self): tests = [ ('value__d__contains', 'e'), ('value__d__contains', [{'f': 'g'}]), ('value__contains', KeyTransform('bax', 'value')), ('value__baz__contains', {'a': 'b'}), ('value__baz__contained_by', {'a': 'b', 'c': 'd', 'e': 'f'}), ( 'value__contained_by', KeyTransform('x', RawSQL( self.raw_sql, ['{"x": {"a": "b", "c": 1, "d": "e"}}'], )), ), ] # For databases where {'f': 'g'} (without surrounding []) matches # [{'f': 'g'}]. if not connection.features.json_key_contains_list_matching_requires_list: tests.append(('value__d__contains', {'f': 'g'})) for lookup, value in tests: with self.subTest(lookup=lookup, value=value): self.assertIs(NullableJSONModel.objects.filter( **{lookup: value}, ).exists(), True)
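# Illustrative sketch (added for clarity; not part of the original test
# module). It recaps the key-path lookup syntax exercised by TestQuerying
# above, using the NullableJSONModel test model imported at the top of this
# file. Evaluating these querysets assumes a backend with
# supports_json_field; merely building them touches no database.
def _json_key_lookup_sketch():
    # Key lookup: value={'a': 'b'} matches value__a='b'.
    by_key = NullableJSONModel.objects.filter(value__a='b')
    # Array index lookup: value=[1, [2]] matches value__0=1.
    by_index = NullableJSONModel.objects.filter(value__0=1)
    # Nested path combined with a standard lookup.
    by_path = NullableJSONModel.objects.filter(value__c__gt=2)
    # Key presence, independent of the key's value.
    by_has_key = NullableJSONModel.objects.filter(value__has_key='a')
    return by_key, by_index, by_path, by_has_key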
from django import forms from django.core.exceptions import ValidationError from django.db import IntegrityError, models, transaction from django.test import SimpleTestCase, TestCase from .models import BooleanModel, FksToBooleans, NullBooleanModel class BooleanFieldTests(TestCase): def _test_get_prep_value(self, f): self.assertIs(f.get_prep_value(True), True) self.assertIs(f.get_prep_value('1'), True) self.assertIs(f.get_prep_value(1), True) self.assertIs(f.get_prep_value(False), False) self.assertIs(f.get_prep_value('0'), False) self.assertIs(f.get_prep_value(0), False) self.assertIsNone(f.get_prep_value(None)) def _test_to_python(self, f): self.assertIs(f.to_python(1), True) self.assertIs(f.to_python(0), False) def test_booleanfield_get_prep_value(self): self._test_get_prep_value(models.BooleanField()) def test_nullbooleanfield_get_prep_value(self): self._test_get_prep_value(models.BooleanField(null=True)) def test_nullbooleanfield_old_get_prep_value(self): self._test_get_prep_value(models.NullBooleanField()) def test_booleanfield_to_python(self): self._test_to_python(models.BooleanField()) def test_nullbooleanfield_to_python(self): self._test_to_python(models.BooleanField(null=True)) def test_nullbooleanfield_old_to_python(self): self._test_to_python(models.NullBooleanField()) def test_booleanfield_choices_blank(self): """ BooleanField with choices and defaults doesn't generate a formfield with the blank option (#9640, #10549). """ choices = [(1, 'Si'), (2, 'No')] f = models.BooleanField(choices=choices, default=1, null=False) self.assertEqual(f.formfield().choices, choices) def test_booleanfield_choices_blank_desired(self): """ BooleanField with choices and no default should generated a formfield with the blank option. """ choices = [(1, 'Si'), (2, 'No')] f = models.BooleanField(choices=choices) self.assertEqual(f.formfield().choices, [('', '---------')] + choices) def test_nullbooleanfield_formfield(self): f = models.BooleanField(null=True) self.assertIsInstance(f.formfield(), forms.NullBooleanField) f = models.NullBooleanField() self.assertIsInstance(f.formfield(), forms.NullBooleanField) def test_return_type(self): b = BooleanModel.objects.create(bfield=True) b.refresh_from_db() self.assertIs(b.bfield, True) b2 = BooleanModel.objects.create(bfield=False) b2.refresh_from_db() self.assertIs(b2.bfield, False) b3 = NullBooleanModel.objects.create(nbfield=True, nbfield_old=True) b3.refresh_from_db() self.assertIs(b3.nbfield, True) self.assertIs(b3.nbfield_old, True) b4 = NullBooleanModel.objects.create(nbfield=False, nbfield_old=False) b4.refresh_from_db() self.assertIs(b4.nbfield, False) self.assertIs(b4.nbfield_old, False) # When an extra clause exists, the boolean conversions are applied with # an offset (#13293). b5 = BooleanModel.objects.all().extra(select={'string_col': 'string'})[0] self.assertNotIsInstance(b5.pk, bool) def test_select_related(self): """ Boolean fields retrieved via select_related() should return booleans. 
""" bmt = BooleanModel.objects.create(bfield=True) bmf = BooleanModel.objects.create(bfield=False) nbmt = NullBooleanModel.objects.create(nbfield=True, nbfield_old=True) nbmf = NullBooleanModel.objects.create(nbfield=False, nbfield_old=False) m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt) m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf) # select_related('fk_field_name') ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id) self.assertIs(ma.bf.bfield, True) self.assertIs(ma.nbf.nbfield, True) # select_related() mb = FksToBooleans.objects.select_related().get(pk=m1.id) mc = FksToBooleans.objects.select_related().get(pk=m2.id) self.assertIs(mb.bf.bfield, True) self.assertIs(mb.nbf.nbfield, True) self.assertIs(mb.nbf.nbfield_old, True) self.assertIs(mc.bf.bfield, False) self.assertIs(mc.nbf.nbfield, False) self.assertIs(mc.nbf.nbfield_old, False) def test_null_default(self): """ A BooleanField defaults to None, which isn't a valid value (#15124). """ boolean_field = BooleanModel._meta.get_field('bfield') self.assertFalse(boolean_field.has_default()) b = BooleanModel() self.assertIsNone(b.bfield) with transaction.atomic(): with self.assertRaises(IntegrityError): b.save() nb = NullBooleanModel() self.assertIsNone(nb.nbfield) self.assertIsNone(nb.nbfield_old) nb.save() # no error class ValidationTest(SimpleTestCase): def test_boolean_field_doesnt_accept_empty_input(self): f = models.BooleanField() with self.assertRaises(ValidationError): f.clean(None, None) def test_nullbooleanfield_blank(self): """ NullBooleanField shouldn't throw a validation error when given a value of None. """ nullboolean = NullBooleanModel(nbfield=None, nbfield_old=None) nullboolean.full_clean()
from unittest import skipIf from django.core.exceptions import ValidationError from django.db import connection, models from django.test import SimpleTestCase, TestCase from .models import Post class TestCharField(TestCase): def test_max_length_passed_to_formfield(self): """ CharField passes its max_length attribute to form fields created using the formfield() method. """ cf1 = models.CharField() cf2 = models.CharField(max_length=1234) self.assertIsNone(cf1.formfield().max_length) self.assertEqual(1234, cf2.formfield().max_length) def test_lookup_integer_in_charfield(self): self.assertEqual(Post.objects.filter(title=9).count(), 0) @skipIf(connection.vendor == 'mysql', 'Running on MySQL requires utf8mb4 encoding (#18392)') def test_emoji(self): p = Post.objects.create(title='Smile 😀', body='Whatever.') p.refresh_from_db() self.assertEqual(p.title, 'Smile 😀') def test_assignment_from_choice_enum(self): class Event(models.TextChoices): C = 'Carnival!' F = 'Festival!' p1 = Post.objects.create(title=Event.C, body=Event.F) p1.refresh_from_db() self.assertEqual(p1.title, 'Carnival!') self.assertEqual(p1.body, 'Festival!') self.assertEqual(p1.title, Event.C) self.assertEqual(p1.body, Event.F) p2 = Post.objects.get(title='Carnival!') self.assertEqual(p1, p2) self.assertEqual(p2.title, Event.C) class TestMethods(SimpleTestCase): def test_deconstruct(self): field = models.CharField() *_, kwargs = field.deconstruct() self.assertEqual(kwargs, {}) field = models.CharField(db_collation='utf8_esperanto_ci') *_, kwargs = field.deconstruct() self.assertEqual(kwargs, {'db_collation': 'utf8_esperanto_ci'}) class ValidationTests(SimpleTestCase): class Choices(models.TextChoices): C = 'c', 'C' def test_charfield_raises_error_on_empty_string(self): f = models.CharField() msg = 'This field cannot be blank.' with self.assertRaisesMessage(ValidationError, msg): f.clean('', None) def test_charfield_cleans_empty_string_when_blank_true(self): f = models.CharField(blank=True) self.assertEqual('', f.clean('', None)) def test_charfield_with_choices_cleans_valid_choice(self): f = models.CharField(max_length=1, choices=[('a', 'A'), ('b', 'B')]) self.assertEqual('a', f.clean('a', None)) def test_charfield_with_choices_raises_error_on_invalid_choice(self): f = models.CharField(choices=[('a', 'A'), ('b', 'B')]) msg = "Value 'not a' is not a valid choice." with self.assertRaisesMessage(ValidationError, msg): f.clean('not a', None) def test_enum_choices_cleans_valid_string(self): f = models.CharField(choices=self.Choices.choices, max_length=1) self.assertEqual(f.clean('c', None), 'c') def test_enum_choices_invalid_input(self): f = models.CharField(choices=self.Choices.choices, max_length=1) msg = "Value 'a' is not a valid choice." with self.assertRaisesMessage(ValidationError, msg): f.clean('a', None) def test_charfield_raises_error_on_empty_input(self): f = models.CharField(null=False) msg = 'This field cannot be null.' with self.assertRaisesMessage(ValidationError, msg): f.clean(None, None)
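# Illustrative sketch (added for clarity; not part of the original test
# module). It restates test_max_length_passed_to_formfield above: a model
# CharField forwards its max_length to the form field it generates, while a
# CharField without max_length yields a form field with no limit.
def _charfield_formfield_sketch():
    limited = models.CharField(max_length=10)
    unlimited = models.CharField()
    assert limited.formfield().max_length == 10
    assert unlimited.formfield().max_length is None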
import datetime import pickle import unittest import uuid from collections import namedtuple from copy import deepcopy from decimal import Decimal from unittest import mock from django.core.exceptions import FieldError from django.db import DatabaseError, NotSupportedError, connection from django.db.models import ( AutoField, Avg, BinaryField, BooleanField, Case, CharField, Count, DateField, DateTimeField, DecimalField, DurationField, Exists, Expression, ExpressionList, ExpressionWrapper, F, FloatField, Func, IntegerField, Max, Min, Model, OrderBy, OuterRef, Q, StdDev, Subquery, Sum, TimeField, UUIDField, Value, Variance, When, ) from django.db.models.expressions import ( Col, Combinable, CombinedExpression, RawSQL, Ref, ) from django.db.models.functions import ( Coalesce, Concat, Left, Length, Lower, Substr, Upper, ) from django.db.models.sql import constants from django.db.models.sql.datastructures import Join from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import Approximate, CaptureQueriesContext, isolate_apps from django.utils.functional import SimpleLazyObject from .models import ( UUID, UUIDPK, Company, Employee, Experiment, Manager, Number, RemoteEmployee, Result, SimulationRun, Time, ) class BasicExpressionsTests(TestCase): @classmethod def setUpTestData(cls): cls.example_inc = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10) ) cls.foobar_ltd = Company.objects.create( name="Foobar Ltd.", num_employees=3, num_chairs=4, based_in_eu=True, ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20) ) cls.max = Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30) cls.gmbh = Company.objects.create(name='Test GmbH', num_employees=32, num_chairs=1, ceo=cls.max) def setUp(self): self.company_query = Company.objects.values( "name", "num_employees", "num_chairs" ).order_by( "name", "num_employees", "num_chairs" ) def test_annotate_values_aggregate(self): companies = Company.objects.annotate( salaries=F('ceo__salary'), ).values('num_employees', 'salaries').aggregate( result=Sum( F('salaries') + F('num_employees'), output_field=IntegerField() ), ) self.assertEqual(companies['result'], 2395) def test_annotate_values_filter(self): companies = Company.objects.annotate( foo=RawSQL('%s', ['value']), ).filter(foo='value').order_by('name') self.assertQuerysetEqual( companies, ['<Company: Example Inc.>', '<Company: Foobar Ltd.>', '<Company: Test GmbH>'], ) def test_annotate_values_count(self): companies = Company.objects.annotate(foo=RawSQL('%s', ['value'])) self.assertEqual(companies.count(), 3) @skipUnlessDBFeature('supports_boolean_expr_in_select_clause') def test_filtering_on_annotate_that_uses_q(self): self.assertEqual( Company.objects.annotate( num_employees_check=ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField()) ).filter(num_employees_check=True).count(), 2, ) def test_filtering_on_q_that_is_boolean(self): self.assertEqual( Company.objects.filter( ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField()) ).count(), 2, ) def test_filtering_on_rawsql_that_is_boolean(self): self.assertEqual( Company.objects.filter( RawSQL('num_employees > %s', (3,), output_field=BooleanField()), ).count(), 2, ) def test_filter_inter_attribute(self): # We can filter on attribute relationships on same model obj, e.g. 
# find companies where the number of employees is greater # than the number of chairs. self.assertSequenceEqual( self.company_query.filter(num_employees__gt=F("num_chairs")), [ { "num_chairs": 5, "name": "Example Inc.", "num_employees": 2300, }, { "num_chairs": 1, "name": "Test GmbH", "num_employees": 32 }, ], ) def test_update(self): # We can set one field to have the value of another field # Make sure we have enough chairs self.company_query.update(num_chairs=F("num_employees")) self.assertSequenceEqual( self.company_query, [ { "num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300 }, { "num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3 }, { "num_chairs": 32, "name": "Test GmbH", "num_employees": 32 } ], ) def test_arithmetic(self): # We can perform arithmetic operations in expressions # Make sure we have 2 spare chairs self.company_query.update(num_chairs=F("num_employees") + 2) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 2302, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 5, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 34, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_order_of_operations(self): # Law of order of operations is followed self.company_query.update(num_chairs=F('num_employees') + 2 * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 6900, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 9, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 96, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_parenthesis_priority(self): # Law of order of operations can be overridden by parentheses self.company_query.update(num_chairs=(F('num_employees') + 2) * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 5294600, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 15, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 1088, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_update_with_fk(self): # ForeignKey can become updated with the value of another ForeignKey. 
self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3) self.assertQuerysetEqual( Company.objects.all(), ['Joe Smith', 'Frank Meyer', 'Max Mustermann'], lambda c: str(c.point_of_contact), ordered=False ) def test_update_with_none(self): Number.objects.create(integer=1, float=1.0) Number.objects.create(integer=2) Number.objects.filter(float__isnull=False).update(float=Value(None)) self.assertQuerysetEqual( Number.objects.all(), [None, None], lambda n: n.float, ordered=False ) def test_filter_with_join(self): # F Expressions can also span joins Company.objects.update(point_of_contact=F('ceo')) c = Company.objects.first() c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum") c.save() self.assertQuerysetEqual( Company.objects.filter(ceo__firstname=F('point_of_contact__firstname')), ['Foobar Ltd.', 'Test GmbH'], lambda c: c.name, ordered=False ) Company.objects.exclude( ceo__firstname=F("point_of_contact__firstname") ).update(name="foo") self.assertEqual( Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).get().name, "foo", ) msg = "Joined field references are not permitted in this query" with self.assertRaisesMessage(FieldError, msg): Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).update(name=F('point_of_contact__lastname')) def test_object_update(self): # F expressions can be used to update attributes on single objects self.gmbh.num_employees = F('num_employees') + 4 self.gmbh.save() self.gmbh.refresh_from_db() self.assertEqual(self.gmbh.num_employees, 36) def test_new_object_save(self): # We should be able to use Funcs when inserting new data test_co = Company(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.save() test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_new_object_create(self): test_co = Company.objects.create(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_object_create_with_aggregate(self): # Aggregates are not allowed when inserting new data msg = 'Aggregate functions are not allowed in this query (num_employees=Max(Value(1))).' with self.assertRaisesMessage(FieldError, msg): Company.objects.create( name='Company', num_employees=Max(Value(1)), num_chairs=1, ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30), ) def test_object_update_fk(self): # F expressions cannot be used to update attributes which are foreign # keys, or attributes which involve joins. test_gmbh = Company.objects.get(pk=self.gmbh.pk) msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.' 
with self.assertRaisesMessage(ValueError, msg): test_gmbh.point_of_contact = F('ceo') test_gmbh.point_of_contact = self.gmbh.ceo test_gmbh.save() test_gmbh.name = F('ceo__lastname') msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): test_gmbh.save() def test_update_inherited_field_value(self): msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): RemoteEmployee.objects.update(adjusted_salary=F('salary') * 5) def test_object_update_unsaved_objects(self): # F expressions cannot be used to update attributes on objects which do # not yet exist in the database acme = Company(name='The Acme Widget Co.', num_employees=12, num_chairs=5, ceo=self.max) acme.num_employees = F("num_employees") + 16 msg = ( 'Failed to insert expression "Col(expressions_company, ' 'expressions.Company.num_employees) + Value(16)" on ' 'expressions.Company.num_employees. F() expressions can only be ' 'used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() acme.num_employees = 12 acme.name = Lower(F('name')) msg = ( 'Failed to insert expression "Lower(Col(expressions_company, ' 'expressions.Company.name))" on expressions.Company.name. F() ' 'expressions can only be used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() def test_ticket_11722_iexact_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") Employee.objects.create(firstname="Test", lastname="test") queryset = Employee.objects.filter(firstname__iexact=F('lastname')) self.assertQuerysetEqual(queryset, ["<Employee: Test test>"]) def test_ticket_16731_startswith_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") e2 = Employee.objects.create(firstname="Jack", lastname="Jackson") e3 = Employee.objects.create(firstname="Jack", lastname="jackson") self.assertSequenceEqual( Employee.objects.filter(lastname__startswith=F('firstname')), [e2, e3] if connection.features.has_case_insensitive_like else [e2] ) qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk') self.assertSequenceEqual(qs, [e2, e3]) def test_ticket_18375_join_reuse(self): # Reverse multijoin F() references and the lookup target the same join. # Pre #18375 the F() join was generated first and the lookup couldn't # reuse that join. qs = Employee.objects.filter(company_ceo_set__num_chairs=F('company_ceo_set__num_employees')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering(self): # The next query was dict-randomization dependent - if the "gte=1" # was seen first, then the F() will reuse the join generated by the # gte lookup, if F() was seen first, then it generated a join the # other lookups could not reuse. qs = Employee.objects.filter( company_ceo_set__num_chairs=F('company_ceo_set__num_employees'), company_ceo_set__num_chairs__gte=1, ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering_2(self): # Another similar case for F() than above. Now we have the same join # in two filter kwargs, one in the lhs lookup, one in F. Here pre # #18375 the amount of joins generated was random if dict # randomization was enabled, that is the generated query dependent # on which clause was seen first. 
qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk'), pk=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_chained_filters(self): # F() expressions do not reuse joins from previous filter. qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk') ).filter( company_ceo_set__num_employees=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 2) def test_order_by_exists(self): mary = Employee.objects.create(firstname='Mary', lastname='Mustermann', salary=20) mustermanns_by_seniority = Employee.objects.filter(lastname='Mustermann').order_by( # Order by whether the employee is the CEO of a company Exists(Company.objects.filter(ceo=OuterRef('pk'))).desc() ) self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary]) def test_order_by_multiline_sql(self): raw_order_by = ( RawSQL(''' CASE WHEN num_employees > 1000 THEN num_chairs ELSE 0 END ''', []).desc(), RawSQL(''' CASE WHEN num_chairs > 1 THEN 1 ELSE 0 END ''', []).asc() ) for qs in ( Company.objects.all(), Company.objects.distinct(), ): with self.subTest(qs=qs): self.assertSequenceEqual( qs.order_by(*raw_order_by), [self.example_inc, self.gmbh, self.foobar_ltd], ) def test_outerref(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) msg = ( 'This queryset contains a reference to an outer query and may only ' 'be used in a subquery.' ) with self.assertRaisesMessage(ValueError, msg): inner.exists() outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) self.assertIs(outer.exists(), True) def test_exist_single_field_output_field(self): queryset = Company.objects.values('pk') self.assertIsInstance(Exists(queryset).output_field, BooleanField) def test_subquery(self): Company.objects.filter(name='Example Inc.').update( point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'), ceo=self.max, ) Employee.objects.create(firstname='Bob', lastname='Brown', salary=40) qs = Employee.objects.annotate( is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), largest_company=Subquery(Company.objects.order_by('-num_employees').filter( Q(ceo=OuterRef('pk')) | Q(point_of_contact=OuterRef('pk')) ).values('name')[:1], output_field=CharField()) ).values( 'firstname', 'is_point_of_contact', 'is_not_point_of_contact', 'is_ceo_of_small_company', 'is_ceo_small_2', 'largest_company', ).order_by('firstname') results = list(qs) # Could use Coalesce(subq, Value('')) instead except for the bug in # cx_Oracle mentioned in #23843. 
bob = results[0] if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls: bob['largest_company'] = None self.assertEqual(results, [ { 'firstname': 'Bob', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': None, }, { 'firstname': 'Frank', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Foobar Ltd.', }, { 'firstname': 'Joe', 'is_point_of_contact': True, 'is_not_point_of_contact': False, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': 'Example Inc.', }, { 'firstname': 'Max', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Example Inc.' } ]) # A less elegant way to write the same query: this uses a LEFT OUTER # JOIN and an IS NULL, inside a WHERE NOT IN which is probably less # efficient than EXISTS. self.assertCountEqual( qs.filter(is_point_of_contact=True).values('pk'), Employee.objects.exclude(company_point_of_contact_set=None).values('pk') ) def test_subquery_eq(self): qs = Employee.objects.annotate( is_ceo=Exists(Company.objects.filter(ceo=OuterRef('pk'))), is_point_of_contact=Exists( Company.objects.filter(point_of_contact=OuterRef('pk')), ), small_company=Exists( queryset=Company.objects.filter(num_employees__lt=200), ), ).filter(is_ceo=True, is_point_of_contact=False, small_company=True) self.assertNotEqual( qs.query.annotations['is_ceo'], qs.query.annotations['is_point_of_contact'], ) self.assertNotEqual( qs.query.annotations['is_ceo'], qs.query.annotations['small_company'], ) def test_in_subquery(self): # This is a contrived test (and you really wouldn't write this query), # but it is a succinct way to test the __in=Subquery() construct. 
small_companies = Company.objects.filter(num_employees__lt=200).values('pk') subquery_test = Company.objects.filter(pk__in=Subquery(small_companies)) self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh]) subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3))) self.assertCountEqual(subquery_test2, [self.foobar_ltd]) def test_uuid_pk_subquery(self): u = UUIDPK.objects.create() UUID.objects.create(uuid_fk=u) qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id'))) self.assertCountEqual(qs, [u]) def test_nested_subquery(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) contrived = Employee.objects.annotate( is_point_of_contact=Subquery( outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'), output_field=BooleanField(), ), ) self.assertCountEqual(contrived.values_list(), outer.values_list()) def test_nested_subquery_join_outer_ref(self): inner = Employee.objects.filter(pk=OuterRef('ceo__pk')).values('pk') qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( ceo__in=inner, ceo__pk=OuterRef('pk'), ).values('pk'), ), ) self.assertSequenceEqual( qs.values_list('ceo_company', flat=True), [self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk], ) def test_nested_subquery_outer_ref_2(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') third = Time.objects.create(time='21:00') SimulationRun.objects.bulk_create([ SimulationRun(start=first, end=second, midpoint='12:00'), SimulationRun(start=first, end=third, midpoint='15:00'), SimulationRun(start=second, end=first, midpoint='00:00'), ]) inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time') middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField())) # This is a contrived example. It exercises the double OuterRef form. self.assertCountEqual(outer, [first, second, third]) def test_nested_subquery_outer_ref_with_autofield(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') SimulationRun.objects.create(start=first, end=second, midpoint='12:00') inner = SimulationRun.objects.filter(start=OuterRef(OuterRef('pk'))).values('start') middle = Time.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=IntegerField())) # This exercises the double OuterRef form with AutoField as pk. 
self.assertCountEqual(outer, [first, second]) def test_annotations_within_subquery(self): Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank')) inner = Company.objects.filter( ceo=OuterRef('pk') ).values('ceo').annotate(total_employees=Sum('num_employees')).values('total_employees') outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner)) self.assertSequenceEqual( outer.order_by('-total_employees').values('salary', 'total_employees'), [{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}], ) def test_subquery_references_joined_table_twice(self): inner = Company.objects.filter( num_chairs__gte=OuterRef('ceo__salary'), num_employees__gte=OuterRef('point_of_contact__salary'), ) # Another contrived example (there is no need to have a subquery here) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_subquery_filter_by_aggregate(self): Number.objects.create(integer=1000, float=1.2) Employee.objects.create(salary=1000) qs = Number.objects.annotate( min_valuable_count=Subquery( Employee.objects.filter( salary=OuterRef('integer'), ).annotate(cnt=Count('salary')).filter(cnt__gt=0).values('cnt')[:1] ), ) self.assertEqual(qs.get().float, 1.2) def test_subquery_filter_by_lazy(self): self.max.manager = Manager.objects.create(name='Manager') self.max.save() max_manager = SimpleLazyObject( lambda: Manager.objects.get(pk=self.max.manager.pk) ) qs = Company.objects.annotate( ceo_manager=Subquery( Employee.objects.filter( lastname=OuterRef('ceo__lastname'), ).values('manager'), ), ).filter(ceo_manager=max_manager) self.assertEqual(qs.get(), self.gmbh) def test_aggregate_subquery_annotation(self): with self.assertNumQueries(1) as ctx: aggregate = Company.objects.annotate( ceo_salary=Subquery( Employee.objects.filter( id=OuterRef('ceo_id'), ).values('salary') ), ).aggregate( ceo_salary_gt_20=Count('pk', filter=Q(ceo_salary__gt=20)), ) self.assertEqual(aggregate, {'ceo_salary_gt_20': 1}) # Aggregation over a subquery annotation doesn't annotate the subquery # twice in the inner query. sql = ctx.captured_queries[0]['sql'] self.assertLessEqual(sql.count('SELECT'), 3) # GROUP BY isn't required to aggregate over a query that doesn't # contain nested aggregates. 
self.assertNotIn('GROUP BY', sql) def test_explicit_output_field(self): class FuncA(Func): output_field = CharField() class FuncB(Func): pass expr = FuncB(FuncA()) self.assertEqual(expr.output_field, FuncA.output_field) def test_outerref_mixed_case_table_name(self): inner = Result.objects.filter(result_time__gte=OuterRef('experiment__assigned')) outer = Result.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_outerref_with_operator(self): inner = Company.objects.filter(num_employees=OuterRef('ceo__salary') + 2) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertEqual(outer.get().name, 'Test GmbH') def test_nested_outerref_with_function(self): self.gmbh.point_of_contact = Employee.objects.get(lastname='Meyer') self.gmbh.save() inner = Employee.objects.filter( lastname__startswith=Left(OuterRef(OuterRef('lastname')), 1), ) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef('pk'), ).values('name'), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, 'Test GmbH') def test_annotation_with_outerref(self): gmbh_salary = Company.objects.annotate( max_ceo_salary_raise=Subquery( Company.objects.annotate( salary_raise=OuterRef('num_employees') + F('num_employees'), ).order_by('-salary_raise').values('salary_raise')[:1], output_field=IntegerField(), ), ).get(pk=self.gmbh.pk) self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332) def test_annotation_with_nested_outerref(self): self.gmbh.point_of_contact = Employee.objects.get(lastname='Meyer') self.gmbh.save() inner = Employee.objects.annotate( outer_lastname=OuterRef(OuterRef('lastname')), ).filter(lastname__startswith=Left('outer_lastname', 1)) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef('pk'), ).values('name'), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, 'Test GmbH') def test_pickle_expression(self): expr = Value(1) expr.convert_value # populate cached property self.assertEqual(pickle.loads(pickle.dumps(expr)), expr) def test_incorrect_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Employee.objects.filter(firstname=F('nope'))) def test_incorrect_joined_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Company.objects.filter(ceo__pk=F('point_of_contact__nope'))) def test_exists_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('pk') qs1 = Employee.objects.filter(Exists(inner)) qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True) self.assertCountEqual(qs1, qs2) self.assertFalse(Employee.objects.exclude(Exists(inner)).exists()) self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner))) def test_subquery_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('based_in_eu') self.assertSequenceEqual( Employee.objects.filter(Subquery(inner)), [self.foobar_ltd.ceo], ) def test_subquery_group_by_outerref_in_filter(self): inner = Company.objects.annotate( employee=OuterRef('pk'), ).values('employee').annotate( min_num_chairs=Min('num_chairs'), ).values('ceo') self.assertIs(Employee.objects.filter(pk__in=Subquery(inner)).exists(), True) def test_case_in_filter_if_boolean_output_field(self): is_ceo = Company.objects.filter(ceo=OuterRef('pk')) is_poc = 
Company.objects.filter(point_of_contact=OuterRef('pk')) qs = Employee.objects.filter( Case( When(Exists(is_ceo), then=True), When(Exists(is_poc), then=True), default=False, output_field=BooleanField(), ), ) self.assertCountEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max]) def test_boolean_expression_combined(self): is_ceo = Company.objects.filter(ceo=OuterRef('pk')) is_poc = Company.objects.filter(point_of_contact=OuterRef('pk')) self.gmbh.point_of_contact = self.max self.gmbh.save() self.assertCountEqual( Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)), [self.example_inc.ceo, self.foobar_ltd.ceo, self.max], ) self.assertCountEqual( Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)), [self.max], ) self.assertCountEqual( Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)), [self.max], ) self.assertCountEqual( Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)), [self.example_inc.ceo, self.max], ) class IterableLookupInnerExpressionsTests(TestCase): @classmethod def setUpTestData(cls): ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30) # MySQL requires that the values calculated for expressions don't pass # outside of the field's range, so it's inconvenient to use the values # in the more general tests. Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo) Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo) Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo) Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo) cls.c5 = Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo) def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self): # __in lookups can use F() expressions for integers. 
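        # Each item in the __in list is resolved against the current row, so
        # num_employees__in=[F('num_chairs') - 10] keeps companies whose
        # employee count equals their own chair count minus ten.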
queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10])) self.assertQuerysetEqual(queryset, ['<Company: 5060 Ltd>'], ordered=False) self.assertQuerysetEqual( Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])), ['<Company: 5040 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter( num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10]) ), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) def test_expressions_in_lookups_join_choice(self): midpoint = datetime.time(13, 0) t1 = Time.objects.create(time=datetime.time(12, 0)) t2 = Time.objects.create(time=datetime.time(14, 0)) SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint) SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=None, end=None, midpoint=midpoint) queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual( queryset, ['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'], ordered=False ) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.INNER) queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual(queryset, [], ordered=False) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.LOUTER) def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self): # Range lookups can use F() expressions for integers. Company.objects.filter(num_employees__exact=F("num_chairs")) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs'), 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(1, 100)), [ '<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>', '<Company: 99300 Ltd>', ], ordered=False ) def test_range_lookup_namedtuple(self): EmployeeRange = namedtuple('EmployeeRange', ['minimum', 'maximum']) qs = Company.objects.filter( num_employees__range=EmployeeRange(minimum=51, maximum=100), ) self.assertSequenceEqual(qs, [self.c5]) @unittest.skipUnless(connection.vendor == 'sqlite', "This defensive test only works on databases that don't validate parameter types") def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self): """ This tests that SQL injection isn't possible using compilation of expressions in iterable filters, as their compilation happens before the main query compilation. It's limited to SQLite, as PostgreSQL, Oracle and other vendors have defense in depth against this by type checking. Testing against SQLite (the most permissive of the built-in databases) demonstrates that the problem doesn't exist while keeping the test simple. 
""" queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1']) self.assertQuerysetEqual(queryset, [], ordered=False) def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self): start = datetime.datetime(2016, 2, 3, 15, 0, 0) end = datetime.datetime(2016, 2, 5, 15, 0, 0) experiment_1 = Experiment.objects.create( name='Integrity testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) experiment_2 = Experiment.objects.create( name='Taste testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 2, 4, 15, 0, 0), ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 3, 10, 2, 0, 0), ) Result.objects.create( experiment=experiment_2, result_time=datetime.datetime(2016, 1, 8, 5, 0, 0), ) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) class FTests(SimpleTestCase): def test_deepcopy(self): f = F("foo") g = deepcopy(f) self.assertEqual(f.name, g.name) def test_deconstruct(self): f = F('name') path, args, kwargs = f.deconstruct() self.assertEqual(path, 'django.db.models.expressions.F') self.assertEqual(args, (f.name,)) self.assertEqual(kwargs, {}) def test_equal(self): f = F('name') same_f = F('name') other_f = F('username') self.assertEqual(f, same_f) self.assertNotEqual(f, other_f) def test_hash(self): d = {F('name'): 'Bob'} self.assertIn(F('name'), d) self.assertEqual(d[F('name')], 'Bob') def test_not_equal_Value(self): f = F('name') value = Value('name') self.assertNotEqual(f, value) self.assertNotEqual(value, f) class ExpressionsTests(TestCase): def test_F_reuse(self): f = F('id') n = Number.objects.create(integer=-1) c = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith") ) c_qs = Company.objects.filter(id=f) self.assertEqual(c_qs.get(), c) # Reuse the same F-object for another queryset n_qs = Number.objects.filter(id=f) self.assertEqual(n_qs.get(), n) # The original query still works correctly self.assertEqual(c_qs.get(), c) def test_patterns_escape(self): r""" Special characters (e.g. 
%, _ and \) stored in database are properly escaped when using a pattern lookup with an expression refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%Joh\\n"), Employee(firstname="Johnny", lastname="%John"), Employee(firstname="Jean-Claude", lastname="Claud_"), Employee(firstname="Jean-Claude", lastname="Claude"), Employee(firstname="Jean-Claude", lastname="Claude%"), Employee(firstname="Johnny", lastname="Joh\\n"), Employee(firstname="Johnny", lastname="John"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__contains=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__startswith=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__endswith=F('lastname')), ["<Employee: Jean-Claude Claude>"], ordered=False, ) def test_insensitive_patterns_escape(self): r""" Special characters (e.g. %, _ and \) stored in database are properly escaped when using a case insensitive pattern lookup with an expression -- refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%joh\\n"), Employee(firstname="Johnny", lastname="%john"), Employee(firstname="Jean-Claude", lastname="claud_"), Employee(firstname="Jean-Claude", lastname="claude"), Employee(firstname="Jean-Claude", lastname="claude%"), Employee(firstname="Johnny", lastname="joh\\n"), Employee(firstname="Johnny", lastname="john"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__icontains=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__istartswith=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__iendswith=F('lastname')), ["<Employee: Jean-Claude claude>"], ordered=False, ) @isolate_apps('expressions') class SimpleExpressionTests(SimpleTestCase): def test_equal(self): self.assertEqual(Expression(), Expression()) self.assertEqual( Expression(IntegerField()), Expression(output_field=IntegerField()) ) self.assertEqual(Expression(IntegerField()), mock.ANY) self.assertNotEqual( Expression(IntegerField()), Expression(CharField()) ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( Expression(TestModel._meta.get_field('field')), Expression(TestModel._meta.get_field('other_field')), ) def test_hash(self): self.assertEqual(hash(Expression()), hash(Expression())) self.assertEqual( hash(Expression(IntegerField())), hash(Expression(output_field=IntegerField())) ) self.assertNotEqual( hash(Expression(IntegerField())), hash(Expression(CharField())), ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( hash(Expression(TestModel._meta.get_field('field'))), hash(Expression(TestModel._meta.get_field('other_field'))), ) class ExpressionsNumericTests(TestCase): @classmethod def setUpTestData(cls): Number(integer=-1).save() Number(integer=42).save() Number(integer=1337).save() Number.objects.update(float=F('integer')) def test_fill_with_value_from_same_object(self): """ We can fill a value in all objects with an 
other value of the same object. """ self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 42, 42.000>', '<Number: 1337, 1337.000>'], ordered=False ) def test_increment_value(self): """ We can increment a value of all objects in a query set. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_filter_not_equals_other_field(self): """ We can filter for objects, where a value is not equals the value of an other field. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.exclude(float=F('integer')), ['<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_complex_expressions(self): """ Complex expressions of different connection types are possible. """ n = Number.objects.create(integer=10, float=123.45) self.assertEqual(Number.objects.filter(pk=n.pk).update( float=F('integer') + F('float') * 2), 1) self.assertEqual(Number.objects.get(pk=n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)) class ExpressionOperatorTests(TestCase): @classmethod def setUpTestData(cls): cls.n = Number.objects.create(integer=42, float=15.5) cls.n1 = Number.objects.create(integer=-42, float=-15.5) def test_lefthand_addition(self): # LH Addition of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F('integer') + 15, float=F('float') + 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_lefthand_subtraction(self): # LH Subtraction of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)) def test_lefthand_multiplication(self): # Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_lefthand_division(self): # LH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)) def test_lefthand_modulo(self): # LH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2) def test_lefthand_bitwise_and(self): # LH Bitwise ands on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56)) Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64) def test_lefthand_bitwise_left_shift_operator(self): Number.objects.update(integer=F('integer').bitleftshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168) def 
test_lefthand_bitwise_right_shift_operator(self): Number.objects.update(integer=F('integer').bitrightshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11) def test_lefthand_bitwise_or(self): # LH Bitwise or on integers Number.objects.update(integer=F('integer').bitor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10) def test_lefthand_power(self): # LH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor(self): Number.objects.update(integer=F('integer').bitxor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 26) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor_null(self): employee = Employee.objects.create(firstname='John', lastname='Doe') Employee.objects.update(salary=F('salary').bitxor(48)) employee.refresh_from_db() self.assertIsNone(employee.salary) @unittest.skipUnless(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor_not_supported(self): msg = 'Bitwise XOR is not supported in Oracle.' with self.assertRaisesMessage(NotSupportedError, msg): Number.objects.update(integer=F('integer').bitxor(48)) def test_right_hand_addition(self): # Right hand operators Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float')) # RH Addition of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_right_hand_subtraction(self): Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float')) # RH Subtraction of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)) def test_right_hand_multiplication(self): # RH Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_right_hand_division(self): # RH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)) def test_right_hand_modulo(self): # RH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) def test_righthand_power(self): # RH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104) 
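        # 2 ** 42 == 4398046511104; the float assertion below checks
        # 1.5 ** 15.5, which is roughly 536.308.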
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)) class FTimeDeltaTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) midnight = datetime.time(0) delta0 = datetime.timedelta(0) delta1 = datetime.timedelta(microseconds=253000) delta2 = datetime.timedelta(seconds=44) delta3 = datetime.timedelta(hours=21, minutes=8) delta4 = datetime.timedelta(days=10) delta5 = datetime.timedelta(days=90) # Test data is set so that deltas and delays will be # strictly increasing. cls.deltas = [] cls.delays = [] cls.days_long = [] # e0: started same day as assigned, zero duration end = stime + delta0 cls.e0 = Experiment.objects.create( name='e0', assigned=sday, start=stime, end=end, completed=end.date(), estimated_time=delta0, ) cls.deltas.append(delta0) cls.delays.append(cls.e0.start - datetime.datetime.combine(cls.e0.assigned, midnight)) cls.days_long.append(cls.e0.completed - cls.e0.assigned) # e1: started one day after assigned, tiny duration, data # set so that end time has no fractional seconds, which # tests an edge case on sqlite. delay = datetime.timedelta(1) end = stime + delay + delta1 e1 = Experiment.objects.create( name='e1', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta1, ) cls.deltas.append(delta1) cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight)) cls.days_long.append(e1.completed - e1.assigned) # e2: started three days after assigned, small duration end = stime + delta2 e2 = Experiment.objects.create( name='e2', assigned=sday - datetime.timedelta(3), start=stime, end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1), ) cls.deltas.append(delta2) cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight)) cls.days_long.append(e2.completed - e2.assigned) # e3: started four days after assigned, medium duration delay = datetime.timedelta(4) end = stime + delay + delta3 e3 = Experiment.objects.create( name='e3', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3, ) cls.deltas.append(delta3) cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight)) cls.days_long.append(e3.completed - e3.assigned) # e4: started 10 days after assignment, long duration end = stime + delta4 e4 = Experiment.objects.create( name='e4', assigned=sday - datetime.timedelta(10), start=stime, end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1), ) cls.deltas.append(delta4) cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight)) cls.days_long.append(e4.completed - e4.assigned) # e5: started a month after assignment, very long duration delay = datetime.timedelta(30) end = stime + delay + delta5 e5 = Experiment.objects.create( name='e5', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta5, ) cls.deltas.append(delta5) cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight)) cls.days_long.append(e5.completed - e5.assigned) cls.expnames = [e.name for e in Experiment.objects.all()] def test_multiple_query_compilation(self): # Ticket #21643 queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) q1 = str(queryset.query) q2 = str(queryset.query) self.assertEqual(q1, q2) def test_query_clone(self): # Ticket #21643 - Crash when compiling query more than once qs = 
Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) qs2 = qs.all() list(qs) list(qs2) # Intentionally no assert def test_delta_add(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_subtract(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_exclude(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[i:]) test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[i + 1:]) def test_date_comparison(self): for i, days in enumerate(self.days_long): test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i + 1]) @skipUnlessDBFeature("supports_mixed_date_datetime_comparisons") def test_mixed_comparisons1(self): for i, delay in enumerate(self.delays): test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_mixed_comparisons2(self): for i, delay in enumerate(self.delays): delay = datetime.timedelta(delay.days) test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1)) ] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_update(self): for delta in self.deltas: exps = Experiment.objects.all() expected_durations = [e.duration() for e in exps] expected_starts = [e.start + delta for e in exps] expected_ends = [e.end + delta for e in exps] Experiment.objects.update(start=F('start') + delta, end=F('end') + delta) exps = Experiment.objects.all() new_starts = [e.start for e in exps] new_ends = [e.end for e in exps] new_durations = [e.duration() for e in exps] self.assertEqual(expected_starts, new_starts) self.assertEqual(expected_ends, new_ends) self.assertEqual(expected_durations, new_durations) def test_invalid_operator(self): with self.assertRaises(DatabaseError): list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0))) def test_durationfield_add(self): zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))] self.assertEqual(zeros, ['e0']) end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))] self.assertEqual(end_less, ['e2']) delta_math = [ e.name for e in Experiment.objects.filter(end__gte=F('start') + 
F('estimated_time') + datetime.timedelta(hours=1)) ] self.assertEqual(delta_math, ['e4']) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') + Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) def test_duration_expressions(self): for delta in self.deltas: qs = Experiment.objects.annotate(duration=F('estimated_time') + delta) for obj in qs: self.assertEqual(obj.duration, obj.estimated_time + delta) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subtraction(self): queryset = Experiment.objects.annotate( completion_duration=F('completed') - F('assigned'), ) at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))} self.assertEqual(at_least_5_days, {'e3', 'e4', 'e5'}) at_least_120_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=120))} self.assertEqual(at_least_120_days, {'e5'}) less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))} self.assertEqual(less_than_5_days, {'e0', 'e1', 'e2'}) queryset = Experiment.objects.annotate( difference=F('completed') - Value(None, output_field=DateField()), ) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('completed') - Value(None, output_field=DurationField()), output_field=DateField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('completed') queryset = Experiment.objects.annotate( difference=subquery - F('completed'), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_case_subtraction(self): queryset = Experiment.objects.annotate( date_case=Case( When(Q(name='e0'), then=F('completed')), output_field=DateField(), ), completed_value=Value( self.e0.completed, output_field=DateField(), ), difference=F('date_case') - F('completed_value'), ).filter(difference=datetime.timedelta()) self.assertEqual(queryset.get(), self.e0) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) queryset = Time.objects.annotate( difference=F('time') - Value(datetime.time(11, 15, 0)), ) self.assertEqual( queryset.get().difference, datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345) ) queryset = Time.objects.annotate( difference=F('time') - Value(None, output_field=TimeField()), ) self.assertIsNone(queryset.first().difference) queryset = Time.objects.annotate(shifted=ExpressionWrapper( F('time') - Value(None, output_field=DurationField()), output_field=TimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subquery_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) subquery = Time.objects.filter(pk=OuterRef('pk')).values('time') queryset = Time.objects.annotate( difference=subquery - F('time'), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction(self): under_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start')) ] self.assertEqual(under_estimate, ['e2']) 
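        # Subtracting two DateTimeFields resolves to a DurationField on
        # backends with supports_temporal_subtraction, so F('end') - F('start')
        # can be compared directly against estimated_time.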
over_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start')) ] self.assertEqual(over_estimate, ['e4']) queryset = Experiment.objects.annotate( difference=F('start') - Value(None, output_field=DateTimeField()), ) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') - Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('start') queryset = Experiment.objects.annotate( difference=subquery - F('start'), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) Experiment.objects.update(end=F('start') + delta) qs = Experiment.objects.annotate(delta=F('end') - F('start')) for e in qs: self.assertEqual(e.delta, delta) def test_duration_with_datetime(self): # Exclude e1 which has very high precision so we can test this on all # backends regardless of whether or not it supports # microsecond_precision. over_estimate = Experiment.objects.exclude(name='e1').filter( completed__gt=self.stime + F('estimated_time'), ).order_by('name') self.assertQuerysetEqual(over_estimate, ['e3', 'e4', 'e5'], lambda e: e.name) def test_duration_with_datetime_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) qs = Experiment.objects.annotate(dt=ExpressionWrapper( F('start') + delta, output_field=DateTimeField(), )) for e in qs: self.assertEqual(e.dt, e.start + delta) def test_date_minus_duration(self): more_than_4_days = Experiment.objects.filter( assigned__lt=F('completed') - Value(datetime.timedelta(days=4)) ) self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name) def test_negative_timedelta_update(self): # subtract 30 seconds, 30 minutes, 2 hours and 2 days experiments = Experiment.objects.filter(name='e0').annotate( start_sub_seconds=F('start') + datetime.timedelta(seconds=-30), ).annotate( start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30), ).annotate( start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2), ).annotate( new_start=F('start_sub_hours') + datetime.timedelta(days=-2), ) expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0) # subtract 30 microseconds experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30)) expected_start += datetime.timedelta(microseconds=+746970) experiments.update(start=F('new_start')) e0 = Experiment.objects.get(name='e0') self.assertEqual(e0.start, expected_start) class ValueTests(TestCase): def test_update_TimeField_using_Value(self): Time.objects.create() Time.objects.update(time=Value(datetime.time(1), output_field=TimeField())) self.assertEqual(Time.objects.get().time, datetime.time(1)) def test_update_UUIDField_using_Value(self): UUID.objects.create() UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField())) self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012')) def test_deconstruct(self): value = Value('name') path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) 
self.assertEqual(kwargs, {}) def test_deconstruct_output_field(self): value = Value('name', output_field=CharField()) path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(len(kwargs), 1) self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct()) def test_equal(self): value = Value('name') self.assertEqual(value, Value('name')) self.assertNotEqual(value, Value('username')) def test_hash(self): d = {Value('name'): 'Bob'} self.assertIn(Value('name'), d) self.assertEqual(d[Value('name')], 'Bob') def test_equal_output_field(self): value = Value('name', output_field=CharField()) same_value = Value('name', output_field=CharField()) other_value = Value('name', output_field=TimeField()) no_output_field = Value('name') self.assertEqual(value, same_value) self.assertNotEqual(value, other_value) self.assertNotEqual(value, no_output_field) def test_raise_empty_expressionlist(self): msg = 'ExpressionList requires at least one expression' with self.assertRaisesMessage(ValueError, msg): ExpressionList() def test_compile_unresolved(self): # This test might need to be revisited later on if #25425 is enforced. compiler = Time.objects.all().query.get_compiler(connection=connection) value = Value('foo') self.assertEqual(value.as_sql(compiler, connection), ('%s', ['foo'])) value = Value('foo', output_field=CharField()) self.assertEqual(value.as_sql(compiler, connection), ('%s', ['foo'])) def test_resolve_output_field(self): value_types = [ ('str', CharField), (True, BooleanField), (42, IntegerField), (3.14, FloatField), (datetime.date(2019, 5, 15), DateField), (datetime.datetime(2019, 5, 15), DateTimeField), (datetime.time(3, 16), TimeField), (datetime.timedelta(1), DurationField), (Decimal('3.14'), DecimalField), (b'', BinaryField), (uuid.uuid4(), UUIDField), ] for value, ouput_field_type in value_types: with self.subTest(type=type(value)): expr = Value(value) self.assertIsInstance(expr.output_field, ouput_field_type) def test_resolve_output_field_failure(self): msg = 'Cannot resolve expression type, unknown output_field' with self.assertRaisesMessage(FieldError, msg): Value(object()).output_field class ExistsTests(TestCase): def test_optimizations(self): with CaptureQueriesContext(connection) as context: list(Experiment.objects.values(exists=Exists( Experiment.objects.order_by('pk'), )).order_by()) captured_queries = context.captured_queries self.assertEqual(len(captured_queries), 1) captured_sql = captured_queries[0]['sql'] self.assertNotIn( connection.ops.quote_name(Experiment._meta.pk.column), captured_sql, ) self.assertIn( connection.ops.limit_offset_sql(None, 1), captured_sql, ) self.assertNotIn('ORDER BY', captured_sql) class FieldTransformTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) cls.ex1 = Experiment.objects.create( name='Experiment 1', assigned=sday, completed=sday + datetime.timedelta(2), estimated_time=datetime.timedelta(2), start=stime, end=stime + datetime.timedelta(2), ) def test_month_aggregation(self): self.assertEqual( Experiment.objects.aggregate(month_count=Count('assigned__month')), {'month_count': 1} ) def test_transform_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('assigned__month'), ["{'assigned__month': 6}"] ) def test_multiple_transforms_in_values(self): self.assertQuerysetEqual( 
Experiment.objects.values('end__date__month'), ["{'end__date__month': 6}"] ) class ReprTests(SimpleTestCase): def test_expressions(self): self.assertEqual( repr(Case(When(a=1))), "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>" ) self.assertEqual( repr(When(Q(age__gte=18), then=Value('legal'))), "<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value(legal)>" ) self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)") self.assertEqual(repr(F('published')), "F(published)") self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>") self.assertEqual( repr(ExpressionWrapper(F('cost') + F('tax'), IntegerField())), "ExpressionWrapper(F(cost) + F(tax))" ) self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)") self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)') self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])") self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))") self.assertEqual(repr(Value(1)), "Value(1)") self.assertEqual( repr(ExpressionList(F('col'), F('anothercol'))), 'ExpressionList(F(col), F(anothercol))' ) self.assertEqual( repr(ExpressionList(OrderBy(F('col'), descending=False))), 'ExpressionList(OrderBy(F(col), descending=False))' ) def test_functions(self): self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))") self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))") self.assertEqual(repr(Length('a')), "Length(F(a))") self.assertEqual(repr(Lower('a')), "Lower(F(a))") self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))") self.assertEqual(repr(Upper('a')), "Upper(F(a))") def test_aggregates(self): self.assertEqual(repr(Avg('a')), "Avg(F(a))") self.assertEqual(repr(Count('a')), "Count(F(a))") self.assertEqual(repr(Count('*')), "Count('*')") self.assertEqual(repr(Max('a')), "Max(F(a))") self.assertEqual(repr(Min('a')), "Min(F(a))") self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)") self.assertEqual(repr(Sum('a')), "Sum(F(a))") self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)") def test_distinct_aggregates(self): self.assertEqual(repr(Count('a', distinct=True)), "Count(F(a), distinct=True)") self.assertEqual(repr(Count('*', distinct=True)), "Count('*', distinct=True)") def test_filtered_aggregates(self): filter = Q(a=1) self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)") self.assertEqual(repr(Sum('a', filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))") self.assertEqual( repr(Variance('a', sample=True, filter=filter)), "Variance(F(a), filter=(AND: ('a', 1)), sample=True)" ) self.assertEqual( repr(Count('a', filter=filter, distinct=True)), "Count(F(a), distinct=True, filter=(AND: ('a', 1)))" ) class CombinableTests(SimpleTestCase): bitwise_msg = 'Use .bitand() and .bitor() for bitwise logical operations.' 
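    # On expressions, & and | would be ambiguous with the logical connectors
    # used when combining Q objects, so Combinable raises NotImplementedError
    # and directs callers to the explicit .bitand()/.bitor() methods instead.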
def test_negation(self): c = Combinable() self.assertEqual(-c, c * -1) def test_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() & Combinable() def test_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() | Combinable() def test_reversed_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() & Combinable() def test_reversed_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() | Combinable() class CombinedExpressionTests(SimpleTestCase): def test_resolve_output_field(self): tests = [ (IntegerField, AutoField, IntegerField), (AutoField, IntegerField, IntegerField), (IntegerField, DecimalField, DecimalField), (DecimalField, IntegerField, DecimalField), (IntegerField, FloatField, FloatField), (FloatField, IntegerField, FloatField), ] connectors = [Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV] for lhs, rhs, combined in tests: for connector in connectors: with self.subTest(lhs=lhs, connector=connector, rhs=rhs, combined=combined): expr = CombinedExpression( Expression(lhs()), connector, Expression(rhs()), ) self.assertIsInstance(expr.output_field, combined) class ExpressionWrapperTests(SimpleTestCase): def test_empty_group_by(self): expr = ExpressionWrapper(Value(3), output_field=IntegerField()) self.assertEqual(expr.get_group_by_cols(alias=None), []) def test_non_empty_group_by(self): value = Value('f') value.output_field = None expr = ExpressionWrapper(Lower(value), output_field=IntegerField()) group_by_cols = expr.get_group_by_cols(alias=None) self.assertEqual(group_by_cols, [expr.expression]) self.assertEqual(group_by_cols[0].output_field, expr.output_field)
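

# A minimal illustrative sketch (not exercised by the suite above) of the
# Exists/OuterRef pattern these tests cover: annotate each Employee with a
# boolean flag saying whether any Company row names it as CEO, then filter on
# that flag. The helper name is invented for illustration; Company, Employee,
# Exists and OuterRef are the names already imported at the top of this module.
def _exists_outerref_sketch():
    is_ceo = Company.objects.filter(ceo=OuterRef('pk'))
    return (
        Employee.objects
        .annotate(runs_a_company=Exists(is_ceo))
        .filter(runs_a_company=True)
        .values_list('firstname', flat=True)
    )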
from unittest import mock from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist from django.db import connection from django.db.models import Prefetch, QuerySet, prefetch_related_objects from django.db.models.query import get_prefetcher from django.db.models.sql import Query from django.test import TestCase, override_settings from django.test.utils import CaptureQueriesContext from .models import ( Article, Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book, Bookmark, BookReview, BookWithYear, Comment, Department, Employee, FavoriteAuthors, House, LessonEntry, ModelIterableSubclass, Person, Qualification, Reader, Room, TaggedItem, Teacher, WordEntry, ) class TestDataMixin: @classmethod def setUpTestData(cls): cls.book1 = Book.objects.create(title='Poems') cls.book2 = Book.objects.create(title='Jane Eyre') cls.book3 = Book.objects.create(title='Wuthering Heights') cls.book4 = Book.objects.create(title='Sense and Sensibility') cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1) cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1) cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1) cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4) cls.book1.authors.add(cls.author1, cls.author2, cls.author3) cls.book2.authors.add(cls.author1) cls.book3.authors.add(cls.author3) cls.book4.authors.add(cls.author4) cls.reader1 = Reader.objects.create(name='Amy') cls.reader2 = Reader.objects.create(name='Belinda') cls.reader1.books_read.add(cls.book1, cls.book4) cls.reader2.books_read.add(cls.book2, cls.book4) class PrefetchRelatedTests(TestDataMixin, TestCase): def assertWhereContains(self, sql, needle): where_idx = sql.index('WHERE') self.assertEqual( sql.count(str(needle), where_idx), 1, msg="WHERE clause doesn't contain %s, actual SQL: %s" % (needle, sql[where_idx:]) ) def test_m2m_forward(self): with self.assertNumQueries(2): lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')] normal_lists = [list(b.authors.all()) for b in Book.objects.all()] self.assertEqual(lists, normal_lists) def test_m2m_reverse(self): with self.assertNumQueries(2): lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')] normal_lists = [list(a.books.all()) for a in Author.objects.all()] self.assertEqual(lists, normal_lists) def test_foreignkey_forward(self): with self.assertNumQueries(2): books = [a.first_book for a in Author.objects.prefetch_related('first_book')] normal_books = [a.first_book for a in Author.objects.all()] self.assertEqual(books, normal_books) def test_foreignkey_reverse(self): with self.assertNumQueries(2): [list(b.first_time_authors.all()) for b in Book.objects.prefetch_related('first_time_authors')] self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"]) def test_onetoone_reverse_no_match(self): # Regression for #17439 with self.assertNumQueries(2): book = Book.objects.prefetch_related('bookwithyear').all()[0] with self.assertNumQueries(0): with self.assertRaises(BookWithYear.DoesNotExist): book.bookwithyear def test_onetoone_reverse_with_to_field_pk(self): """ A model (Bio) with a OneToOneField primary key (author) that references a non-pk field (name) on the related model (Author) is prefetchable. 
""" Bio.objects.bulk_create([ Bio(author=self.author1), Bio(author=self.author2), Bio(author=self.author3), ]) authors = Author.objects.filter( name__in=[self.author1, self.author2, self.author3], ).prefetch_related('bio') with self.assertNumQueries(2): for author in authors: self.assertEqual(author.name, author.bio.author.name) def test_survives_clone(self): with self.assertNumQueries(2): [list(b.first_time_authors.all()) for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)] def test_len(self): with self.assertNumQueries(2): qs = Book.objects.prefetch_related('first_time_authors') len(qs) [list(b.first_time_authors.all()) for b in qs] def test_bool(self): with self.assertNumQueries(2): qs = Book.objects.prefetch_related('first_time_authors') bool(qs) [list(b.first_time_authors.all()) for b in qs] def test_count(self): with self.assertNumQueries(2): qs = Book.objects.prefetch_related('first_time_authors') [b.first_time_authors.count() for b in qs] def test_exists(self): with self.assertNumQueries(2): qs = Book.objects.prefetch_related('first_time_authors') [b.first_time_authors.exists() for b in qs] def test_in_and_prefetch_related(self): """ Regression test for #20242 - QuerySet "in" didn't work the first time when using prefetch_related. This was fixed by the removal of chunked reads from QuerySet iteration in 70679243d1786e03557c28929f9762a119e3ac14. """ qs = Book.objects.prefetch_related('first_time_authors') self.assertIn(qs[0], qs) def test_clear(self): with self.assertNumQueries(5): with_prefetch = Author.objects.prefetch_related('books') without_prefetch = with_prefetch.prefetch_related(None) [list(a.books.all()) for a in without_prefetch] def test_m2m_then_m2m(self): """A m2m can be followed through another m2m.""" with self.assertNumQueries(3): qs = Author.objects.prefetch_related('books__read_by') lists = [[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs] self.assertEqual(lists, [ [["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre [["Amy"]], # Anne - Poems [["Amy"], []], # Emily - Poems, Wuthering Heights [["Amy", "Belinda"]], # Jane - Sense and Sense ]) def test_overriding_prefetch(self): with self.assertNumQueries(3): qs = Author.objects.prefetch_related('books', 'books__read_by') lists = [[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs] self.assertEqual(lists, [ [["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre [["Amy"]], # Anne - Poems [["Amy"], []], # Emily - Poems, Wuthering Heights [["Amy", "Belinda"]], # Jane - Sense and Sense ]) with self.assertNumQueries(3): qs = Author.objects.prefetch_related('books__read_by', 'books') lists = [[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs] self.assertEqual(lists, [ [["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre [["Amy"]], # Anne - Poems [["Amy"], []], # Emily - Poems, Wuthering Heights [["Amy", "Belinda"]], # Jane - Sense and Sense ]) def test_get(self): """ Objects retrieved with .get() get the prefetch behavior. """ # Need a double with self.assertNumQueries(3): author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte") lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()] self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre def test_foreign_key_then_m2m(self): """ A m2m relation can be followed after a relation like ForeignKey that doesn't have many objects. 
""" with self.assertNumQueries(2): qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by') lists = [[str(r) for r in a.first_book.read_by.all()] for a in qs] self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]]) def test_reverse_one_to_one_then_m2m(self): """ A m2m relation can be followed after going through the select_related reverse of an o2o. """ qs = Author.objects.prefetch_related('bio__books').select_related('bio') with self.assertNumQueries(1): list(qs.all()) Bio.objects.create(author=self.author1) with self.assertNumQueries(2): list(qs.all()) def test_attribute_error(self): qs = Reader.objects.all().prefetch_related('books_read__xyz') msg = ( "Cannot find 'xyz' on Book object, 'books_read__xyz' " "is an invalid parameter to prefetch_related()" ) with self.assertRaisesMessage(AttributeError, msg) as cm: list(qs) self.assertIn('prefetch_related', str(cm.exception)) def test_invalid_final_lookup(self): qs = Book.objects.prefetch_related('authors__name') msg = ( "'authors__name' does not resolve to an item that supports " "prefetching - this is an invalid parameter to prefetch_related()." ) with self.assertRaisesMessage(ValueError, msg) as cm: list(qs) self.assertIn('prefetch_related', str(cm.exception)) self.assertIn("name", str(cm.exception)) def test_prefetch_eq(self): prefetch_1 = Prefetch('authors', queryset=Author.objects.all()) prefetch_2 = Prefetch('books', queryset=Book.objects.all()) self.assertEqual(prefetch_1, prefetch_1) self.assertEqual(prefetch_1, mock.ANY) self.assertNotEqual(prefetch_1, prefetch_2) def test_forward_m2m_to_attr_conflict(self): msg = 'to_attr=authors conflicts with a field on the Book model.' authors = Author.objects.all() with self.assertRaisesMessage(ValueError, msg): list(Book.objects.prefetch_related( Prefetch('authors', queryset=authors, to_attr='authors'), )) # Without the ValueError, an author was deleted due to the implicit # save of the relation assignment. self.assertEqual(self.book1.authors.count(), 3) def test_reverse_m2m_to_attr_conflict(self): msg = 'to_attr=books conflicts with a field on the Author model.' poems = Book.objects.filter(title='Poems') with self.assertRaisesMessage(ValueError, msg): list(Author.objects.prefetch_related( Prefetch('books', queryset=poems, to_attr='books'), )) # Without the ValueError, a book was deleted due to the implicit # save of reverse relation assignment. self.assertEqual(self.author1.books.count(), 2) def test_m2m_then_reverse_fk_object_ids(self): with CaptureQueriesContext(connection) as queries: list(Book.objects.prefetch_related('authors__addresses')) sql = queries[-1]['sql'] self.assertWhereContains(sql, self.author1.name) def test_m2m_then_m2m_object_ids(self): with CaptureQueriesContext(connection) as queries: list(Book.objects.prefetch_related('authors__favorite_authors')) sql = queries[-1]['sql'] self.assertWhereContains(sql, self.author1.name) def test_m2m_then_reverse_one_to_one_object_ids(self): with CaptureQueriesContext(connection) as queries: list(Book.objects.prefetch_related('authors__authorwithage')) sql = queries[-1]['sql'] self.assertWhereContains(sql, self.author1.id) def test_filter_deferred(self): """ Related filtering of prefetched querysets is deferred on m2m and reverse m2o relations until necessary. 
""" add_q = Query.add_q for relation in ['authors', 'first_time_authors']: with self.subTest(relation=relation): with mock.patch.object( Query, 'add_q', autospec=True, side_effect=lambda self, q: add_q(self, q), ) as add_q_mock: list(Book.objects.prefetch_related(relation)) self.assertEqual(add_q_mock.call_count, 1) class RawQuerySetTests(TestDataMixin, TestCase): def test_basic(self): with self.assertNumQueries(2): books = Book.objects.raw( "SELECT * FROM prefetch_related_book WHERE id = %s", (self.book1.id,) ).prefetch_related('authors') book1 = list(books)[0] with self.assertNumQueries(0): self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3]) def test_prefetch_before_raw(self): with self.assertNumQueries(2): books = Book.objects.prefetch_related('authors').raw( "SELECT * FROM prefetch_related_book WHERE id = %s", (self.book1.id,) ) book1 = list(books)[0] with self.assertNumQueries(0): self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3]) def test_clear(self): with self.assertNumQueries(5): with_prefetch = Author.objects.raw( "SELECT * FROM prefetch_related_author" ).prefetch_related('books') without_prefetch = with_prefetch.prefetch_related(None) [list(a.books.all()) for a in without_prefetch] class CustomPrefetchTests(TestCase): @classmethod def traverse_qs(cls, obj_iter, path): """ Helper method that returns a list containing a list of the objects in the obj_iter. Then for each object in the obj_iter, the path will be recursively travelled and the found objects are added to the return value. """ ret_val = [] if hasattr(obj_iter, 'all'): obj_iter = obj_iter.all() try: iter(obj_iter) except TypeError: obj_iter = [obj_iter] for obj in obj_iter: rel_objs = [] for part in path: if not part: continue try: related = getattr(obj, part[0]) except ObjectDoesNotExist: continue if related is not None: rel_objs.extend(cls.traverse_qs(related, [part[1:]])) ret_val.append((obj, rel_objs)) return ret_val @classmethod def setUpTestData(cls): cls.person1 = Person.objects.create(name='Joe') cls.person2 = Person.objects.create(name='Mary') # Set main_room for each house before creating the next one for # databases where supports_nullable_unique_constraints is False. 
cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1) cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1) cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1) cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1) cls.house1.main_room = cls.room1_1 cls.house1.save() cls.person1.houses.add(cls.house1) cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1) cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2) cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2) cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2) cls.house2.main_room = cls.room2_1 cls.house2.save() cls.person1.houses.add(cls.house2) cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2) cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3) cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3) cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3) cls.house3.main_room = cls.room3_1 cls.house3.save() cls.person2.houses.add(cls.house3) cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2) cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4) cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4) cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4) cls.house4.main_room = cls.room4_1 cls.house4.save() cls.person2.houses.add(cls.house4) def test_traverse_qs(self): qs = Person.objects.prefetch_related('houses') related_objs_normal = [list(p.houses.all()) for p in qs], related_objs_from_traverse = [[inner[0] for inner in o[1]] for o in self.traverse_qs(qs, [['houses']])] self.assertEqual(related_objs_normal, (related_objs_from_traverse,)) def test_ambiguous(self): # Ambiguous: Lookup was already seen with a different queryset. msg = ( "'houses' lookup was already seen with a different queryset. You " "may need to adjust the ordering of your lookups." ) # lookup.queryset shouldn't be evaluated. with self.assertNumQueries(3): with self.assertRaisesMessage(ValueError, msg): self.traverse_qs( Person.objects.prefetch_related( 'houses__rooms', Prefetch('houses', queryset=House.objects.all()), ), [['houses', 'rooms']], ) # Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms. msg = ( "Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is " "an invalid parameter to prefetch_related()" ) with self.assertRaisesMessage(AttributeError, msg): self.traverse_qs( Person.objects.prefetch_related( 'houses_lst__rooms', Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst') ), [['houses', 'rooms']] ) # Not ambiguous. self.traverse_qs( Person.objects.prefetch_related('houses__rooms', 'houses'), [['houses', 'rooms']] ) self.traverse_qs( Person.objects.prefetch_related( 'houses__rooms', Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst') ), [['houses', 'rooms']] ) def test_m2m(self): # Control lookups. with self.assertNumQueries(2): lst1 = self.traverse_qs( Person.objects.prefetch_related('houses'), [['houses']] ) # Test lookups. 
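        # Prefetch('houses') without an explicit queryset should behave exactly
        # like the plain 'houses' string lookup above; to_attr only changes
        # where the results are stored, not how many queries run.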
with self.assertNumQueries(2): lst2 = self.traverse_qs( Person.objects.prefetch_related(Prefetch('houses')), [['houses']] ) self.assertEqual(lst1, lst2) with self.assertNumQueries(2): lst2 = self.traverse_qs( Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')), [['houses_lst']] ) self.assertEqual(lst1, lst2) def test_reverse_m2m(self): # Control lookups. with self.assertNumQueries(2): lst1 = self.traverse_qs( House.objects.prefetch_related('occupants'), [['occupants']] ) # Test lookups. with self.assertNumQueries(2): lst2 = self.traverse_qs( House.objects.prefetch_related(Prefetch('occupants')), [['occupants']] ) self.assertEqual(lst1, lst2) with self.assertNumQueries(2): lst2 = self.traverse_qs( House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')), [['occupants_lst']] ) self.assertEqual(lst1, lst2) def test_m2m_through_fk(self): # Control lookups. with self.assertNumQueries(3): lst1 = self.traverse_qs( Room.objects.prefetch_related('house__occupants'), [['house', 'occupants']] ) # Test lookups. with self.assertNumQueries(3): lst2 = self.traverse_qs( Room.objects.prefetch_related(Prefetch('house__occupants')), [['house', 'occupants']] ) self.assertEqual(lst1, lst2) with self.assertNumQueries(3): lst2 = self.traverse_qs( Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')), [['house', 'occupants_lst']] ) self.assertEqual(lst1, lst2) def test_m2m_through_gfk(self): TaggedItem.objects.create(tag="houses", content_object=self.house1) TaggedItem.objects.create(tag="houses", content_object=self.house2) # Control lookups. with self.assertNumQueries(3): lst1 = self.traverse_qs( TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'), [['content_object', 'rooms']] ) # Test lookups. with self.assertNumQueries(3): lst2 = self.traverse_qs( TaggedItem.objects.prefetch_related( Prefetch('content_object'), Prefetch('content_object__rooms', to_attr='rooms_lst') ), [['content_object', 'rooms_lst']] ) self.assertEqual(lst1, lst2) def test_o2m_through_m2m(self): # Control lookups. with self.assertNumQueries(3): lst1 = self.traverse_qs( Person.objects.prefetch_related('houses', 'houses__rooms'), [['houses', 'rooms']] ) # Test lookups. with self.assertNumQueries(3): lst2 = self.traverse_qs( Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'), [['houses', 'rooms']] ) self.assertEqual(lst1, lst2) with self.assertNumQueries(3): lst2 = self.traverse_qs( Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')), [['houses', 'rooms']] ) self.assertEqual(lst1, lst2) with self.assertNumQueries(3): lst2 = self.traverse_qs( Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'), [['houses_lst', 'rooms']] ) self.assertEqual(lst1, lst2) with self.assertNumQueries(3): lst2 = self.traverse_qs( Person.objects.prefetch_related( Prefetch('houses', to_attr='houses_lst'), Prefetch('houses_lst__rooms', to_attr='rooms_lst') ), [['houses_lst', 'rooms_lst']] ) self.assertEqual(lst1, lst2) def test_generic_rel(self): bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/') TaggedItem.objects.create(content_object=bookmark, tag='django') TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python') # Control lookups. 
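        # Presumably 4 queries: the bookmarks, their tags, the tags' content
        # objects (grouped per content type; all Bookmarks here), and
        # favorite_tags.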
with self.assertNumQueries(4): lst1 = self.traverse_qs( Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'), [['tags', 'content_object'], ['favorite_tags']] ) # Test lookups. with self.assertNumQueries(4): lst2 = self.traverse_qs( Bookmark.objects.prefetch_related( Prefetch('tags', to_attr='tags_lst'), Prefetch('tags_lst__content_object'), Prefetch('favorite_tags'), ), [['tags_lst', 'content_object'], ['favorite_tags']] ) self.assertEqual(lst1, lst2) def test_traverse_single_item_property(self): # Control lookups. with self.assertNumQueries(5): lst1 = self.traverse_qs( Person.objects.prefetch_related( 'houses__rooms', 'primary_house__occupants__houses', ), [['primary_house', 'occupants', 'houses']] ) # Test lookups. with self.assertNumQueries(5): lst2 = self.traverse_qs( Person.objects.prefetch_related( 'houses__rooms', Prefetch('primary_house__occupants', to_attr='occupants_lst'), 'primary_house__occupants_lst__houses', ), [['primary_house', 'occupants_lst', 'houses']] ) self.assertEqual(lst1, lst2) def test_traverse_multiple_items_property(self): # Control lookups. with self.assertNumQueries(4): lst1 = self.traverse_qs( Person.objects.prefetch_related( 'houses', 'all_houses__occupants__houses', ), [['all_houses', 'occupants', 'houses']] ) # Test lookups. with self.assertNumQueries(4): lst2 = self.traverse_qs( Person.objects.prefetch_related( 'houses', Prefetch('all_houses__occupants', to_attr='occupants_lst'), 'all_houses__occupants_lst__houses', ), [['all_houses', 'occupants_lst', 'houses']] ) self.assertEqual(lst1, lst2) def test_custom_qs(self): # Test basic. with self.assertNumQueries(2): lst1 = list(Person.objects.prefetch_related('houses')) with self.assertNumQueries(2): lst2 = list(Person.objects.prefetch_related( Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst'))) self.assertEqual( self.traverse_qs(lst1, [['houses']]), self.traverse_qs(lst2, [['houses_lst']]) ) # Test queryset filtering. with self.assertNumQueries(2): lst2 = list( Person.objects.prefetch_related( Prefetch( 'houses', queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]), to_attr='houses_lst', ) ) ) self.assertEqual(len(lst2[0].houses_lst), 1) self.assertEqual(lst2[0].houses_lst[0], self.house1) self.assertEqual(len(lst2[1].houses_lst), 1) self.assertEqual(lst2[1].houses_lst[0], self.house3) # Test flattened. with self.assertNumQueries(3): lst1 = list(Person.objects.prefetch_related('houses__rooms')) with self.assertNumQueries(3): lst2 = list(Person.objects.prefetch_related( Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst'))) self.assertEqual( self.traverse_qs(lst1, [['houses', 'rooms']]), self.traverse_qs(lst2, [['houses', 'rooms_lst']]) ) # Test inner select_related. with self.assertNumQueries(3): lst1 = list(Person.objects.prefetch_related('houses__owner')) with self.assertNumQueries(2): lst2 = list(Person.objects.prefetch_related( Prefetch('houses', queryset=House.objects.select_related('owner')))) self.assertEqual( self.traverse_qs(lst1, [['houses', 'owner']]), self.traverse_qs(lst2, [['houses', 'owner']]) ) # Test inner prefetch. 
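        # A Prefetch queryset may itself use prefetch_related(); the nested
        # lookups are resolved against the outer 'houses' results. Roughly the
        # same shape as this sketch (not executed here):
        #
        #   Person.objects.prefetch_related(
        #       Prefetch('houses', queryset=House.objects.prefetch_related('rooms')),
        #   )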
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk]) houses_qs_prf = House.objects.prefetch_related( Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst')) with self.assertNumQueries(4): lst2 = list(Person.objects.prefetch_related( Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'), Prefetch('houses_lst__rooms_lst__main_room_of') )) self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2) self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1) self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2) self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1) self.assertEqual(len(lst2[1].houses_lst), 0) # Test ForwardManyToOneDescriptor. houses = House.objects.select_related('owner') with self.assertNumQueries(6): rooms = Room.objects.all().prefetch_related('house') lst1 = self.traverse_qs(rooms, [['house', 'owner']]) with self.assertNumQueries(2): rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all())) lst2 = self.traverse_qs(rooms, [['house', 'owner']]) self.assertEqual(lst1, lst2) with self.assertNumQueries(2): houses = House.objects.select_related('owner') rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr')) lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']]) self.assertEqual(lst1, lst2) room = Room.objects.all().prefetch_related( Prefetch('house', queryset=houses.filter(address='DoesNotExist')) ).first() with self.assertRaises(ObjectDoesNotExist): getattr(room, 'house') room = Room.objects.all().prefetch_related( Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr') ).first() self.assertIsNone(room.house_attr) rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name'))) with self.assertNumQueries(2): getattr(rooms.first().house, 'name') with self.assertNumQueries(3): getattr(rooms.first().house, 'address') # Test ReverseOneToOneDescriptor. houses = House.objects.select_related('owner') with self.assertNumQueries(6): rooms = Room.objects.all().prefetch_related('main_room_of') lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']]) with self.assertNumQueries(2): rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all())) lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']]) self.assertEqual(lst1, lst2) with self.assertNumQueries(2): rooms = list( Room.objects.all().prefetch_related( Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr') ) ) lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']]) self.assertEqual(lst1, lst2) room = Room.objects.filter(main_room_of__isnull=False).prefetch_related( Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist')) ).first() with self.assertRaises(ObjectDoesNotExist): getattr(room, 'main_room_of') room = Room.objects.filter(main_room_of__isnull=False).prefetch_related( Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr') ).first() self.assertIsNone(room.main_room_of_attr) # The custom queryset filters should be applied to the queryset # instance returned by the manager. 
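        # That is, person.houses.all() and the re-chained
        # person.houses.all().all() below must both stay limited to the
        # filtered Prefetch queryset.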
person = Person.objects.prefetch_related( Prefetch('houses', queryset=House.objects.filter(name='House 1')), ).get(pk=self.person1.pk) self.assertEqual( list(person.houses.all()), list(person.houses.all().all()), ) def test_nested_prefetch_related_are_not_overwritten(self): # Regression test for #24873 houses_2 = House.objects.prefetch_related(Prefetch('rooms')) persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2)) houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons)) list(houses) # queryset must be evaluated once to reproduce the bug. self.assertEqual( houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0], self.room2_1 ) def test_nested_prefetch_related_with_duplicate_prefetcher(self): """ Nested prefetches whose name clashes with descriptor names (Person.houses here) are allowed. """ occupants = Person.objects.prefetch_related( Prefetch('houses', to_attr='some_attr_name'), Prefetch('houses', queryset=House.objects.prefetch_related('main_room')), ) houses = House.objects.prefetch_related(Prefetch('occupants', queryset=occupants)) with self.assertNumQueries(5): self.traverse_qs(list(houses), [['occupants', 'houses', 'main_room']]) def test_values_queryset(self): msg = 'Prefetch querysets cannot use raw(), values(), and values_list().' with self.assertRaisesMessage(ValueError, msg): Prefetch('houses', House.objects.values('pk')) with self.assertRaisesMessage(ValueError, msg): Prefetch('houses', House.objects.values_list('pk')) # That error doesn't affect managers with custom ModelIterable subclasses self.assertIs(Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass) Prefetch('teachers', Teacher.objects_custom.all()) def test_raw_queryset(self): msg = 'Prefetch querysets cannot use raw(), values(), and values_list().' with self.assertRaisesMessage(ValueError, msg): Prefetch('houses', House.objects.raw('select pk from house')) def test_to_attr_doesnt_cache_through_attr_as_list(self): house = House.objects.prefetch_related( Prefetch('rooms', queryset=Room.objects.all(), to_attr='to_rooms'), ).get(pk=self.house3.pk) self.assertIsInstance(house.rooms.all(), QuerySet) def test_to_attr_cached_property(self): persons = Person.objects.prefetch_related( Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'), ) for person in persons: # To bypass caching at the related descriptor level, don't use # person.houses.all() here. all_houses = list(House.objects.filter(occupants=person)) with self.assertNumQueries(0): self.assertEqual(person.cached_all_houses, all_houses) def test_filter_deferred(self): """ Related filtering of prefetched querysets is deferred until necessary. 
""" add_q = Query.add_q with mock.patch.object( Query, 'add_q', autospec=True, side_effect=lambda self, q: add_q(self, q), ) as add_q_mock: list(House.objects.prefetch_related( Prefetch('occupants', queryset=Person.objects.all()) )) self.assertEqual(add_q_mock.call_count, 1) class DefaultManagerTests(TestCase): @classmethod def setUpTestData(cls): cls.qual1 = Qualification.objects.create(name='BA') cls.qual2 = Qualification.objects.create(name='BSci') cls.qual3 = Qualification.objects.create(name='MA') cls.qual4 = Qualification.objects.create(name='PhD') cls.teacher1 = Teacher.objects.create(name='Mr Cleese') cls.teacher2 = Teacher.objects.create(name='Mr Idle') cls.teacher3 = Teacher.objects.create(name='Mr Chapman') cls.teacher1.qualifications.add(cls.qual1, cls.qual2, cls.qual3, cls.qual4) cls.teacher2.qualifications.add(cls.qual1) cls.teacher3.qualifications.add(cls.qual2) cls.dept1 = Department.objects.create(name='English') cls.dept2 = Department.objects.create(name='Physics') cls.dept1.teachers.add(cls.teacher1, cls.teacher2) cls.dept2.teachers.add(cls.teacher1, cls.teacher3) def test_m2m_then_m2m(self): with self.assertNumQueries(3): # When we prefetch the teachers, and force the query, we don't want # the default manager on teachers to immediately get all the related # qualifications, since this will do one query per teacher. qs = Department.objects.prefetch_related('teachers') depts = "".join("%s department: %s\n" % (dept.name, ", ".join(str(t) for t in dept.teachers.all())) for dept in qs) self.assertEqual(depts, "English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n" "Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n") class GenericRelationTests(TestCase): @classmethod def setUpTestData(cls): book1 = Book.objects.create(title="Winnie the Pooh") book2 = Book.objects.create(title="Do you like green eggs and spam?") book3 = Book.objects.create(title="Three Men In A Boat") reader1 = Reader.objects.create(name="me") reader2 = Reader.objects.create(name="you") reader3 = Reader.objects.create(name="someone") book1.read_by.add(reader1, reader2) book2.read_by.add(reader2) book3.read_by.add(reader3) cls.book1, cls.book2, cls.book3 = book1, book2, book3 cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3 def test_prefetch_GFK(self): TaggedItem.objects.create(tag="awesome", content_object=self.book1) TaggedItem.objects.create(tag="great", content_object=self.reader1) TaggedItem.objects.create(tag="outstanding", content_object=self.book2) TaggedItem.objects.create(tag="amazing", content_object=self.reader3) # 1 for TaggedItem table, 1 for Book table, 1 for Reader table with self.assertNumQueries(3): qs = TaggedItem.objects.prefetch_related('content_object') list(qs) def test_prefetch_GFK_nonint_pk(self): Comment.objects.create(comment="awesome", content_object=self.book1) # 1 for Comment table, 1 for Book table with self.assertNumQueries(2): qs = Comment.objects.prefetch_related('content_object') [c.content_object for c in qs] def test_prefetch_GFK_uuid_pk(self): article = Article.objects.create(name='Django') Comment.objects.create(comment='awesome', content_object_uuid=article) qs = Comment.objects.prefetch_related('content_object_uuid') self.assertEqual([c.content_object_uuid for c in qs], [article]) def test_prefetch_GFK_fk_pk(self): book = Book.objects.create(title='Poems') book_with_year = BookWithYear.objects.create(book=book, published_year=2019) Comment.objects.create(comment='awesome', content_object=book_with_year) qs = 
Comment.objects.prefetch_related('content_object') self.assertEqual([c.content_object for c in qs], [book_with_year]) def test_traverse_GFK(self): """ A 'content_object' can be traversed with prefetch_related() and get to related objects on the other side (assuming it is suitably filtered) """ TaggedItem.objects.create(tag="awesome", content_object=self.book1) TaggedItem.objects.create(tag="awesome", content_object=self.book2) TaggedItem.objects.create(tag="awesome", content_object=self.book3) TaggedItem.objects.create(tag="awesome", content_object=self.reader1) TaggedItem.objects.create(tag="awesome", content_object=self.reader2) ct = ContentType.objects.get_for_model(Book) # We get 3 queries - 1 for main query, 1 for content_objects since they # all use the same table, and 1 for the 'read_by' relation. with self.assertNumQueries(3): # If we limit to books, we know that they will have 'read_by' # attributes, so the following makes sense: qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by') readers_of_awesome_books = {r.name for tag in qs for r in tag.content_object.read_by.all()} self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"}) def test_nullable_GFK(self): TaggedItem.objects.create(tag="awesome", content_object=self.book1, created_by=self.reader1) TaggedItem.objects.create(tag="great", content_object=self.book2) TaggedItem.objects.create(tag="rubbish", content_object=self.book3) with self.assertNumQueries(2): result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')] self.assertEqual(result, [t.created_by for t in TaggedItem.objects.all()]) def test_generic_relation(self): bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/') TaggedItem.objects.create(content_object=bookmark, tag='django') TaggedItem.objects.create(content_object=bookmark, tag='python') with self.assertNumQueries(2): tags = [t.tag for b in Bookmark.objects.prefetch_related('tags') for t in b.tags.all()] self.assertEqual(sorted(tags), ["django", "python"]) def test_charfield_GFK(self): b = Bookmark.objects.create(url='http://www.djangoproject.com/') TaggedItem.objects.create(content_object=b, tag='django') TaggedItem.objects.create(content_object=b, favorite=b, tag='python') with self.assertNumQueries(3): bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0] self.assertEqual(sorted(i.tag for i in bookmark.tags.all()), ["django", "python"]) self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"]) def test_custom_queryset(self): bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/') django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django') TaggedItem.objects.create(content_object=bookmark, tag='python') with self.assertNumQueries(2): bookmark = Bookmark.objects.prefetch_related( Prefetch('tags', TaggedItem.objects.filter(tag='django')), ).get() with self.assertNumQueries(0): self.assertEqual(list(bookmark.tags.all()), [django_tag]) # The custom queryset filters should be applied to the queryset # instance returned by the manager. 
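        # i.e. chaining .all() a second time on the prefetched manager must not
        # drop the tag='django' filter.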
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all())) class MultiTableInheritanceTest(TestCase): @classmethod def setUpTestData(cls): cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010) cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011) cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50) cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49) cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48) cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1') cls.book2.aged_authors.add(cls.author2, cls.author3) cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1') cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2') def test_foreignkey(self): with self.assertNumQueries(2): qs = AuthorWithAge.objects.prefetch_related('addresses') addresses = [[str(address) for address in obj.addresses.all()] for obj in qs] self.assertEqual(addresses, [[str(self.author_address)], [], []]) def test_foreignkey_to_inherited(self): with self.assertNumQueries(2): qs = BookReview.objects.prefetch_related('book') titles = [obj.book.title for obj in qs] self.assertEqual(titles, ["Poems", "More poems"]) def test_m2m_to_inheriting_model(self): qs = AuthorWithAge.objects.prefetch_related('books_with_year') with self.assertNumQueries(2): lst = [[str(book) for book in author.books_with_year.all()] for author in qs] qs = AuthorWithAge.objects.all() lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs] self.assertEqual(lst, lst2) qs = BookWithYear.objects.prefetch_related('aged_authors') with self.assertNumQueries(2): lst = [[str(author) for author in book.aged_authors.all()] for book in qs] qs = BookWithYear.objects.all() lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs] self.assertEqual(lst, lst2) def test_parent_link_prefetch(self): with self.assertNumQueries(2): [a.author for a in AuthorWithAge.objects.prefetch_related('author')] @override_settings(DEBUG=True) def test_child_link_prefetch(self): with self.assertNumQueries(2): authors = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')] # Regression for #18090: the prefetching query must include an IN clause. # Note that on Oracle the table name is upper case in the generated SQL, # thus the .lower() call. 
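        # The prefetch query should look roughly like (spelling varies per
        # backend):
        #   SELECT ... FROM ..._authorwithage WHERE ... IN (...)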
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower()) self.assertIn(' IN ', connection.queries[-1]['sql']) self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()]) class ForeignKeyToFieldTest(TestCase): @classmethod def setUpTestData(cls): cls.book = Book.objects.create(title='Poems') cls.author1 = Author.objects.create(name='Jane', first_book=cls.book) cls.author2 = Author.objects.create(name='Tom', first_book=cls.book) cls.author3 = Author.objects.create(name='Robert', first_book=cls.book) cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1') FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2) FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3) FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1) def test_foreignkey(self): with self.assertNumQueries(2): qs = Author.objects.prefetch_related('addresses') addresses = [[str(address) for address in obj.addresses.all()] for obj in qs] self.assertEqual(addresses, [[str(self.author_address)], [], []]) def test_m2m(self): with self.assertNumQueries(3): qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me') favorites = [( [str(i_like) for i_like in author.favorite_authors.all()], [str(likes_me) for likes_me in author.favors_me.all()] ) for author in qs] self.assertEqual( favorites, [ ([str(self.author2)], [str(self.author3)]), ([str(self.author3)], [str(self.author1)]), ([str(self.author1)], [str(self.author2)]) ] ) class LookupOrderingTest(TestCase): """ Test cases that demonstrate that ordering of lookups is important, and ensure it is preserved. """ @classmethod def setUpTestData(cls): person1 = Person.objects.create(name='Joe') person2 = Person.objects.create(name='Mary') # Set main_room for each house before creating the next one for # databases where supports_nullable_unique_constraints is False. 
house1 = House.objects.create(address='123 Main St') room1_1 = Room.objects.create(name='Dining room', house=house1) Room.objects.create(name='Lounge', house=house1) Room.objects.create(name='Kitchen', house=house1) house1.main_room = room1_1 house1.save() person1.houses.add(house1) house2 = House.objects.create(address='45 Side St') room2_1 = Room.objects.create(name='Dining room', house=house2) Room.objects.create(name='Lounge', house=house2) house2.main_room = room2_1 house2.save() person1.houses.add(house2) house3 = House.objects.create(address='6 Downing St') room3_1 = Room.objects.create(name='Dining room', house=house3) Room.objects.create(name='Lounge', house=house3) Room.objects.create(name='Kitchen', house=house3) house3.main_room = room3_1 house3.save() person2.houses.add(house3) house4 = House.objects.create(address='7 Regents St') room4_1 = Room.objects.create(name='Dining room', house=house4) Room.objects.create(name='Lounge', house=house4) house4.main_room = room4_1 house4.save() person2.houses.add(house4) def test_order(self): with self.assertNumQueries(4): # The following two queries must be done in the same order as written, # otherwise 'primary_house' will cause non-prefetched lookups qs = Person.objects.prefetch_related('houses__rooms', 'primary_house__occupants') [list(p.primary_house.occupants.all()) for p in qs] class NullableTest(TestCase): @classmethod def setUpTestData(cls): boss = Employee.objects.create(name="Peter") Employee.objects.create(name="Joe", boss=boss) Employee.objects.create(name="Angela", boss=boss) def test_traverse_nullable(self): # Because we use select_related() for 'boss', it doesn't need to be # prefetched, but we can still traverse it although it contains some nulls with self.assertNumQueries(2): qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs') co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs] qs2 = Employee.objects.select_related('boss') co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2] self.assertEqual(co_serfs, co_serfs2) def test_prefetch_nullable(self): # One for main employee, one for boss, one for serfs with self.assertNumQueries(3): qs = Employee.objects.prefetch_related('boss__serfs') co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs] qs2 = Employee.objects.all() co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2] self.assertEqual(co_serfs, co_serfs2) def test_in_bulk(self): """ In-bulk does correctly prefetch objects by not using .iterator() directly. """ boss1 = Employee.objects.create(name="Peter") boss2 = Employee.objects.create(name="Jack") with self.assertNumQueries(2): # Prefetch is done and it does not cause any errors. 
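            # in_bulk() returns a {pk: employee} dict; each employee's 'serfs'
            # results should already be cached, so the loop below adds nothing
            # on top of the two queries counted here.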
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk]) for b in bulk.values(): list(b.serfs.all()) class MultiDbTests(TestCase): databases = {'default', 'other'} def test_using_is_honored_m2m(self): B = Book.objects.using('other') A = Author.objects.using('other') book1 = B.create(title="Poems") book2 = B.create(title="Jane Eyre") book3 = B.create(title="Wuthering Heights") book4 = B.create(title="Sense and Sensibility") author1 = A.create(name="Charlotte", first_book=book1) author2 = A.create(name="Anne", first_book=book1) author3 = A.create(name="Emily", first_book=book1) author4 = A.create(name="Jane", first_book=book4) book1.authors.add(author1, author2, author3) book2.authors.add(author1) book3.authors.add(author3) book4.authors.add(author4) # Forward qs1 = B.prefetch_related('authors') with self.assertNumQueries(2, using='other'): books = "".join("%s (%s)\n" % (book.title, ", ".join(a.name for a in book.authors.all())) for book in qs1) self.assertEqual(books, "Poems (Charlotte, Anne, Emily)\n" "Jane Eyre (Charlotte)\n" "Wuthering Heights (Emily)\n" "Sense and Sensibility (Jane)\n") # Reverse qs2 = A.prefetch_related('books') with self.assertNumQueries(2, using='other'): authors = "".join("%s: %s\n" % (author.name, ", ".join(b.title for b in author.books.all())) for author in qs2) self.assertEqual(authors, "Charlotte: Poems, Jane Eyre\n" "Anne: Poems\n" "Emily: Poems, Wuthering Heights\n" "Jane: Sense and Sensibility\n") def test_using_is_honored_fkey(self): B = Book.objects.using('other') A = Author.objects.using('other') book1 = B.create(title="Poems") book2 = B.create(title="Sense and Sensibility") A.create(name="Charlotte Bronte", first_book=book1) A.create(name="Jane Austen", first_book=book2) # Forward with self.assertNumQueries(2, using='other'): books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book')) self.assertEqual("Poems, Sense and Sensibility", books) # Reverse with self.assertNumQueries(2, using='other'): books = "".join("%s (%s)\n" % (b.title, ", ".join(a.name for a in b.first_time_authors.all())) for b in B.prefetch_related('first_time_authors')) self.assertEqual(books, "Poems (Charlotte Bronte)\n" "Sense and Sensibility (Jane Austen)\n") def test_using_is_honored_inheritance(self): B = BookWithYear.objects.using('other') A = AuthorWithAge.objects.using('other') book1 = B.create(title="Poems", published_year=2010) B.create(title="More poems", published_year=2011) A.create(name='Jane', first_book=book1, age=50) A.create(name='Tom', first_book=book1, age=49) # parent link with self.assertNumQueries(2, using='other'): authors = ", ".join(a.author.name for a in A.prefetch_related('author')) self.assertEqual(authors, "Jane, Tom") # child link with self.assertNumQueries(2, using='other'): ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage')) self.assertEqual(ages, "50, 49") def test_using_is_honored_custom_qs(self): B = Book.objects.using('other') A = Author.objects.using('other') book1 = B.create(title="Poems") book2 = B.create(title="Sense and Sensibility") A.create(name="Charlotte Bronte", first_book=book1) A.create(name="Jane Austen", first_book=book2) # Implicit hinting with self.assertNumQueries(2, using='other'): prefetch = Prefetch('first_time_authors', queryset=Author.objects.all()) books = "".join("%s (%s)\n" % (b.title, ", ".join(a.name for a in b.first_time_authors.all())) for b in B.prefetch_related(prefetch)) self.assertEqual(books, "Poems (Charlotte Bronte)\n" "Sense and 
Sensibility (Jane Austen)\n") # Explicit using on the same db. with self.assertNumQueries(2, using='other'): prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other')) books = "".join("%s (%s)\n" % (b.title, ", ".join(a.name for a in b.first_time_authors.all())) for b in B.prefetch_related(prefetch)) self.assertEqual(books, "Poems (Charlotte Bronte)\n" "Sense and Sensibility (Jane Austen)\n") # Explicit using on a different db. with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'): prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default')) books = "".join("%s (%s)\n" % (b.title, ", ".join(a.name for a in b.first_time_authors.all())) for b in B.prefetch_related(prefetch)) self.assertEqual(books, "Poems ()\n" "Sense and Sensibility ()\n") class Ticket19607Tests(TestCase): @classmethod def setUpTestData(cls): LessonEntry.objects.bulk_create( LessonEntry(id=id_, name1=name1, name2=name2) for id_, name1, name2 in [ (1, 'einfach', 'simple'), (2, 'schwierig', 'difficult'), ] ) WordEntry.objects.bulk_create( WordEntry(id=id_, lesson_entry_id=lesson_entry_id, name=name) for id_, lesson_entry_id, name in [ (1, 1, 'einfach'), (2, 1, 'simple'), (3, 2, 'schwierig'), (4, 2, 'difficult'), ] ) def test_bug(self): list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set')) class Ticket21410Tests(TestCase): @classmethod def setUpTestData(cls): book1 = Book.objects.create(title='Poems') book2 = Book.objects.create(title='Jane Eyre') book3 = Book.objects.create(title='Wuthering Heights') book4 = Book.objects.create(title='Sense and Sensibility') author1 = Author2.objects.create(name='Charlotte', first_book=book1) author2 = Author2.objects.create(name='Anne', first_book=book1) author3 = Author2.objects.create(name='Emily', first_book=book1) author4 = Author2.objects.create(name='Jane', first_book=book4) author1.favorite_books.add(book1, book2, book3) author2.favorite_books.add(book1) author3.favorite_books.add(book2) author4.favorite_books.add(book3) def test_bug(self): list(Author2.objects.prefetch_related('first_book', 'favorite_books')) class Ticket21760Tests(TestCase): @classmethod def setUpTestData(cls): cls.rooms = [] for _ in range(3): house = House.objects.create() for _ in range(3): cls.rooms.append(Room.objects.create(house=house)) # Set main_room for each house before creating the next one for # databases where supports_nullable_unique_constraints is False. house.main_room = cls.rooms[-3] house.save() def test_bug(self): prefetcher = get_prefetcher(self.rooms[0], 'house', 'house')[0] queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0] self.assertNotIn(' JOIN ', str(queryset.query)) class DirectPrefetchedObjectCacheReuseTests(TestCase): """ prefetch_related() reuses objects fetched in _prefetched_objects_cache. When objects are prefetched and not stored as an instance attribute (often intermediary relationships), they are saved to the _prefetched_objects_cache attribute. prefetch_related() takes _prefetched_objects_cache into account when determining whether an object has been fetched[1] and retrieves results from it when it is populated [2]. 
[1]: #25546 (duplicate queries on nested Prefetch) [2]: #27554 (queryset evaluation fails with a mix of nested and flattened prefetches) """ @classmethod def setUpTestData(cls): cls.book1, cls.book2 = [ Book.objects.create(title='book1'), Book.objects.create(title='book2'), ] cls.author11, cls.author12, cls.author21 = [ Author.objects.create(first_book=cls.book1, name='Author11'), Author.objects.create(first_book=cls.book1, name='Author12'), Author.objects.create(first_book=cls.book2, name='Author21'), ] cls.author1_address1, cls.author1_address2, cls.author2_address1 = [ AuthorAddress.objects.create(author=cls.author11, address='Happy place'), AuthorAddress.objects.create(author=cls.author12, address='Haunted house'), AuthorAddress.objects.create(author=cls.author21, address='Happy place'), ] cls.bookwithyear1 = BookWithYear.objects.create(title='Poems', published_year=2010) cls.bookreview1 = BookReview.objects.create(book=cls.bookwithyear1) def test_detect_is_fetched(self): """ Nested prefetch_related() shouldn't trigger duplicate queries for the same lookup. """ with self.assertNumQueries(3): books = Book.objects.filter( title__in=['book1', 'book2'], ).prefetch_related( Prefetch( 'first_time_authors', Author.objects.prefetch_related( Prefetch( 'addresses', AuthorAddress.objects.filter(address='Happy place'), ) ), ), ) book1, book2 = list(books) with self.assertNumQueries(0): self.assertSequenceEqual(book1.first_time_authors.all(), [self.author11, self.author12]) self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21]) self.assertSequenceEqual(book1.first_time_authors.all()[0].addresses.all(), [self.author1_address1]) self.assertSequenceEqual(book1.first_time_authors.all()[1].addresses.all(), []) self.assertSequenceEqual(book2.first_time_authors.all()[0].addresses.all(), [self.author2_address1]) self.assertEqual( list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all()) ) self.assertEqual( list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all()) ) self.assertEqual( list(book1.first_time_authors.all()[0].addresses.all()), list(book1.first_time_authors.all()[0].addresses.all().all()) ) self.assertEqual( list(book1.first_time_authors.all()[1].addresses.all()), list(book1.first_time_authors.all()[1].addresses.all().all()) ) self.assertEqual( list(book2.first_time_authors.all()[0].addresses.all()), list(book2.first_time_authors.all()[0].addresses.all().all()) ) def test_detect_is_fetched_with_to_attr(self): with self.assertNumQueries(3): books = Book.objects.filter( title__in=['book1', 'book2'], ).prefetch_related( Prefetch( 'first_time_authors', Author.objects.prefetch_related( Prefetch( 'addresses', AuthorAddress.objects.filter(address='Happy place'), to_attr='happy_place', ) ), to_attr='first_authors', ), ) book1, book2 = list(books) with self.assertNumQueries(0): self.assertEqual(book1.first_authors, [self.author11, self.author12]) self.assertEqual(book2.first_authors, [self.author21]) self.assertEqual(book1.first_authors[0].happy_place, [self.author1_address1]) self.assertEqual(book1.first_authors[1].happy_place, []) self.assertEqual(book2.first_authors[0].happy_place, [self.author2_address1]) def test_prefetch_reverse_foreign_key(self): with self.assertNumQueries(2): bookwithyear1, = BookWithYear.objects.prefetch_related('bookreview_set') with self.assertNumQueries(0): self.assertCountEqual(bookwithyear1.bookreview_set.all(), [self.bookreview1]) with self.assertNumQueries(0): prefetch_related_objects([bookwithyear1], 
'bookreview_set') def test_add_clears_prefetched_objects(self): bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk) prefetch_related_objects([bookwithyear], 'bookreview_set') self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1]) new_review = BookReview.objects.create() bookwithyear.bookreview_set.add(new_review) self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1, new_review]) def test_remove_clears_prefetched_objects(self): bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk) prefetch_related_objects([bookwithyear], 'bookreview_set') self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1]) bookwithyear.bookreview_set.remove(self.bookreview1) self.assertCountEqual(bookwithyear.bookreview_set.all(), []) class ReadPrefetchedObjectsCacheTests(TestCase): @classmethod def setUpTestData(cls): cls.book1 = Book.objects.create(title='Les confessions Volume I') cls.book2 = Book.objects.create(title='Candide') cls.author1 = AuthorWithAge.objects.create(name='Rousseau', first_book=cls.book1, age=70) cls.author2 = AuthorWithAge.objects.create(name='Voltaire', first_book=cls.book2, age=65) cls.book1.authors.add(cls.author1) cls.book2.authors.add(cls.author2) FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2) def test_retrieves_results_from_prefetched_objects_cache(self): """ When intermediary results are prefetched without a destination attribute, they are saved in the RelatedManager's cache (_prefetched_objects_cache). prefetch_related() uses this cache (#27554). """ authors = AuthorWithAge.objects.prefetch_related( Prefetch( 'author', queryset=Author.objects.prefetch_related( # Results are saved in the RelatedManager's cache # (_prefetched_objects_cache) and do not replace the # RelatedManager on Author instances (favorite_authors) Prefetch('favorite_authors__first_book'), ), ), ) with self.assertNumQueries(4): # AuthorWithAge -> Author -> FavoriteAuthors, Book self.assertQuerysetEqual(authors, ['<AuthorWithAge: Rousseau>', '<AuthorWithAge: Voltaire>'])
2acb5903b88d72bb8773ab1803b234579b783c341ba91447754a0756e7a0b4cd
from unittest import mock, skipUnless from django.db import DatabaseError, connection from django.db.models import Index from django.test import TransactionTestCase, skipUnlessDBFeature from .models import ( Article, ArticleReporter, CheckConstraintModel, City, Comment, Country, District, Reporter, ) class IntrospectionTests(TransactionTestCase): available_apps = ['introspection'] def test_table_names(self): tl = connection.introspection.table_names() self.assertEqual(tl, sorted(tl)) self.assertIn(Reporter._meta.db_table, tl, "'%s' isn't in table_list()." % Reporter._meta.db_table) self.assertIn(Article._meta.db_table, tl, "'%s' isn't in table_list()." % Article._meta.db_table) def test_django_table_names(self): with connection.cursor() as cursor: cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);') tl = connection.introspection.django_table_names() cursor.execute("DROP TABLE django_ixn_test_table;") self.assertNotIn('django_ixn_test_table', tl, "django_table_names() returned a non-Django table") def test_django_table_names_retval_type(self): # Table name is a list #15216 tl = connection.introspection.django_table_names(only_existing=True) self.assertIs(type(tl), list) tl = connection.introspection.django_table_names(only_existing=False) self.assertIs(type(tl), list) def test_table_names_with_views(self): with connection.cursor() as cursor: try: cursor.execute( 'CREATE VIEW introspection_article_view AS SELECT headline ' 'from introspection_article;') except DatabaseError as e: if 'insufficient privileges' in str(e): self.fail("The test user has no CREATE VIEW privileges") else: raise try: self.assertIn('introspection_article_view', connection.introspection.table_names(include_views=True)) self.assertNotIn('introspection_article_view', connection.introspection.table_names()) finally: with connection.cursor() as cursor: cursor.execute('DROP VIEW introspection_article_view') def test_unmanaged_through_model(self): tables = connection.introspection.django_table_names() self.assertNotIn(ArticleReporter._meta.db_table, tables) def test_installed_models(self): tables = [Article._meta.db_table, Reporter._meta.db_table] models = connection.introspection.installed_models(tables) self.assertEqual(models, {Article, Reporter}) def test_sequence_list(self): sequences = connection.introspection.sequence_list() reporter_seqs = [seq for seq in sequences if seq['table'] == Reporter._meta.db_table] self.assertEqual(len(reporter_seqs), 1, 'Reporter sequence not found in sequence_list()') self.assertEqual(reporter_seqs[0]['column'], 'id') def test_get_table_description_names(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual([r[0] for r in desc], [f.column for f in Reporter._meta.fields]) def test_get_table_description_types(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [connection.introspection.get_field_type(r[1], r) for r in desc], [ connection.features.introspected_field_types[field] for field in ( 'AutoField', 'CharField', 'CharField', 'CharField', 'BigIntegerField', 'BinaryField', 'SmallIntegerField', 'DurationField', ) ], ) def test_get_table_description_col_lengths(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [r[3] for r in desc if connection.introspection.get_field_type(r[1], r) == 
'CharField'], [30, 30, 254] ) def test_get_table_description_nullable(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) nullable_by_backend = connection.features.interprets_empty_strings_as_nulls self.assertEqual( [r[6] for r in desc], [False, nullable_by_backend, nullable_by_backend, nullable_by_backend, True, True, False, False] ) def test_bigautofield(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, City._meta.db_table) self.assertIn( connection.features.introspected_field_types['BigAutoField'], [connection.introspection.get_field_type(r[1], r) for r in desc], ) def test_smallautofield(self): with connection.cursor() as cursor: desc = connection.introspection.get_table_description(cursor, Country._meta.db_table) self.assertIn( connection.features.introspected_field_types['SmallAutoField'], [connection.introspection.get_field_type(r[1], r) for r in desc], ) # Regression test for #9991 - 'real' types in postgres @skipUnlessDBFeature('has_real_datatype') def test_postgresql_real_type(self): with connection.cursor() as cursor: cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);") desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table') cursor.execute('DROP TABLE django_ixn_real_test_table;') self.assertEqual(connection.introspection.get_field_type(desc[0][1], desc[0]), 'FloatField') @skipUnlessDBFeature('can_introspect_foreign_keys') def test_get_relations(self): with connection.cursor() as cursor: relations = connection.introspection.get_relations(cursor, Article._meta.db_table) # That's {field_name: (field_name_other_table, other_table)} expected_relations = { 'reporter_id': ('id', Reporter._meta.db_table), 'response_to_id': ('id', Article._meta.db_table), } self.assertEqual(relations, expected_relations) # Removing a field shouldn't disturb get_relations (#17785) body = Article._meta.get_field('body') with connection.schema_editor() as editor: editor.remove_field(Article, body) with connection.cursor() as cursor: relations = connection.introspection.get_relations(cursor, Article._meta.db_table) with connection.schema_editor() as editor: editor.add_field(Article, body) self.assertEqual(relations, expected_relations) @skipUnless(connection.vendor == 'sqlite', "This is an sqlite-specific issue") def test_get_relations_alt_format(self): """ With SQLite, foreign keys can be added with different syntaxes and formatting. 
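        The two statements below differ only in whether a space follows
        FOREIGN KEY; both must yield the same relations mapping.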
""" create_table_statements = [ "CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY(art_id) REFERENCES {}(id));", "CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY (art_id) REFERENCES {}(id));" ] for statement in create_table_statements: with connection.cursor() as cursor: cursor.fetchone = mock.Mock(return_value=[statement.format(Article._meta.db_table), 'table']) relations = connection.introspection.get_relations(cursor, 'mocked_table') self.assertEqual(relations, {'art_id': ('id', Article._meta.db_table)}) @skipUnlessDBFeature('can_introspect_foreign_keys') def test_get_key_columns(self): with connection.cursor() as cursor: key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table) self.assertEqual(set(key_columns), { ('reporter_id', Reporter._meta.db_table, 'id'), ('response_to_id', Article._meta.db_table, 'id'), }) def test_get_primary_key_column(self): with connection.cursor() as cursor: primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table) pk_fk_column = connection.introspection.get_primary_key_column(cursor, District._meta.db_table) self.assertEqual(primary_key_column, 'id') self.assertEqual(pk_fk_column, 'city_id') def test_get_constraints_index_types(self): with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table) index = {} index2 = {} for val in constraints.values(): if val['columns'] == ['headline', 'pub_date']: index = val if val['columns'] == ['headline', 'response_to_id', 'pub_date', 'reporter_id']: index2 = val self.assertEqual(index['type'], Index.suffix) self.assertEqual(index2['type'], Index.suffix) @skipUnlessDBFeature('supports_index_column_ordering') def test_get_constraints_indexes_orders(self): """ Indexes have the 'orders' key with a list of 'ASC'/'DESC' values. 
""" with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table) indexes_verified = 0 expected_columns = [ ['headline', 'pub_date'], ['headline', 'response_to_id', 'pub_date', 'reporter_id'], ] if connection.features.indexes_foreign_keys: expected_columns += [ ['reporter_id'], ['response_to_id'], ] for val in constraints.values(): if val['index'] and not (val['primary_key'] or val['unique']): self.assertIn(val['columns'], expected_columns) self.assertEqual(val['orders'], ['ASC'] * len(val['columns'])) indexes_verified += 1 self.assertEqual(indexes_verified, len(expected_columns)) def test_get_constraints(self): def assertDetails(details, cols, primary_key=False, unique=False, index=False, check=False, foreign_key=None): # Different backends have different values for same constraints: # PRIMARY KEY UNIQUE CONSTRAINT UNIQUE INDEX # MySQL pk=1 uniq=1 idx=1 pk=0 uniq=1 idx=1 pk=0 uniq=1 idx=1 # PostgreSQL pk=1 uniq=1 idx=0 pk=0 uniq=1 idx=0 pk=0 uniq=1 idx=1 # SQLite pk=1 uniq=0 idx=0 pk=0 uniq=1 idx=0 pk=0 uniq=1 idx=1 if details['primary_key']: details['unique'] = True if details['unique']: details['index'] = False self.assertEqual(details['columns'], cols) self.assertEqual(details['primary_key'], primary_key) self.assertEqual(details['unique'], unique) self.assertEqual(details['index'], index) self.assertEqual(details['check'], check) self.assertEqual(details['foreign_key'], foreign_key) # Test custom constraints custom_constraints = { 'article_email_pub_date_uniq', 'email_pub_date_idx', } with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, Comment._meta.db_table) if ( connection.features.supports_column_check_constraints and connection.features.can_introspect_check_constraints ): constraints.update( connection.introspection.get_constraints(cursor, CheckConstraintModel._meta.db_table) ) custom_constraints.add('up_votes_gte_0_check') assertDetails(constraints['up_votes_gte_0_check'], ['up_votes'], check=True) assertDetails(constraints['article_email_pub_date_uniq'], ['article_id', 'email', 'pub_date'], unique=True) assertDetails(constraints['email_pub_date_idx'], ['email', 'pub_date'], index=True) # Test field constraints field_constraints = set() for name, details in constraints.items(): if name in custom_constraints: continue elif details['columns'] == ['up_votes'] and details['check']: assertDetails(details, ['up_votes'], check=True) field_constraints.add(name) elif details['columns'] == ['voting_number'] and details['check']: assertDetails(details, ['voting_number'], check=True) field_constraints.add(name) elif details['columns'] == ['ref'] and details['unique']: assertDetails(details, ['ref'], unique=True) field_constraints.add(name) elif details['columns'] == ['voting_number'] and details['unique']: assertDetails(details, ['voting_number'], unique=True) field_constraints.add(name) elif details['columns'] == ['article_id'] and details['index']: assertDetails(details, ['article_id'], index=True) field_constraints.add(name) elif details['columns'] == ['id'] and details['primary_key']: assertDetails(details, ['id'], primary_key=True, unique=True) field_constraints.add(name) elif details['columns'] == ['article_id'] and details['foreign_key']: assertDetails(details, ['article_id'], foreign_key=('introspection_article', 'id')) field_constraints.add(name) elif details['check']: # Some databases (e.g. Oracle) include additional check # constraints. 
field_constraints.add(name) # All constraints are accounted for. self.assertEqual(constraints.keys() ^ (custom_constraints | field_constraints), set())
1f394c247bdc29920b33d2a13afcba7ba87fe54c840c2670151c4389156eaae3
import datetime import decimal import gettext as gettext_module import os import pickle import re import tempfile from contextlib import contextmanager from importlib import import_module from pathlib import Path from unittest import mock from asgiref.local import Local from django import forms from django.apps import AppConfig from django.conf import settings from django.conf.locale import LANG_INFO from django.conf.urls.i18n import i18n_patterns from django.template import Context, Template from django.test import ( RequestFactory, SimpleTestCase, TestCase, override_settings, ) from django.utils import translation from django.utils.deprecation import RemovedInDjango40Warning from django.utils.formats import ( date_format, get_format, get_format_modules, iter_format_modules, localize, localize_input, reset_format_cache, sanitize_separators, time_format, ) from django.utils.numberformat import format as nformat from django.utils.safestring import SafeString, mark_safe from django.utils.translation import ( LANGUAGE_SESSION_KEY, activate, check_for_language, deactivate, get_language, get_language_bidi, get_language_from_request, get_language_info, gettext, gettext_lazy, ngettext, ngettext_lazy, npgettext, npgettext_lazy, pgettext, round_away_from_one, to_language, to_locale, trans_null, trans_real, ugettext, ugettext_lazy, ugettext_noop, ungettext, ungettext_lazy, ) from django.utils.translation.reloader import ( translation_file_changed, watch_for_translation_changes, ) from .forms import CompanyForm, I18nForm, SelectDateForm from .models import Company, TestModel here = os.path.dirname(os.path.abspath(__file__)) extended_locale_paths = settings.LOCALE_PATHS + [ os.path.join(here, 'other', 'locale'), ] class AppModuleStub: def __init__(self, **kwargs): self.__dict__.update(kwargs) @contextmanager def patch_formats(lang, **settings): from django.utils.formats import _format_cache # Populate _format_cache with temporary values for key, value in settings.items(): _format_cache[(key, lang)] = value try: yield finally: reset_format_cache() class TranslationTests(SimpleTestCase): @translation.override('de') def test_legacy_aliases(self): """ Pre-Django 2.0 aliases with u prefix are still available. """ msg = ( 'django.utils.translation.ugettext_noop() is deprecated in favor ' 'of django.utils.translation.gettext_noop().' ) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual(ugettext_noop("Image"), "Image") msg = ( 'django.utils.translation.ugettext() is deprecated in favor of ' 'django.utils.translation.gettext().' ) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual(ugettext("Image"), "Bild") msg = ( 'django.utils.translation.ugettext_lazy() is deprecated in favor ' 'of django.utils.translation.gettext_lazy().' ) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual(ugettext_lazy("Image"), gettext_lazy("Image")) msg = ( 'django.utils.translation.ungettext() is deprecated in favor of ' 'django.utils.translation.ngettext().' ) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual(ungettext("%d year", "%d years", 0) % 0, "0 Jahre") msg = ( 'django.utils.translation.ungettext_lazy() is deprecated in favor ' 'of django.utils.translation.ngettext_lazy().' 
) with self.assertWarnsMessage(RemovedInDjango40Warning, msg): self.assertEqual( ungettext_lazy("%d year", "%d years", 0) % 0, ngettext_lazy("%d year", "%d years", 0) % 0, ) @translation.override('fr') def test_plural(self): """ Test plurals with ngettext. French differs from English in that 0 is singular. """ self.assertEqual(ngettext("%d year", "%d years", 0) % 0, "0 année") self.assertEqual(ngettext("%d year", "%d years", 2) % 2, "2 années") self.assertEqual(ngettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}, "0 octet") self.assertEqual(ngettext("%(size)d byte", "%(size)d bytes", 2) % {'size': 2}, "2 octets") def test_plural_null(self): g = trans_null.ngettext self.assertEqual(g('%d year', '%d years', 0) % 0, '0 years') self.assertEqual(g('%d year', '%d years', 1) % 1, '1 year') self.assertEqual(g('%d year', '%d years', 2) % 2, '2 years') @override_settings(LOCALE_PATHS=extended_locale_paths) @translation.override('fr') def test_multiple_plurals_per_language(self): """ Normally, French has 2 plurals. As other/locale/fr/LC_MESSAGES/django.po has a different plural equation with 3 plurals, this tests if those plural are honored. """ self.assertEqual(ngettext("%d singular", "%d plural", 0) % 0, "0 pluriel1") self.assertEqual(ngettext("%d singular", "%d plural", 1) % 1, "1 singulier") self.assertEqual(ngettext("%d singular", "%d plural", 2) % 2, "2 pluriel2") french = trans_real.catalog() # Internal _catalog can query subcatalogs (from different po files). self.assertEqual(french._catalog[('%d singular', 0)], '%d singulier') self.assertEqual(french._catalog[('%d hour', 0)], '%d heure') def test_override(self): activate('de') try: with translation.override('pl'): self.assertEqual(get_language(), 'pl') self.assertEqual(get_language(), 'de') with translation.override(None): self.assertIsNone(get_language()) with translation.override('pl'): pass self.assertIsNone(get_language()) self.assertEqual(get_language(), 'de') finally: deactivate() def test_override_decorator(self): @translation.override('pl') def func_pl(): self.assertEqual(get_language(), 'pl') @translation.override(None) def func_none(): self.assertIsNone(get_language()) try: activate('de') func_pl() self.assertEqual(get_language(), 'de') func_none() self.assertEqual(get_language(), 'de') finally: deactivate() def test_override_exit(self): """ The language restored is the one used when the function was called, not the one used when the decorator was initialized (#23381). """ activate('fr') @translation.override('pl') def func_pl(): pass deactivate() try: activate('en') func_pl() self.assertEqual(get_language(), 'en') finally: deactivate() def test_lazy_objects(self): """ Format string interpolation should work with *_lazy objects. """ s = gettext_lazy('Add %(name)s') d = {'name': 'Ringo'} self.assertEqual('Add Ringo', s % d) with translation.override('de', deactivate=True): self.assertEqual('Ringo hinzuf\xfcgen', s % d) with translation.override('pl'): self.assertEqual('Dodaj Ringo', s % d) # It should be possible to compare *_lazy objects. 
s1 = gettext_lazy('Add %(name)s') self.assertEqual(s, s1) s2 = gettext_lazy('Add %(name)s') s3 = gettext_lazy('Add %(name)s') self.assertEqual(s2, s3) self.assertEqual(s, s2) s4 = gettext_lazy('Some other string') self.assertNotEqual(s, s4) def test_lazy_pickle(self): s1 = gettext_lazy("test") self.assertEqual(str(s1), "test") s2 = pickle.loads(pickle.dumps(s1)) self.assertEqual(str(s2), "test") @override_settings(LOCALE_PATHS=extended_locale_paths) def test_ngettext_lazy(self): simple_with_format = ngettext_lazy('%d good result', '%d good results') simple_context_with_format = npgettext_lazy('Exclamation', '%d good result', '%d good results') simple_without_format = ngettext_lazy('good result', 'good results') with translation.override('de'): self.assertEqual(simple_with_format % 1, '1 gutes Resultat') self.assertEqual(simple_with_format % 4, '4 guten Resultate') self.assertEqual(simple_context_with_format % 1, '1 gutes Resultat!') self.assertEqual(simple_context_with_format % 4, '4 guten Resultate!') self.assertEqual(simple_without_format % 1, 'gutes Resultat') self.assertEqual(simple_without_format % 4, 'guten Resultate') complex_nonlazy = ngettext_lazy('Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4) complex_deferred = ngettext_lazy( 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num' ) complex_context_nonlazy = npgettext_lazy( 'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4 ) complex_context_deferred = npgettext_lazy( 'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num' ) with translation.override('de'): self.assertEqual(complex_nonlazy % {'num': 4, 'name': 'Jim'}, 'Hallo Jim, 4 guten Resultate') self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 1}, 'Hallo Jim, 1 gutes Resultat') self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 5}, 'Hallo Jim, 5 guten Resultate') with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'): complex_deferred % {'name': 'Jim'} self.assertEqual(complex_context_nonlazy % {'num': 4, 'name': 'Jim'}, 'Willkommen Jim, 4 guten Resultate') self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 1}, 'Willkommen Jim, 1 gutes Resultat') self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 5}, 'Willkommen Jim, 5 guten Resultate') with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'): complex_context_deferred % {'name': 'Jim'} @override_settings(LOCALE_PATHS=extended_locale_paths) def test_ngettext_lazy_format_style(self): simple_with_format = ngettext_lazy('{} good result', '{} good results') simple_context_with_format = npgettext_lazy('Exclamation', '{} good result', '{} good results') with translation.override('de'): self.assertEqual(simple_with_format.format(1), '1 gutes Resultat') self.assertEqual(simple_with_format.format(4), '4 guten Resultate') self.assertEqual(simple_context_with_format.format(1), '1 gutes Resultat!') self.assertEqual(simple_context_with_format.format(4), '4 guten Resultate!') complex_nonlazy = ngettext_lazy('Hi {name}, {num} good result', 'Hi {name}, {num} good results', 4) complex_deferred = ngettext_lazy( 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 'num' ) complex_context_nonlazy = npgettext_lazy( 'Greeting', 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 4 ) complex_context_deferred = npgettext_lazy( 'Greeting', 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 'num' ) with 
translation.override('de'): self.assertEqual(complex_nonlazy.format(num=4, name='Jim'), 'Hallo Jim, 4 guten Resultate') self.assertEqual(complex_deferred.format(name='Jim', num=1), 'Hallo Jim, 1 gutes Resultat') self.assertEqual(complex_deferred.format(name='Jim', num=5), 'Hallo Jim, 5 guten Resultate') with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'): complex_deferred.format(name='Jim') self.assertEqual(complex_context_nonlazy.format(num=4, name='Jim'), 'Willkommen Jim, 4 guten Resultate') self.assertEqual(complex_context_deferred.format(name='Jim', num=1), 'Willkommen Jim, 1 gutes Resultat') self.assertEqual(complex_context_deferred.format(name='Jim', num=5), 'Willkommen Jim, 5 guten Resultate') with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'): complex_context_deferred.format(name='Jim') def test_ngettext_lazy_bool(self): self.assertTrue(ngettext_lazy('%d good result', '%d good results')) self.assertFalse(ngettext_lazy('', '')) def test_ngettext_lazy_pickle(self): s1 = ngettext_lazy('%d good result', '%d good results') self.assertEqual(s1 % 1, '1 good result') self.assertEqual(s1 % 8, '8 good results') s2 = pickle.loads(pickle.dumps(s1)) self.assertEqual(s2 % 1, '1 good result') self.assertEqual(s2 % 8, '8 good results') @override_settings(LOCALE_PATHS=extended_locale_paths) def test_pgettext(self): trans_real._active = Local() trans_real._translations = {} with translation.override('de'): self.assertEqual(pgettext("unexisting", "May"), "May") self.assertEqual(pgettext("month name", "May"), "Mai") self.assertEqual(pgettext("verb", "May"), "Kann") self.assertEqual(npgettext("search", "%d result", "%d results", 4) % 4, "4 Resultate") def test_empty_value(self): """Empty value must stay empty after being translated (#23196).""" with translation.override('de'): self.assertEqual('', gettext('')) s = mark_safe('') self.assertEqual(s, gettext(s)) @override_settings(LOCALE_PATHS=extended_locale_paths) def test_safe_status(self): """ Translating a string requiring no auto-escaping with gettext or pgettext shouldn't change the "safe" status. """ trans_real._active = Local() trans_real._translations = {} s1 = mark_safe('Password') s2 = mark_safe('May') with translation.override('de', deactivate=True): self.assertIs(type(gettext(s1)), SafeString) self.assertIs(type(pgettext('month name', s2)), SafeString) self.assertEqual('aPassword', SafeString('a') + s1) self.assertEqual('Passworda', s1 + SafeString('a')) self.assertEqual('Passworda', s1 + mark_safe('a')) self.assertEqual('aPassword', mark_safe('a') + s1) self.assertEqual('as', mark_safe('a') + mark_safe('s')) def test_maclines(self): """ Translations on files with Mac or DOS end of lines will be converted to unix EOF in .po catalogs. """ ca_translation = trans_real.translation('ca') ca_translation._catalog['Mac\nEOF\n'] = 'Catalan Mac\nEOF\n' ca_translation._catalog['Win\nEOF\n'] = 'Catalan Win\nEOF\n' with translation.override('ca', deactivate=True): self.assertEqual('Catalan Mac\nEOF\n', gettext('Mac\rEOF\r')) self.assertEqual('Catalan Win\nEOF\n', gettext('Win\r\nEOF\r\n')) def test_to_locale(self): tests = ( ('en', 'en'), ('EN', 'en'), ('en-us', 'en_US'), ('EN-US', 'en_US'), # With > 2 characters after the dash. ('sr-latn', 'sr_Latn'), ('sr-LATN', 'sr_Latn'), # With private use subtag (x-informal). 
('nl-nl-x-informal', 'nl_NL-x-informal'), ('NL-NL-X-INFORMAL', 'nl_NL-x-informal'), ('sr-latn-x-informal', 'sr_Latn-x-informal'), ('SR-LATN-X-INFORMAL', 'sr_Latn-x-informal'), ) for lang, locale in tests: with self.subTest(lang=lang): self.assertEqual(to_locale(lang), locale) def test_to_language(self): self.assertEqual(to_language('en_US'), 'en-us') self.assertEqual(to_language('sr_Lat'), 'sr-lat') def test_language_bidi(self): self.assertIs(get_language_bidi(), False) with translation.override(None): self.assertIs(get_language_bidi(), False) def test_language_bidi_null(self): self.assertIs(trans_null.get_language_bidi(), False) with override_settings(LANGUAGE_CODE='he'): self.assertIs(get_language_bidi(), True) class TranslationLoadingTests(SimpleTestCase): def setUp(self): """Clear translation state.""" self._old_language = get_language() self._old_translations = trans_real._translations deactivate() trans_real._translations = {} def tearDown(self): trans_real._translations = self._old_translations activate(self._old_language) @override_settings( USE_I18N=True, LANGUAGE_CODE='en', LANGUAGES=[ ('en', 'English'), ('en-ca', 'English (Canada)'), ('en-nz', 'English (New Zealand)'), ('en-au', 'English (Australia)'), ], LOCALE_PATHS=[os.path.join(here, 'loading')], INSTALLED_APPS=['i18n.loading_app'], ) def test_translation_loading(self): """ "loading_app" does not have translations for all languages provided by "loading". Catalogs are merged correctly. """ tests = [ ('en', 'local country person'), ('en_AU', 'aussie'), ('en_NZ', 'kiwi'), ('en_CA', 'canuck'), ] # Load all relevant translations. for language, _ in tests: activate(language) # Catalogs are merged correctly. for language, nickname in tests: with self.subTest(language=language): activate(language) self.assertEqual(gettext('local country person'), nickname) class TranslationThreadSafetyTests(SimpleTestCase): def setUp(self): self._old_language = get_language() self._translations = trans_real._translations # here we rely on .split() being called inside the _fetch() # in trans_real.translation() class sideeffect_str(str): def split(self, *args, **kwargs): res = str.split(self, *args, **kwargs) trans_real._translations['en-YY'] = None return res trans_real._translations = {sideeffect_str('en-XX'): None} def tearDown(self): trans_real._translations = self._translations activate(self._old_language) def test_bug14894_translation_activate_thread_safety(self): translation_count = len(trans_real._translations) # May raise RuntimeError if translation.activate() isn't thread-safe. 
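        # The sideeffect_str installed in setUp() mutates trans_real._translations
        # from inside the .split() call that trans_real.translation() performs,
        # simulating another thread adding a catalog in the middle of the lookup.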
translation.activate('pl') # make sure sideeffect_str actually added a new translation self.assertLess(translation_count, len(trans_real._translations)) @override_settings(USE_L10N=True) class FormattingTests(SimpleTestCase): def setUp(self): super().setUp() self.n = decimal.Decimal('66666.666') self.f = 99999.999 self.d = datetime.date(2009, 12, 31) self.dt = datetime.datetime(2009, 12, 31, 20, 50) self.t = datetime.time(10, 15, 48) self.long = 10000 self.ctxt = Context({ 'n': self.n, 't': self.t, 'd': self.d, 'dt': self.dt, 'f': self.f, 'l': self.long, }) def test_all_format_strings(self): all_locales = LANG_INFO.keys() some_date = datetime.date(2017, 10, 14) some_datetime = datetime.datetime(2017, 10, 14, 10, 23) for locale in all_locales: with self.subTest(locale=locale), translation.override(locale): self.assertIn('2017', date_format(some_date)) # Uses DATE_FORMAT by default self.assertIn('23', time_format(some_datetime)) # Uses TIME_FORMAT by default self.assertIn('2017', date_format(some_datetime, format=get_format('DATETIME_FORMAT'))) self.assertIn('2017', date_format(some_date, format=get_format('YEAR_MONTH_FORMAT'))) self.assertIn('14', date_format(some_date, format=get_format('MONTH_DAY_FORMAT'))) self.assertIn('2017', date_format(some_date, format=get_format('SHORT_DATE_FORMAT'))) self.assertIn('2017', date_format(some_datetime, format=get_format('SHORT_DATETIME_FORMAT'))) def test_locale_independent(self): """ Localization of numbers """ with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=',')) self.assertEqual('66666A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B')) self.assertEqual('66666', nformat(self.n, decimal_sep='X', decimal_pos=0, grouping=1, thousand_sep='Y')) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual( '66,666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=',') ) self.assertEqual( '6B6B6B6B6A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B') ) self.assertEqual('-66666.6', nformat(-66666.666, decimal_sep='.', decimal_pos=1)) self.assertEqual('-66666.0', nformat(int('-66666'), decimal_sep='.', decimal_pos=1)) self.assertEqual('10000.0', nformat(self.long, decimal_sep='.', decimal_pos=1)) self.assertEqual( '10,00,00,000.00', nformat(100000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, 0), thousand_sep=',') ) self.assertEqual( '1,0,00,000,0000.00', nformat(10000000000.00, decimal_sep='.', decimal_pos=2, grouping=(4, 3, 2, 1, 0), thousand_sep=',') ) self.assertEqual( '10000,00,000.00', nformat(1000000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, -1), thousand_sep=',') ) # This unusual grouping/force_grouping combination may be triggered by the intcomma filter (#17414) self.assertEqual( '10000', nformat(self.long, decimal_sep='.', decimal_pos=0, grouping=0, force_grouping=True) ) # date filter self.assertEqual('31.12.2009 в 20:50', Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt)) self.assertEqual('⌚ 10:15', Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt)) @override_settings(USE_L10N=False) def test_l10n_disabled(self): """ Catalan locale with format i18n disabled translations will be used, but not formats """ with translation.override('ca', deactivate=True): self.maxDiff = 3000 self.assertEqual('N j, Y', get_format('DATE_FORMAT')) self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK')) self.assertEqual('.', 
get_format('DECIMAL_SEPARATOR')) self.assertEqual('10:15 a.m.', time_format(self.t)) self.assertEqual('des. 31, 2009', date_format(self.d)) self.assertEqual('desembre 2009', date_format(self.d, 'YEAR_MONTH_FORMAT')) self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT')) self.assertEqual('No localizable', localize('No localizable')) self.assertEqual('66666.666', localize(self.n)) self.assertEqual('99999.999', localize(self.f)) self.assertEqual('10000', localize(self.long)) self.assertEqual('des. 31, 2009', localize(self.d)) self.assertEqual('des. 31, 2009, 8:50 p.m.', localize(self.dt)) self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt)) self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('des. 31, 2009', Template('{{ d }}').render(self.ctxt)) self.assertEqual('des. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt)) self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt)) self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt)) self.assertEqual( '66666.67', Template('{{ n|floatformat:"2g" }}').render(self.ctxt), ) self.assertEqual( '100000.0', Template('{{ f|floatformat:"g" }}').render(self.ctxt), ) self.assertEqual('10:15 a.m.', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt)) self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt)) self.assertEqual( '12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt) ) form = I18nForm({ 'decimal_field': '66666,666', 'float_field': '99999,999', 'date_field': '31/12/2009', 'datetime_field': '31/12/2009 20:50', 'time_field': '20:50', 'integer_field': '1.234', }) self.assertFalse(form.is_valid()) self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['float_field']) self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['decimal_field']) self.assertEqual(['Introdu\xefu una data v\xe0lida.'], form.errors['date_field']) self.assertEqual(['Introdu\xefu una data/hora v\xe0lides.'], form.errors['datetime_field']) self.assertEqual(['Introdu\xefu un n\xfamero sencer.'], form.errors['integer_field']) form2 = SelectDateForm({ 'date_field_month': '12', 'date_field_day': '31', 'date_field_year': '2009' }) self.assertTrue(form2.is_valid()) self.assertEqual(datetime.date(2009, 12, 31), form2.cleaned_data['date_field']) self.assertHTMLEqual( '<select name="mydate_month" id="id_mydate_month">' '<option value="">---</option>' '<option value="1">gener</option>' '<option value="2">febrer</option>' '<option value="3">mar\xe7</option>' '<option value="4">abril</option>' '<option value="5">maig</option>' '<option value="6">juny</option>' '<option value="7">juliol</option>' '<option value="8">agost</option>' '<option value="9">setembre</option>' '<option value="10">octubre</option>' '<option value="11">novembre</option>' '<option value="12" selected>desembre</option>' '</select>' '<select name="mydate_day" id="id_mydate_day">' '<option value="">---</option>' '<option value="1">1</option>' '<option value="2">2</option>' '<option value="3">3</option>' '<option value="4">4</option>' '<option value="5">5</option>' '<option value="6">6</option>' '<option value="7">7</option>' '<option value="8">8</option>' '<option value="9">9</option>' '<option value="10">10</option>' '<option value="11">11</option>' '<option value="12">12</option>' '<option value="13">13</option>' '<option value="14">14</option>' '<option value="15">15</option>' '<option 
value="16">16</option>' '<option value="17">17</option>' '<option value="18">18</option>' '<option value="19">19</option>' '<option value="20">20</option>' '<option value="21">21</option>' '<option value="22">22</option>' '<option value="23">23</option>' '<option value="24">24</option>' '<option value="25">25</option>' '<option value="26">26</option>' '<option value="27">27</option>' '<option value="28">28</option>' '<option value="29">29</option>' '<option value="30">30</option>' '<option value="31" selected>31</option>' '</select>' '<select name="mydate_year" id="id_mydate_year">' '<option value="">---</option>' '<option value="2009" selected>2009</option>' '<option value="2010">2010</option>' '<option value="2011">2011</option>' '<option value="2012">2012</option>' '<option value="2013">2013</option>' '<option value="2014">2014</option>' '<option value="2015">2015</option>' '<option value="2016">2016</option>' '<option value="2017">2017</option>' '<option value="2018">2018</option>' '</select>', forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31)) ) # We shouldn't change the behavior of the floatformat filter re: # thousand separator and grouping when USE_L10N is False even # if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and # THOUSAND_SEPARATOR settings are specified with self.settings(USE_THOUSAND_SEPARATOR=True, NUMBER_GROUPING=1, THOUSAND_SEPARATOR='!'): self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt)) self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt)) def test_false_like_locale_formats(self): """ The active locale's formats take precedence over the default settings even if they would be interpreted as False in a conditional test (e.g. 0 or empty string) (#16938). """ with translation.override('fr'): with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='!'): self.assertEqual('\xa0', get_format('THOUSAND_SEPARATOR')) # Even a second time (after the format has been cached)... self.assertEqual('\xa0', get_format('THOUSAND_SEPARATOR')) with self.settings(FIRST_DAY_OF_WEEK=0): self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK')) # Even a second time (after the format has been cached)... 
self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK')) def test_l10n_enabled(self): self.maxDiff = 3000 # Catalan locale with translation.override('ca', deactivate=True): self.assertEqual(r'j \d\e F \d\e Y', get_format('DATE_FORMAT')) self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK')) self.assertEqual(',', get_format('DECIMAL_SEPARATOR')) self.assertEqual('10:15', time_format(self.t)) self.assertEqual('31 de desembre de 2009', date_format(self.d)) self.assertEqual('desembre del 2009', date_format(self.d, 'YEAR_MONTH_FORMAT')) self.assertEqual('31/12/2009 20:50', date_format(self.dt, 'SHORT_DATETIME_FORMAT')) self.assertEqual('No localizable', localize('No localizable')) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual('66.666,666', localize(self.n)) self.assertEqual('99.999,999', localize(self.f)) self.assertEqual('10.000', localize(self.long)) self.assertEqual('True', localize(True)) with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666,666', localize(self.n)) self.assertEqual('99999,999', localize(self.f)) self.assertEqual('10000', localize(self.long)) self.assertEqual('31 de desembre de 2009', localize(self.d)) self.assertEqual('31 de desembre de 2009 a les 20:50', localize(self.dt)) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt)) self.assertEqual('99.999,999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('10.000', Template('{{ l }}').render(self.ctxt)) with self.settings(USE_THOUSAND_SEPARATOR=True): form3 = I18nForm({ 'decimal_field': '66.666,666', 'float_field': '99.999,999', 'date_field': '31/12/2009', 'datetime_field': '31/12/2009 20:50', 'time_field': '20:50', 'integer_field': '1.234', }) self.assertTrue(form3.is_valid()) self.assertEqual(decimal.Decimal('66666.666'), form3.cleaned_data['decimal_field']) self.assertEqual(99999.999, form3.cleaned_data['float_field']) self.assertEqual(datetime.date(2009, 12, 31), form3.cleaned_data['date_field']) self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form3.cleaned_data['datetime_field']) self.assertEqual(datetime.time(20, 50), form3.cleaned_data['time_field']) self.assertEqual(1234, form3.cleaned_data['integer_field']) with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666,666', Template('{{ n }}').render(self.ctxt)) self.assertEqual('99999,999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('31 de desembre de 2009', Template('{{ d }}').render(self.ctxt)) self.assertEqual('31 de desembre de 2009 a les 20:50', Template('{{ dt }}').render(self.ctxt)) self.assertEqual('66666,67', Template('{{ n|floatformat:2 }}').render(self.ctxt)) self.assertEqual('100000,0', Template('{{ f|floatformat }}').render(self.ctxt)) self.assertEqual( '66.666,67', Template('{{ n|floatformat:"2g" }}').render(self.ctxt), ) self.assertEqual( '100.000,0', Template('{{ f|floatformat:"g" }}').render(self.ctxt), ) self.assertEqual('10:15', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt)) self.assertEqual('31/12/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt)) self.assertEqual( '31/12/2009 20:50', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt) ) self.assertEqual(date_format(datetime.datetime.now(), "DATE_FORMAT"), Template('{% now "DATE_FORMAT" %}').render(self.ctxt)) with self.settings(USE_THOUSAND_SEPARATOR=False): form4 = I18nForm({ 'decimal_field': '66666,666', 'float_field': '99999,999', 'date_field': '31/12/2009', 'datetime_field': '31/12/2009 20:50', 
'time_field': '20:50', 'integer_field': '1234', }) self.assertTrue(form4.is_valid()) self.assertEqual(decimal.Decimal('66666.666'), form4.cleaned_data['decimal_field']) self.assertEqual(99999.999, form4.cleaned_data['float_field']) self.assertEqual(datetime.date(2009, 12, 31), form4.cleaned_data['date_field']) self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form4.cleaned_data['datetime_field']) self.assertEqual(datetime.time(20, 50), form4.cleaned_data['time_field']) self.assertEqual(1234, form4.cleaned_data['integer_field']) form5 = SelectDateForm({ 'date_field_month': '12', 'date_field_day': '31', 'date_field_year': '2009' }) self.assertTrue(form5.is_valid()) self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field']) self.assertHTMLEqual( '<select name="mydate_day" id="id_mydate_day">' '<option value="">---</option>' '<option value="1">1</option>' '<option value="2">2</option>' '<option value="3">3</option>' '<option value="4">4</option>' '<option value="5">5</option>' '<option value="6">6</option>' '<option value="7">7</option>' '<option value="8">8</option>' '<option value="9">9</option>' '<option value="10">10</option>' '<option value="11">11</option>' '<option value="12">12</option>' '<option value="13">13</option>' '<option value="14">14</option>' '<option value="15">15</option>' '<option value="16">16</option>' '<option value="17">17</option>' '<option value="18">18</option>' '<option value="19">19</option>' '<option value="20">20</option>' '<option value="21">21</option>' '<option value="22">22</option>' '<option value="23">23</option>' '<option value="24">24</option>' '<option value="25">25</option>' '<option value="26">26</option>' '<option value="27">27</option>' '<option value="28">28</option>' '<option value="29">29</option>' '<option value="30">30</option>' '<option value="31" selected>31</option>' '</select>' '<select name="mydate_month" id="id_mydate_month">' '<option value="">---</option>' '<option value="1">gener</option>' '<option value="2">febrer</option>' '<option value="3">mar\xe7</option>' '<option value="4">abril</option>' '<option value="5">maig</option>' '<option value="6">juny</option>' '<option value="7">juliol</option>' '<option value="8">agost</option>' '<option value="9">setembre</option>' '<option value="10">octubre</option>' '<option value="11">novembre</option>' '<option value="12" selected>desembre</option>' '</select>' '<select name="mydate_year" id="id_mydate_year">' '<option value="">---</option>' '<option value="2009" selected>2009</option>' '<option value="2010">2010</option>' '<option value="2011">2011</option>' '<option value="2012">2012</option>' '<option value="2013">2013</option>' '<option value="2014">2014</option>' '<option value="2015">2015</option>' '<option value="2016">2016</option>' '<option value="2017">2017</option>' '<option value="2018">2018</option>' '</select>', forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31)) ) # Russian locale (with E as month) with translation.override('ru', deactivate=True): self.assertHTMLEqual( '<select name="mydate_day" id="id_mydate_day">' '<option value="">---</option>' '<option value="1">1</option>' '<option value="2">2</option>' '<option value="3">3</option>' '<option value="4">4</option>' '<option value="5">5</option>' '<option value="6">6</option>' '<option value="7">7</option>' '<option value="8">8</option>' '<option value="9">9</option>' '<option value="10">10</option>' '<option value="11">11</option>' '<option 
value="12">12</option>' '<option value="13">13</option>' '<option value="14">14</option>' '<option value="15">15</option>' '<option value="16">16</option>' '<option value="17">17</option>' '<option value="18">18</option>' '<option value="19">19</option>' '<option value="20">20</option>' '<option value="21">21</option>' '<option value="22">22</option>' '<option value="23">23</option>' '<option value="24">24</option>' '<option value="25">25</option>' '<option value="26">26</option>' '<option value="27">27</option>' '<option value="28">28</option>' '<option value="29">29</option>' '<option value="30">30</option>' '<option value="31" selected>31</option>' '</select>' '<select name="mydate_month" id="id_mydate_month">' '<option value="">---</option>' '<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>' '<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>' '<option value="3">\u041c\u0430\u0440\u0442</option>' '<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>' '<option value="5">\u041c\u0430\u0439</option>' '<option value="6">\u0418\u044e\u043d\u044c</option>' '<option value="7">\u0418\u044e\u043b\u044c</option>' '<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>' '<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c</option>' '<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>' '<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>' '<option value="12" selected>\u0414\u0435\u043a\u0430\u0431\u0440\u044c</option>' '</select>' '<select name="mydate_year" id="id_mydate_year">' '<option value="">---</option>' '<option value="2009" selected>2009</option>' '<option value="2010">2010</option>' '<option value="2011">2011</option>' '<option value="2012">2012</option>' '<option value="2013">2013</option>' '<option value="2014">2014</option>' '<option value="2015">2015</option>' '<option value="2016">2016</option>' '<option value="2017">2017</option>' '<option value="2018">2018</option>' '</select>', forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31)) ) # English locale with translation.override('en', deactivate=True): self.assertEqual('N j, Y', get_format('DATE_FORMAT')) self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK')) self.assertEqual('.', get_format('DECIMAL_SEPARATOR')) self.assertEqual('Dec. 31, 2009', date_format(self.d)) self.assertEqual('December 2009', date_format(self.d, 'YEAR_MONTH_FORMAT')) self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT')) self.assertEqual('No localizable', localize('No localizable')) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual('66,666.666', localize(self.n)) self.assertEqual('99,999.999', localize(self.f)) self.assertEqual('10,000', localize(self.long)) with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666.666', localize(self.n)) self.assertEqual('99999.999', localize(self.f)) self.assertEqual('10000', localize(self.long)) self.assertEqual('Dec. 31, 2009', localize(self.d)) self.assertEqual('Dec. 
31, 2009, 8:50 p.m.', localize(self.dt)) with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual('66,666.666', Template('{{ n }}').render(self.ctxt)) self.assertEqual('99,999.999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('10,000', Template('{{ l }}').render(self.ctxt)) with self.settings(USE_THOUSAND_SEPARATOR=False): self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt)) self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt)) self.assertEqual('Dec. 31, 2009', Template('{{ d }}').render(self.ctxt)) self.assertEqual('Dec. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt)) self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt)) self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt)) self.assertEqual( '66,666.67', Template('{{ n|floatformat:"2g" }}').render(self.ctxt), ) self.assertEqual( '100,000.0', Template('{{ f|floatformat:"g" }}').render(self.ctxt), ) self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt)) self.assertEqual( '12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt) ) form5 = I18nForm({ 'decimal_field': '66666.666', 'float_field': '99999.999', 'date_field': '12/31/2009', 'datetime_field': '12/31/2009 20:50', 'time_field': '20:50', 'integer_field': '1234', }) self.assertTrue(form5.is_valid()) self.assertEqual(decimal.Decimal('66666.666'), form5.cleaned_data['decimal_field']) self.assertEqual(99999.999, form5.cleaned_data['float_field']) self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field']) self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form5.cleaned_data['datetime_field']) self.assertEqual(datetime.time(20, 50), form5.cleaned_data['time_field']) self.assertEqual(1234, form5.cleaned_data['integer_field']) form6 = SelectDateForm({ 'date_field_month': '12', 'date_field_day': '31', 'date_field_year': '2009' }) self.assertTrue(form6.is_valid()) self.assertEqual(datetime.date(2009, 12, 31), form6.cleaned_data['date_field']) self.assertHTMLEqual( '<select name="mydate_month" id="id_mydate_month">' '<option value="">---</option>' '<option value="1">January</option>' '<option value="2">February</option>' '<option value="3">March</option>' '<option value="4">April</option>' '<option value="5">May</option>' '<option value="6">June</option>' '<option value="7">July</option>' '<option value="8">August</option>' '<option value="9">September</option>' '<option value="10">October</option>' '<option value="11">November</option>' '<option value="12" selected>December</option>' '</select>' '<select name="mydate_day" id="id_mydate_day">' '<option value="">---</option>' '<option value="1">1</option>' '<option value="2">2</option>' '<option value="3">3</option>' '<option value="4">4</option>' '<option value="5">5</option>' '<option value="6">6</option>' '<option value="7">7</option>' '<option value="8">8</option>' '<option value="9">9</option>' '<option value="10">10</option>' '<option value="11">11</option>' '<option value="12">12</option>' '<option value="13">13</option>' '<option value="14">14</option>' '<option value="15">15</option>' '<option value="16">16</option>' '<option value="17">17</option>' '<option value="18">18</option>' '<option value="19">19</option>' '<option value="20">20</option>' '<option value="21">21</option>' '<option value="22">22</option>' '<option value="23">23</option>' '<option value="24">24</option>' '<option value="25">25</option>' '<option 
value="26">26</option>' '<option value="27">27</option>' '<option value="28">28</option>' '<option value="29">29</option>' '<option value="30">30</option>' '<option value="31" selected>31</option>' '</select>' '<select name="mydate_year" id="id_mydate_year">' '<option value="">---</option>' '<option value="2009" selected>2009</option>' '<option value="2010">2010</option>' '<option value="2011">2011</option>' '<option value="2012">2012</option>' '<option value="2013">2013</option>' '<option value="2014">2014</option>' '<option value="2015">2015</option>' '<option value="2016">2016</option>' '<option value="2017">2017</option>' '<option value="2018">2018</option>' '</select>', forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31)) ) def test_sub_locales(self): """ Check if sublocales fall back to the main locale """ with self.settings(USE_THOUSAND_SEPARATOR=True): with translation.override('de-at', deactivate=True): self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt)) with translation.override('es-us', deactivate=True): self.assertEqual('31 de Diciembre de 2009', date_format(self.d)) def test_localized_input(self): """ Tests if form input is correctly localized """ self.maxDiff = 1200 with translation.override('de-at', deactivate=True): form6 = CompanyForm({ 'name': 'acme', 'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0), 'cents_paid': decimal.Decimal('59.47'), 'products_delivered': 12000, }) self.assertTrue(form6.is_valid()) self.assertHTMLEqual( form6.as_ul(), '<li><label for="id_name">Name:</label>' '<input id="id_name" type="text" name="name" value="acme" maxlength="50" required></li>' '<li><label for="id_date_added">Date added:</label>' '<input type="text" name="date_added" value="31.12.2009 06:00:00" id="id_date_added" required></li>' '<li><label for="id_cents_paid">Cents paid:</label>' '<input type="text" name="cents_paid" value="59,47" id="id_cents_paid" required></li>' '<li><label for="id_products_delivered">Products delivered:</label>' '<input type="text" name="products_delivered" value="12000" id="id_products_delivered" required>' '</li>' ) self.assertEqual(localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)), '31.12.2009 06:00:00') self.assertEqual(datetime.datetime(2009, 12, 31, 6, 0, 0), form6.cleaned_data['date_added']) with self.settings(USE_THOUSAND_SEPARATOR=True): # Checking for the localized "products_delivered" field self.assertInHTML( '<input type="text" name="products_delivered" ' 'value="12.000" id="id_products_delivered" required>', form6.as_ul() ) def test_localized_input_func(self): tests = ( (True, 'True'), (datetime.date(1, 1, 1), '0001-01-01'), (datetime.datetime(1, 1, 1), '0001-01-01 00:00:00'), ) with self.settings(USE_THOUSAND_SEPARATOR=True): for value, expected in tests: with self.subTest(value=value): self.assertEqual(localize_input(value), expected) def test_sanitize_separators(self): """ Tests django.utils.formats.sanitize_separators. 
""" # Non-strings are untouched self.assertEqual(sanitize_separators(123), 123) with translation.override('ru', deactivate=True): # Russian locale has non-breaking space (\xa0) as thousand separator # Usual space is accepted too when sanitizing inputs with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual(sanitize_separators('1\xa0234\xa0567'), '1234567') self.assertEqual(sanitize_separators('77\xa0777,777'), '77777.777') self.assertEqual(sanitize_separators('12 345'), '12345') self.assertEqual(sanitize_separators('77 777,777'), '77777.777') with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=False): self.assertEqual(sanitize_separators('12\xa0345'), '12\xa0345') with self.settings(USE_THOUSAND_SEPARATOR=True): with patch_formats(get_language(), THOUSAND_SEPARATOR='.', DECIMAL_SEPARATOR=','): self.assertEqual(sanitize_separators('10.234'), '10234') # Suspicion that user entered dot as decimal separator (#22171) self.assertEqual(sanitize_separators('10.10'), '10.10') with self.settings(USE_L10N=False, DECIMAL_SEPARATOR=','): self.assertEqual(sanitize_separators('1001,10'), '1001.10') self.assertEqual(sanitize_separators('1001.10'), '1001.10') with self.settings( USE_L10N=False, DECIMAL_SEPARATOR=',', USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='.' ): self.assertEqual(sanitize_separators('1.001,10'), '1001.10') self.assertEqual(sanitize_separators('1001,10'), '1001.10') self.assertEqual(sanitize_separators('1001.10'), '1001.10') self.assertEqual(sanitize_separators('1,001.10'), '1.001.10') # Invalid output def test_iter_format_modules(self): """ Tests the iter_format_modules function. """ # Importing some format modules so that we can compare the returned # modules with these expected modules default_mod = import_module('django.conf.locale.de.formats') test_mod = import_module('i18n.other.locale.de.formats') test_mod2 = import_module('i18n.other2.locale.de.formats') with translation.override('de-at', deactivate=True): # Should return the correct default module when no setting is set self.assertEqual(list(iter_format_modules('de')), [default_mod]) # When the setting is a string, should return the given module and # the default module self.assertEqual( list(iter_format_modules('de', 'i18n.other.locale')), [test_mod, default_mod]) # When setting is a list of strings, should return the given # modules and the default module self.assertEqual( list(iter_format_modules('de', ['i18n.other.locale', 'i18n.other2.locale'])), [test_mod, test_mod2, default_mod]) def test_iter_format_modules_stability(self): """ Tests the iter_format_modules function always yields format modules in a stable and correct order in presence of both base ll and ll_CC formats. 
""" en_format_mod = import_module('django.conf.locale.en.formats') en_gb_format_mod = import_module('django.conf.locale.en_GB.formats') self.assertEqual(list(iter_format_modules('en-gb')), [en_gb_format_mod, en_format_mod]) def test_get_format_modules_lang(self): with translation.override('de', deactivate=True): self.assertEqual('.', get_format('DECIMAL_SEPARATOR', lang='en')) def test_get_format_modules_stability(self): with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'): with translation.override('de', deactivate=True): old = "%r" % get_format_modules(reverse=True) new = "%r" % get_format_modules(reverse=True) # second try self.assertEqual(new, old, 'Value returned by get_formats_modules() must be preserved between calls.') def test_localize_templatetag_and_filter(self): """ Test the {% localize %} templatetag and the localize/unlocalize filters. """ context = Context({'int': 1455, 'float': 3.14, 'date': datetime.date(2016, 12, 31)}) template1 = Template( '{% load l10n %}{% localize %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}; ' '{% localize on %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}' ) template2 = Template( '{% load l10n %}{{ int }}/{{ float }}/{{ date }}; ' '{% localize off %}{{ int }}/{{ float }}/{{ date }};{% endlocalize %} ' '{{ int }}/{{ float }}/{{ date }}' ) template3 = Template( '{% load l10n %}{{ int }}/{{ float }}/{{ date }}; ' '{{ int|unlocalize }}/{{ float|unlocalize }}/{{ date|unlocalize }}' ) template4 = Template( '{% load l10n %}{{ int }}/{{ float }}/{{ date }}; ' '{{ int|localize }}/{{ float|localize }}/{{ date|localize }}' ) expected_localized = '1.455/3,14/31. Dezember 2016' expected_unlocalized = '1455/3.14/Dez. 31, 2016' output1 = '; '.join([expected_localized, expected_localized]) output2 = '; '.join([expected_localized, expected_unlocalized, expected_localized]) output3 = '; '.join([expected_localized, expected_unlocalized]) output4 = '; '.join([expected_unlocalized, expected_localized]) with translation.override('de', deactivate=True): with self.settings(USE_L10N=False, USE_THOUSAND_SEPARATOR=True): self.assertEqual(template1.render(context), output1) self.assertEqual(template4.render(context), output4) with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True): self.assertEqual(template1.render(context), output1) self.assertEqual(template2.render(context), output2) self.assertEqual(template3.render(context), output3) def test_localized_off_numbers(self): """A string representation is returned for unlocalized numbers.""" template = Template( '{% load l10n %}{% localize off %}' '{{ int }}/{{ float }}/{{ decimal }}{% endlocalize %}' ) context = Context( {'int': 1455, 'float': 3.14, 'decimal': decimal.Decimal('24.1567')} ) for use_l10n in [True, False]: with self.subTest(use_l10n=use_l10n), self.settings( USE_L10N=use_l10n, DECIMAL_SEPARATOR=',', USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='°', NUMBER_GROUPING=2, ): self.assertEqual(template.render(context), '1455/3.14/24.1567') def test_localized_as_text_as_hidden_input(self): """ Tests if form input with 'as_hidden' or 'as_text' is correctly localized. 
Ticket #18777 """ self.maxDiff = 1200 with translation.override('de-at', deactivate=True): template = Template('{% load l10n %}{{ form.date_added }}; {{ form.cents_paid }}') template_as_text = Template('{% load l10n %}{{ form.date_added.as_text }}; {{ form.cents_paid.as_text }}') template_as_hidden = Template( '{% load l10n %}{{ form.date_added.as_hidden }}; {{ form.cents_paid.as_hidden }}' ) form = CompanyForm({ 'name': 'acme', 'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0), 'cents_paid': decimal.Decimal('59.47'), 'products_delivered': 12000, }) context = Context({'form': form}) self.assertTrue(form.is_valid()) self.assertHTMLEqual( template.render(context), '<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" required>;' '<input id="id_cents_paid" name="cents_paid" type="text" value="59,47" required>' ) self.assertHTMLEqual( template_as_text.render(context), '<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" required>;' ' <input id="id_cents_paid" name="cents_paid" type="text" value="59,47" required>' ) self.assertHTMLEqual( template_as_hidden.render(context), '<input id="id_date_added" name="date_added" type="hidden" value="31.12.2009 06:00:00">;' '<input id="id_cents_paid" name="cents_paid" type="hidden" value="59,47">' ) def test_format_arbitrary_settings(self): self.assertEqual(get_format('DEBUG'), 'DEBUG') def test_get_custom_format(self): with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'): with translation.override('fr', deactivate=True): self.assertEqual('d/m/Y CUSTOM', get_format('CUSTOM_DAY_FORMAT')) def test_admin_javascript_supported_input_formats(self): """ The first input format for DATE_INPUT_FORMATS, TIME_INPUT_FORMATS, and DATETIME_INPUT_FORMATS must not contain %f since that's unsupported by the admin's time picker widget. """ regex = re.compile('%([^BcdHImMpSwxXyY%])') for language_code, language_name in settings.LANGUAGES: for format_name in ('DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'): with self.subTest(language=language_code, format=format_name): formatter = get_format(format_name, lang=language_code)[0] self.assertEqual( regex.findall(formatter), [], "%s locale's %s uses an unsupported format code." % (language_code, format_name) ) class MiscTests(SimpleTestCase): rf = RequestFactory() @override_settings(LANGUAGE_CODE='de') def test_english_fallback(self): """ With a non-English LANGUAGE_CODE and if the active language is English or one of its variants, the untranslated string should be returned (instead of falling back to LANGUAGE_CODE) (See #24413). """ self.assertEqual(gettext("Image"), "Bild") with translation.override('en'): self.assertEqual(gettext("Image"), "Image") with translation.override('en-us'): self.assertEqual(gettext("Image"), "Image") with translation.override('en-ca'): self.assertEqual(gettext("Image"), "Image") def test_parse_spec_http_header(self): """ Testing HTTP header parsing. First, we test that we can parse the values according to the spec (and that we extract all the pieces in the right order). 
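        For example, 'da, en-gb;q=0.25, en;q=0.5' should parse to
        (('da', 1.0), ('en', 0.5), ('en-gb', 0.25)), ordered by descending
        quality value.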
""" tests = [ # Good headers ('de', [('de', 1.0)]), ('en-AU', [('en-au', 1.0)]), ('es-419', [('es-419', 1.0)]), ('*;q=1.00', [('*', 1.0)]), ('en-AU;q=0.123', [('en-au', 0.123)]), ('en-au;q=0.5', [('en-au', 0.5)]), ('en-au;q=1.0', [('en-au', 1.0)]), ('da, en-gb;q=0.25, en;q=0.5', [('da', 1.0), ('en', 0.5), ('en-gb', 0.25)]), ('en-au-xx', [('en-au-xx', 1.0)]), ('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125', [('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)]), ('*', [('*', 1.0)]), ('de;q=0.', [('de', 0.0)]), ('en; q=1,', [('en', 1.0)]), ('en; q=1.0, * ; q=0.5', [('en', 1.0), ('*', 0.5)]), # Bad headers ('en-gb;q=1.0000', []), ('en;q=0.1234', []), ('en;q=.2', []), ('abcdefghi-au', []), ('**', []), ('en,,gb', []), ('en-au;q=0.1.0', []), (('X' * 97) + 'Z,en', []), ('da, en-gb;q=0.8, en;q=0.7,#', []), ('de;q=2.0', []), ('de;q=0.a', []), ('12-345', []), ('', []), ('en;q=1e0', []), ] for value, expected in tests: with self.subTest(value=value): self.assertEqual(trans_real.parse_accept_lang_header(value), tuple(expected)) def test_parse_literal_http_header(self): """ Now test that we parse a literal HTTP header correctly. """ g = get_language_from_request r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'} self.assertEqual('pt-br', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'} self.assertEqual('pt', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'} self.assertEqual('es', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'} self.assertEqual('es-ar', g(r)) # This test assumes there won't be a Django translation to a US # variation of the Spanish language, a safe assumption. When the # user sets it as the preferred language, the main 'es' # translation should be selected instead. r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'} self.assertEqual(g(r), 'es') # This tests the following scenario: there isn't a main language (zh) # translation of Django but there is a translation to variation (zh-hans) # the user sets zh-hans as the preferred language, it should be selected # by Django without falling back nor ignoring it. r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans,de'} self.assertEqual(g(r), 'zh-hans') r.META = {'HTTP_ACCEPT_LANGUAGE': 'NL'} self.assertEqual('nl', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'fy'} self.assertEqual('fy', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'ia'} self.assertEqual('ia', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'sr-latn'} self.assertEqual('sr-latn', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans'} self.assertEqual('zh-hans', g(r)) r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hant'} self.assertEqual('zh-hant', g(r)) @override_settings( LANGUAGES=[ ('en', 'English'), ('zh-hans', 'Simplified Chinese'), ('zh-hant', 'Traditional Chinese'), ] ) def test_support_for_deprecated_chinese_language_codes(self): """ Some browsers (Firefox, IE, etc.) use deprecated language codes. As these language codes will be removed in Django 1.9, these will be incorrectly matched. For example zh-tw (traditional) will be interpreted as zh-hans (simplified), which is wrong. So we should also accept these deprecated language codes. 
refs #18419 -- this is explicitly for browser compatibility """ g = get_language_from_request r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,en'} self.assertEqual(g(r), 'zh-hans') r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-tw,en'} self.assertEqual(g(r), 'zh-hant') def test_special_fallback_language(self): """ Some languages may have special fallbacks that don't follow the simple 'fr-ca' -> 'fr' logic (notably Chinese codes). """ r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-my,en'} self.assertEqual(get_language_from_request(r), 'zh-hans') def test_parse_language_cookie(self): """ Now test that we parse language preferences stored in a cookie correctly. """ g = get_language_from_request r = self.rf.get('/') r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'} r.META = {} self.assertEqual('pt-br', g(r)) r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'} r.META = {} self.assertEqual('pt', g(r)) r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'} r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'} self.assertEqual('es', g(r)) # This test assumes there won't be a Django translation to a US # variation of the Spanish language, a safe assumption. When the # user sets it as the preferred language, the main 'es' # translation should be selected instead. r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'} r.META = {} self.assertEqual(g(r), 'es') # This tests the following scenario: there isn't a main language (zh) # translation of Django but there is a translation to variation (zh-hans) # the user sets zh-hans as the preferred language, it should be selected # by Django without falling back nor ignoring it. r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-hans'} r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'} self.assertEqual(g(r), 'zh-hans') @override_settings( USE_I18N=True, LANGUAGES=[ ('en', 'English'), ('de', 'German'), ('de-at', 'Austrian German'), ('pt-br', 'Portuguese (Brazil)'), ], ) def test_get_supported_language_variant_real(self): g = trans_real.get_supported_language_variant self.assertEqual(g('en'), 'en') self.assertEqual(g('en-gb'), 'en') self.assertEqual(g('de'), 'de') self.assertEqual(g('de-at'), 'de-at') self.assertEqual(g('de-ch'), 'de') self.assertEqual(g('pt-br'), 'pt-br') self.assertEqual(g('pt'), 'pt-br') self.assertEqual(g('pt-pt'), 'pt-br') with self.assertRaises(LookupError): g('pt', strict=True) with self.assertRaises(LookupError): g('pt-pt', strict=True) with self.assertRaises(LookupError): g('xyz') with self.assertRaises(LookupError): g('xy-zz') def test_get_supported_language_variant_null(self): g = trans_null.get_supported_language_variant self.assertEqual(g(settings.LANGUAGE_CODE), settings.LANGUAGE_CODE) with self.assertRaises(LookupError): g('pt') with self.assertRaises(LookupError): g('de') with self.assertRaises(LookupError): g('de-at') with self.assertRaises(LookupError): g('de', strict=True) with self.assertRaises(LookupError): g('de-at', strict=True) with self.assertRaises(LookupError): g('xyz') @override_settings( LANGUAGES=[ ('en', 'English'), ('de', 'German'), ('de-at', 'Austrian German'), ('pl', 'Polish'), ], ) def test_get_language_from_path_real(self): g = trans_real.get_language_from_path self.assertEqual(g('/pl/'), 'pl') self.assertEqual(g('/pl'), 'pl') self.assertIsNone(g('/xyz/')) self.assertEqual(g('/en/'), 'en') self.assertEqual(g('/en-gb/'), 'en') self.assertEqual(g('/de/'), 'de') self.assertEqual(g('/de-at/'), 'de-at') self.assertEqual(g('/de-ch/'), 'de') self.assertIsNone(g('/de-simple-page/')) def 
test_get_language_from_path_null(self): g = trans_null.get_language_from_path self.assertIsNone(g('/pl/')) self.assertIsNone(g('/pl')) self.assertIsNone(g('/xyz/')) def test_cache_resetting(self): """ After setting LANGUAGE, the cache should be cleared and languages previously valid should not be used (#14170). """ g = get_language_from_request r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'} self.assertEqual('pt-br', g(r)) with self.settings(LANGUAGES=[('en', 'English')]): self.assertNotEqual('pt-br', g(r)) def test_i18n_patterns_returns_list(self): with override_settings(USE_I18N=False): self.assertIsInstance(i18n_patterns([]), list) with override_settings(USE_I18N=True): self.assertIsInstance(i18n_patterns([]), list) class ResolutionOrderI18NTests(SimpleTestCase): def setUp(self): super().setUp() activate('de') def tearDown(self): deactivate() super().tearDown() def assertGettext(self, msgid, msgstr): result = gettext(msgid) self.assertIn( msgstr, result, "The string '%s' isn't in the translation of '%s'; the actual result is '%s'." % (msgstr, msgid, result) ) class AppResolutionOrderI18NTests(ResolutionOrderI18NTests): @override_settings(LANGUAGE_CODE='de') def test_app_translation(self): # Original translation. self.assertGettext('Date/time', 'Datum/Zeit') # Different translation. with self.modify_settings(INSTALLED_APPS={'append': 'i18n.resolution'}): # Force refreshing translations. activate('de') # Doesn't work because it's added later in the list. self.assertGettext('Date/time', 'Datum/Zeit') with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.admin.apps.SimpleAdminConfig'}): # Force refreshing translations. activate('de') # Unless the original is removed from the list. self.assertGettext('Date/time', 'Datum/Zeit (APP)') @override_settings(LOCALE_PATHS=extended_locale_paths) class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests): def test_locale_paths_translation(self): self.assertGettext('Time', 'LOCALE_PATHS') def test_locale_paths_override_app_translation(self): with self.settings(INSTALLED_APPS=['i18n.resolution']): self.assertGettext('Time', 'LOCALE_PATHS') class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests): def test_django_fallback(self): self.assertEqual(gettext('Date/time'), 'Datum/Zeit') @override_settings(INSTALLED_APPS=['i18n.territorial_fallback']) class TranslationFallbackI18NTests(ResolutionOrderI18NTests): def test_sparse_territory_catalog(self): """ Untranslated strings for territorial language variants use the translations of the generic language. In this case, the de-de translation falls back to de. """ with translation.override('de-de'): self.assertGettext('Test 1 (en)', '(de-de)') self.assertGettext('Test 2 (en)', '(de)') class TestModels(TestCase): def test_lazy(self): tm = TestModel() tm.save() def test_safestr(self): c = Company(cents_paid=12, products_delivered=1) c.name = SafeString('Iñtërnâtiônàlizætiøn1') c.save() class TestLanguageInfo(SimpleTestCase): def test_localized_language_info(self): li = get_language_info('de') self.assertEqual(li['code'], 'de') self.assertEqual(li['name_local'], 'Deutsch') self.assertEqual(li['name'], 'German') self.assertIs(li['bidi'], False) def test_unknown_language_code(self): with self.assertRaisesMessage(KeyError, "Unknown language code xx"): get_language_info('xx') with translation.override('xx'): # A language with no translation catalogs should fallback to the # untranslated string. 
self.assertEqual(gettext("Title"), "Title") def test_unknown_only_country_code(self): li = get_language_info('de-xx') self.assertEqual(li['code'], 'de') self.assertEqual(li['name_local'], 'Deutsch') self.assertEqual(li['name'], 'German') self.assertIs(li['bidi'], False) def test_unknown_language_code_and_country_code(self): with self.assertRaisesMessage(KeyError, "Unknown language code xx-xx and xx"): get_language_info('xx-xx') def test_fallback_language_code(self): """ get_language_info return the first fallback language info if the lang_info struct does not contain the 'name' key. """ li = get_language_info('zh-my') self.assertEqual(li['code'], 'zh-hans') li = get_language_info('zh-hans') self.assertEqual(li['code'], 'zh-hans') @override_settings( USE_I18N=True, LANGUAGES=[ ('en', 'English'), ('fr', 'French'), ], MIDDLEWARE=[ 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', ], ROOT_URLCONF='i18n.urls', ) class LocaleMiddlewareTests(TestCase): def test_streaming_response(self): # Regression test for #5241 response = self.client.get('/fr/streaming/') self.assertContains(response, "Oui/Non") response = self.client.get('/en/streaming/') self.assertContains(response, "Yes/No") @override_settings( MIDDLEWARE=[ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', ], ) def test_language_not_saved_to_session(self): """ The Current language isno' automatically saved to the session on every request (#21473). """ self.client.get('/fr/simple/') self.assertNotIn(LANGUAGE_SESSION_KEY, self.client.session) @override_settings( USE_I18N=True, LANGUAGES=[ ('en', 'English'), ('de', 'German'), ('fr', 'French'), ], MIDDLEWARE=[ 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', ], ROOT_URLCONF='i18n.urls_default_unprefixed', LANGUAGE_CODE='en', ) class UnprefixedDefaultLanguageTests(SimpleTestCase): def test_default_lang_without_prefix(self): """ With i18n_patterns(..., prefix_default_language=False), the default language (settings.LANGUAGE_CODE) should be accessible without a prefix. """ response = self.client.get('/simple/') self.assertEqual(response.content, b'Yes') def test_other_lang_with_prefix(self): response = self.client.get('/fr/simple/') self.assertEqual(response.content, b'Oui') def test_unprefixed_language_other_than_accept_language(self): response = self.client.get('/simple/', HTTP_ACCEPT_LANGUAGE='fr') self.assertEqual(response.content, b'Yes') def test_page_with_dash(self): # A page starting with /de* shouldn't match the 'de' language code. response = self.client.get('/de-simple-page/') self.assertEqual(response.content, b'Yes') def test_no_redirect_on_404(self): """ A request for a nonexistent URL shouldn't cause a redirect to /<default_language>/<request_url> when prefix_default_language=False and /<default_language>/<request_url> has a URL match (#27402). """ # A match for /group1/group2/ must exist for this to act as a # regression test. 
response = self.client.get('/group1/group2/') self.assertEqual(response.status_code, 200) response = self.client.get('/nonexistent/') self.assertEqual(response.status_code, 404) @override_settings( USE_I18N=True, LANGUAGES=[ ('bg', 'Bulgarian'), ('en-us', 'English'), ('pt-br', 'Portuguese (Brazil)'), ], MIDDLEWARE=[ 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', ], ROOT_URLCONF='i18n.urls' ) class CountrySpecificLanguageTests(SimpleTestCase): rf = RequestFactory() def test_check_for_language(self): self.assertTrue(check_for_language('en')) self.assertTrue(check_for_language('en-us')) self.assertTrue(check_for_language('en-US')) self.assertFalse(check_for_language('en_US')) self.assertTrue(check_for_language('be')) self.assertTrue(check_for_language('be@latin')) self.assertTrue(check_for_language('sr-RS@latin')) self.assertTrue(check_for_language('sr-RS@12345')) self.assertFalse(check_for_language('en-ü')) self.assertFalse(check_for_language('en\x00')) self.assertFalse(check_for_language(None)) self.assertFalse(check_for_language('be@ ')) # Specifying encoding is not supported (Django enforces UTF-8) self.assertFalse(check_for_language('tr-TR.UTF-8')) self.assertFalse(check_for_language('tr-TR.UTF8')) self.assertFalse(check_for_language('de-DE.utf-8')) def test_check_for_language_null(self): self.assertIs(trans_null.check_for_language('en'), True) def test_get_language_from_request(self): # issue 19919 r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8,bg;q=0.6,ru;q=0.4'} lang = get_language_from_request(r) self.assertEqual('en-us', lang) r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'bg-bg,en-US;q=0.8,en;q=0.6,ru;q=0.4'} lang = get_language_from_request(r) self.assertEqual('bg', lang) def test_get_language_from_request_null(self): lang = trans_null.get_language_from_request(None) self.assertEqual(lang, 'en') with override_settings(LANGUAGE_CODE='de'): lang = trans_null.get_language_from_request(None) self.assertEqual(lang, 'de') def test_specific_language_codes(self): # issue 11915 r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'} lang = get_language_from_request(r) self.assertEqual('pt-br', lang) r = self.rf.get('/') r.COOKIES = {} r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'} lang = get_language_from_request(r) self.assertEqual('pt-br', lang) class TranslationFilesMissing(SimpleTestCase): def setUp(self): super().setUp() self.gettext_find_builtin = gettext_module.find def tearDown(self): gettext_module.find = self.gettext_find_builtin super().tearDown() def patchGettextFind(self): gettext_module.find = lambda *args, **kw: None def test_failure_finding_default_mo_files(self): """OSError is raised if the default language is unparseable.""" self.patchGettextFind() trans_real._translations = {} with self.assertRaises(OSError): activate('en') class NonDjangoLanguageTests(SimpleTestCase): """ A language non present in default Django languages can still be installed/used by a Django project. 
""" @override_settings( USE_I18N=True, LANGUAGES=[ ('en-us', 'English'), ('xxx', 'Somelanguage'), ], LANGUAGE_CODE='xxx', LOCALE_PATHS=[os.path.join(here, 'commands', 'locale')], ) def test_non_django_language(self): self.assertEqual(get_language(), 'xxx') self.assertEqual(gettext("year"), "reay") @override_settings(USE_I18N=True) def test_check_for_language(self): with tempfile.TemporaryDirectory() as app_dir: os.makedirs(os.path.join(app_dir, 'locale', 'dummy_Lang', 'LC_MESSAGES')) open(os.path.join(app_dir, 'locale', 'dummy_Lang', 'LC_MESSAGES', 'django.mo'), 'w').close() app_config = AppConfig('dummy_app', AppModuleStub(__path__=[app_dir])) with mock.patch('django.apps.apps.get_app_configs', return_value=[app_config]): self.assertIs(check_for_language('dummy-lang'), True) @override_settings( USE_I18N=True, LANGUAGES=[ ('en-us', 'English'), # xyz language has no locale files ('xyz', 'XYZ'), ], ) @translation.override('xyz') def test_plural_non_django_language(self): self.assertEqual(get_language(), 'xyz') self.assertEqual(ngettext('year', 'years', 2), 'years') @override_settings(USE_I18N=True) class WatchForTranslationChangesTests(SimpleTestCase): @override_settings(USE_I18N=False) def test_i18n_disabled(self): mocked_sender = mock.MagicMock() watch_for_translation_changes(mocked_sender) mocked_sender.watch_dir.assert_not_called() def test_i18n_enabled(self): mocked_sender = mock.MagicMock() watch_for_translation_changes(mocked_sender) self.assertGreater(mocked_sender.watch_dir.call_count, 1) def test_i18n_locale_paths(self): mocked_sender = mock.MagicMock() with tempfile.TemporaryDirectory() as app_dir: with self.settings(LOCALE_PATHS=[app_dir]): watch_for_translation_changes(mocked_sender) mocked_sender.watch_dir.assert_any_call(Path(app_dir), '**/*.mo') def test_i18n_app_dirs(self): mocked_sender = mock.MagicMock() with self.settings(INSTALLED_APPS=['tests.i18n.sampleproject']): watch_for_translation_changes(mocked_sender) project_dir = Path(__file__).parent / 'sampleproject' / 'locale' mocked_sender.watch_dir.assert_any_call(project_dir, '**/*.mo') def test_i18n_app_dirs_ignore_django_apps(self): mocked_sender = mock.MagicMock() with self.settings(INSTALLED_APPS=['django.contrib.admin']): watch_for_translation_changes(mocked_sender) mocked_sender.watch_dir.assert_called_once_with(Path('locale'), '**/*.mo') def test_i18n_local_locale(self): mocked_sender = mock.MagicMock() watch_for_translation_changes(mocked_sender) locale_dir = Path(__file__).parent / 'locale' mocked_sender.watch_dir.assert_any_call(locale_dir, '**/*.mo') class TranslationFileChangedTests(SimpleTestCase): def setUp(self): self.gettext_translations = gettext_module._translations.copy() self.trans_real_translations = trans_real._translations.copy() def tearDown(self): gettext._translations = self.gettext_translations trans_real._translations = self.trans_real_translations def test_ignores_non_mo_files(self): gettext_module._translations = {'foo': 'bar'} path = Path('test.py') self.assertIsNone(translation_file_changed(None, path)) self.assertEqual(gettext_module._translations, {'foo': 'bar'}) def test_resets_cache_with_mo_files(self): gettext_module._translations = {'foo': 'bar'} trans_real._translations = {'foo': 'bar'} trans_real._default = 1 trans_real._active = False path = Path('test.mo') self.assertIs(translation_file_changed(None, path), True) self.assertEqual(gettext_module._translations, {}) self.assertEqual(trans_real._translations, {}) self.assertIsNone(trans_real._default) 
self.assertIsInstance(trans_real._active, Local) class UtilsTests(SimpleTestCase): def test_round_away_from_one(self): tests = [ (0, 0), (0., 0), (0.25, 0), (0.5, 0), (0.75, 0), (1, 1), (1., 1), (1.25, 2), (1.5, 2), (1.75, 2), (-0., 0), (-0.25, -1), (-0.5, -1), (-0.75, -1), (-1, -1), (-1., -1), (-1.25, -2), (-1.5, -2), (-1.75, -2), ] for value, expected in tests: with self.subTest(value=value): self.assertEqual(round_away_from_one(value), expected)
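# The variant-resolution tests above (get_supported_language_variant and the zh-*/pt-*
# cases) all exercise the same fallback rule. A minimal, self-contained sketch of that
# rule under the assumptions of those tests -- the helper name pick_language_variant is
# hypothetical and this is not Django's implementation:
def pick_language_variant(lang_code, supported):
    """Return lang_code, its generic prefix, or a supported variant of that prefix."""
    # Exact match wins, e.g. 'de-at' stays 'de-at' when it is configured.
    if lang_code in supported:
        return lang_code
    # Otherwise fall back to the generic language, e.g. 'de-ch' -> 'de'.
    generic = lang_code.split('-')[0]
    if generic in supported:
        return generic
    # Or to the first configured variant of it, e.g. 'pt' / 'pt-pt' -> 'pt-br'.
    for code in supported:
        if code.startswith(generic + '-'):
            return code
    raise LookupError(lang_code)


# Mirroring test_get_supported_language_variant_real:
# pick_language_variant('de-ch', ['en', 'de', 'de-at', 'pt-br']) == 'de'
# pick_language_variant('pt-pt', ['en', 'de', 'de-at', 'pt-br']) == 'pt-br'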
2f9f008b611c1f36de59957d59cb163e380de662bff3e6108fea72b190383a8a
import collections.abc from datetime import datetime from math import ceil from operator import attrgetter from django.core.exceptions import FieldError from django.db import connection, models from django.db.models import Exists, Max, OuterRef from django.db.models.functions import Substr from django.test import TestCase, skipUnlessDBFeature from django.test.utils import isolate_apps from django.utils.deprecation import RemovedInDjango40Warning from .models import ( Article, Author, Freebie, Game, IsNullWithNoneAsRHS, Player, Season, Tag, ) class LookupTests(TestCase): @classmethod def setUpTestData(cls): # Create a few Authors. cls.au1 = Author.objects.create(name='Author 1', alias='a1') cls.au2 = Author.objects.create(name='Author 2', alias='a2') # Create a few Articles. cls.a1 = Article.objects.create( headline='Article 1', pub_date=datetime(2005, 7, 26), author=cls.au1, slug='a1', ) cls.a2 = Article.objects.create( headline='Article 2', pub_date=datetime(2005, 7, 27), author=cls.au1, slug='a2', ) cls.a3 = Article.objects.create( headline='Article 3', pub_date=datetime(2005, 7, 27), author=cls.au1, slug='a3', ) cls.a4 = Article.objects.create( headline='Article 4', pub_date=datetime(2005, 7, 28), author=cls.au1, slug='a4', ) cls.a5 = Article.objects.create( headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=cls.au2, slug='a5', ) cls.a6 = Article.objects.create( headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=cls.au2, slug='a6', ) cls.a7 = Article.objects.create( headline='Article 7', pub_date=datetime(2005, 7, 27), author=cls.au2, slug='a7', ) # Create a few Tags. cls.t1 = Tag.objects.create(name='Tag 1') cls.t1.articles.add(cls.a1, cls.a2, cls.a3) cls.t2 = Tag.objects.create(name='Tag 2') cls.t2.articles.add(cls.a3, cls.a4, cls.a5) cls.t3 = Tag.objects.create(name='Tag 3') cls.t3.articles.add(cls.a5, cls.a6, cls.a7) def test_exists(self): # We can use .exists() to check that there are some self.assertTrue(Article.objects.exists()) for a in Article.objects.all(): a.delete() # There should be none now! self.assertFalse(Article.objects.exists()) def test_lookup_int_as_str(self): # Integer value can be queried using string self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)), ['<Article: Article 1>']) @skipUnlessDBFeature('supports_date_lookup_using_string') def test_lookup_date_as_str(self): # A date lookup can be performed using a string search self.assertQuerysetEqual( Article.objects.filter(pub_date__startswith='2005'), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) def test_iterator(self): # Each QuerySet gets iterator(), which is a generator that "lazily" # returns results using database-level iteration. self.assertIsInstance(Article.objects.iterator(), collections.abc.Iterator) self.assertQuerysetEqual( Article.objects.iterator(), [ 'Article 5', 'Article 6', 'Article 4', 'Article 2', 'Article 3', 'Article 7', 'Article 1', ], transform=attrgetter('headline') ) # iterator() can be used on any QuerySet. self.assertQuerysetEqual( Article.objects.filter(headline__endswith='4').iterator(), ['Article 4'], transform=attrgetter('headline')) def test_count(self): # count() returns the number of objects matching search criteria. 
self.assertEqual(Article.objects.count(), 7) self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3) self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0) # count() should respect sliced query sets. articles = Article.objects.all() self.assertEqual(articles.count(), 7) self.assertEqual(articles[:4].count(), 4) self.assertEqual(articles[1:100].count(), 6) self.assertEqual(articles[10:100].count(), 0) # Date and date/time lookups can also be done with strings. self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3) def test_in_bulk(self): # in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects. arts = Article.objects.in_bulk([self.a1.id, self.a2.id]) self.assertEqual(arts[self.a1.id], self.a1) self.assertEqual(arts[self.a2.id], self.a2) self.assertEqual( Article.objects.in_bulk(), { self.a1.id: self.a1, self.a2.id: self.a2, self.a3.id: self.a3, self.a4.id: self.a4, self.a5.id: self.a5, self.a6.id: self.a6, self.a7.id: self.a7, } ) self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk([1000]), {}) self.assertEqual(Article.objects.in_bulk([]), {}) self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1}) self.assertEqual(Article.objects.in_bulk(iter([])), {}) with self.assertRaises(TypeError): Article.objects.in_bulk(headline__startswith='Blah') def test_in_bulk_lots_of_ids(self): test_range = 2000 max_query_params = connection.features.max_query_params expected_num_queries = ceil(test_range / max_query_params) if max_query_params else 1 Author.objects.bulk_create([Author() for i in range(test_range - Author.objects.count())]) authors = {author.pk: author for author in Author.objects.all()} with self.assertNumQueries(expected_num_queries): self.assertEqual(Author.objects.in_bulk(authors), authors) def test_in_bulk_with_field(self): self.assertEqual( Article.objects.in_bulk([self.a1.slug, self.a2.slug, self.a3.slug], field_name='slug'), { self.a1.slug: self.a1, self.a2.slug: self.a2, self.a3.slug: self.a3, } ) def test_in_bulk_meta_constraint(self): season_2011 = Season.objects.create(year=2011) season_2012 = Season.objects.create(year=2012) Season.objects.create(year=2013) self.assertEqual( Season.objects.in_bulk( [season_2011.year, season_2012.year], field_name='year', ), {season_2011.year: season_2011, season_2012.year: season_2012}, ) def test_in_bulk_non_unique_field(self): msg = "in_bulk()'s field_name must be a unique field but 'author' isn't." with self.assertRaisesMessage(ValueError, msg): Article.objects.in_bulk([self.au1], field_name='author') @skipUnlessDBFeature('can_distinct_on_fields') def test_in_bulk_distinct_field(self): self.assertEqual( Article.objects.order_by('headline').distinct('headline').in_bulk( [self.a1.headline, self.a5.headline], field_name='headline', ), {self.a1.headline: self.a1, self.a5.headline: self.a5}, ) @skipUnlessDBFeature('can_distinct_on_fields') def test_in_bulk_multiple_distinct_field(self): msg = "in_bulk()'s field_name must be a unique field but 'pub_date' isn't." 
with self.assertRaisesMessage(ValueError, msg): Article.objects.order_by('headline', 'pub_date').distinct( 'headline', 'pub_date', ).in_bulk(field_name='pub_date') @isolate_apps('lookup') def test_in_bulk_non_unique_meta_constaint(self): class Model(models.Model): ean = models.CharField(max_length=100) brand = models.CharField(max_length=100) name = models.CharField(max_length=80) class Meta: constraints = [ models.UniqueConstraint( fields=['ean'], name='partial_ean_unique', condition=models.Q(is_active=True) ), models.UniqueConstraint( fields=['brand', 'name'], name='together_brand_name_unique', ), ] msg = "in_bulk()'s field_name must be a unique field but '%s' isn't." for field_name in ['brand', 'ean']: with self.subTest(field_name=field_name): with self.assertRaisesMessage(ValueError, msg % field_name): Model.objects.in_bulk(field_name=field_name) def test_values(self): # values() returns a list of dictionaries instead of object instances -- # and you can specify which fields you want to retrieve. self.assertSequenceEqual( Article.objects.values('headline'), [ {'headline': 'Article 5'}, {'headline': 'Article 6'}, {'headline': 'Article 4'}, {'headline': 'Article 2'}, {'headline': 'Article 3'}, {'headline': 'Article 7'}, {'headline': 'Article 1'}, ], ) self.assertSequenceEqual( Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'), [{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}], ) self.assertSequenceEqual( Article.objects.values('id', 'headline'), [ {'id': self.a5.id, 'headline': 'Article 5'}, {'id': self.a6.id, 'headline': 'Article 6'}, {'id': self.a4.id, 'headline': 'Article 4'}, {'id': self.a2.id, 'headline': 'Article 2'}, {'id': self.a3.id, 'headline': 'Article 3'}, {'id': self.a7.id, 'headline': 'Article 7'}, {'id': self.a1.id, 'headline': 'Article 1'}, ], ) # You can use values() with iterator() for memory savings, # because iterator() uses database-level iteration. self.assertSequenceEqual( list(Article.objects.values('id', 'headline').iterator()), [ {'headline': 'Article 5', 'id': self.a5.id}, {'headline': 'Article 6', 'id': self.a6.id}, {'headline': 'Article 4', 'id': self.a4.id}, {'headline': 'Article 2', 'id': self.a2.id}, {'headline': 'Article 3', 'id': self.a3.id}, {'headline': 'Article 7', 'id': self.a7.id}, {'headline': 'Article 1', 'id': self.a1.id}, ], ) # The values() method works with "extra" fields specified in extra(select). self.assertSequenceEqual( Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'), [ {'id': self.a5.id, 'id_plus_one': self.a5.id + 1}, {'id': self.a6.id, 'id_plus_one': self.a6.id + 1}, {'id': self.a4.id, 'id_plus_one': self.a4.id + 1}, {'id': self.a2.id, 'id_plus_one': self.a2.id + 1}, {'id': self.a3.id, 'id_plus_one': self.a3.id + 1}, {'id': self.a7.id, 'id_plus_one': self.a7.id + 1}, {'id': self.a1.id, 'id_plus_one': self.a1.id + 1}, ], ) data = { 'id_plus_one': 'id+1', 'id_plus_two': 'id+2', 'id_plus_three': 'id+3', 'id_plus_four': 'id+4', 'id_plus_five': 'id+5', 'id_plus_six': 'id+6', 'id_plus_seven': 'id+7', 'id_plus_eight': 'id+8', } self.assertSequenceEqual( Article.objects.filter(id=self.a1.id).extra(select=data).values(*data), [{ 'id_plus_one': self.a1.id + 1, 'id_plus_two': self.a1.id + 2, 'id_plus_three': self.a1.id + 3, 'id_plus_four': self.a1.id + 4, 'id_plus_five': self.a1.id + 5, 'id_plus_six': self.a1.id + 6, 'id_plus_seven': self.a1.id + 7, 'id_plus_eight': self.a1.id + 8, }], ) # You can specify fields from forward and reverse relations, just like filter(). 
self.assertSequenceEqual( Article.objects.values('headline', 'author__name'), [ {'headline': self.a5.headline, 'author__name': self.au2.name}, {'headline': self.a6.headline, 'author__name': self.au2.name}, {'headline': self.a4.headline, 'author__name': self.au1.name}, {'headline': self.a2.headline, 'author__name': self.au1.name}, {'headline': self.a3.headline, 'author__name': self.au1.name}, {'headline': self.a7.headline, 'author__name': self.au2.name}, {'headline': self.a1.headline, 'author__name': self.au1.name}, ], ) self.assertSequenceEqual( Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'), [ {'name': self.au1.name, 'article__headline': self.a1.headline}, {'name': self.au1.name, 'article__headline': self.a2.headline}, {'name': self.au1.name, 'article__headline': self.a3.headline}, {'name': self.au1.name, 'article__headline': self.a4.headline}, {'name': self.au2.name, 'article__headline': self.a5.headline}, {'name': self.au2.name, 'article__headline': self.a6.headline}, {'name': self.au2.name, 'article__headline': self.a7.headline}, ], ) self.assertSequenceEqual( ( Author.objects .values('name', 'article__headline', 'article__tag__name') .order_by('name', 'article__headline', 'article__tag__name') ), [ {'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name}, {'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name}, {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name}, {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name}, {'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name}, {'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name}, ], ) # However, an exception FieldDoesNotExist will be thrown if you specify # a nonexistent field name in values() (a field that is neither in the # model nor in extra(select)). msg = ( "Cannot resolve keyword 'id_plus_two' into field. Choices are: " "author, author_id, headline, id, id_plus_one, pub_date, slug, tag" ) with self.assertRaisesMessage(FieldError, msg): Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_two') # If you don't specify field names to values(), all are returned. self.assertSequenceEqual( Article.objects.filter(id=self.a5.id).values(), [{ 'id': self.a5.id, 'author_id': self.au2.id, 'headline': 'Article 5', 'pub_date': datetime(2005, 8, 1, 9, 0), 'slug': 'a5', }], ) def test_values_list(self): # values_list() is similar to values(), except that the results are # returned as a list of tuples, rather than a list of dictionaries. # Within each tuple, the order of the elements is the same as the order # of fields in the values_list() call. 
self.assertSequenceEqual( Article.objects.values_list('headline'), [ ('Article 5',), ('Article 6',), ('Article 4',), ('Article 2',), ('Article 3',), ('Article 7',), ('Article 1',), ], ) self.assertSequenceEqual( Article.objects.values_list('id').order_by('id'), [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)], ) self.assertSequenceEqual( Article.objects.values_list('id', flat=True).order_by('id'), [self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id], ) self.assertSequenceEqual( Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id'), [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)], ) self.assertSequenceEqual( Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id_plus_one', 'id'), [ (self.a1.id + 1, self.a1.id), (self.a2.id + 1, self.a2.id), (self.a3.id + 1, self.a3.id), (self.a4.id + 1, self.a4.id), (self.a5.id + 1, self.a5.id), (self.a6.id + 1, self.a6.id), (self.a7.id + 1, self.a7.id) ], ) self.assertSequenceEqual( Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id', 'id_plus_one'), [ (self.a1.id, self.a1.id + 1), (self.a2.id, self.a2.id + 1), (self.a3.id, self.a3.id + 1), (self.a4.id, self.a4.id + 1), (self.a5.id, self.a5.id + 1), (self.a6.id, self.a6.id + 1), (self.a7.id, self.a7.id + 1) ], ) args = ('name', 'article__headline', 'article__tag__name') self.assertSequenceEqual( Author.objects.values_list(*args).order_by(*args), [ (self.au1.name, self.a1.headline, self.t1.name), (self.au1.name, self.a2.headline, self.t1.name), (self.au1.name, self.a3.headline, self.t1.name), (self.au1.name, self.a3.headline, self.t2.name), (self.au1.name, self.a4.headline, self.t2.name), (self.au2.name, self.a5.headline, self.t2.name), (self.au2.name, self.a5.headline, self.t3.name), (self.au2.name, self.a6.headline, self.t3.name), (self.au2.name, self.a7.headline, self.t3.name), ], ) with self.assertRaises(TypeError): Article.objects.values_list('id', 'headline', flat=True) def test_get_next_previous_by(self): # Every DateField and DateTimeField creates get_next_by_FOO() and # get_previous_by_FOO() methods. In the case of identical date values, # these methods will use the ID as a fallback check. This guarantees # that no records are skipped or duplicated. 
self.assertEqual(repr(self.a1.get_next_by_pub_date()), '<Article: Article 2>') self.assertEqual(repr(self.a2.get_next_by_pub_date()), '<Article: Article 3>') self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')), '<Article: Article 6>') self.assertEqual(repr(self.a3.get_next_by_pub_date()), '<Article: Article 7>') self.assertEqual(repr(self.a4.get_next_by_pub_date()), '<Article: Article 6>') with self.assertRaises(Article.DoesNotExist): self.a5.get_next_by_pub_date() self.assertEqual(repr(self.a6.get_next_by_pub_date()), '<Article: Article 5>') self.assertEqual(repr(self.a7.get_next_by_pub_date()), '<Article: Article 4>') self.assertEqual(repr(self.a7.get_previous_by_pub_date()), '<Article: Article 3>') self.assertEqual(repr(self.a6.get_previous_by_pub_date()), '<Article: Article 4>') self.assertEqual(repr(self.a5.get_previous_by_pub_date()), '<Article: Article 6>') self.assertEqual(repr(self.a4.get_previous_by_pub_date()), '<Article: Article 7>') self.assertEqual(repr(self.a3.get_previous_by_pub_date()), '<Article: Article 2>') self.assertEqual(repr(self.a2.get_previous_by_pub_date()), '<Article: Article 1>') def test_escaping(self): # Underscores, percent signs and backslashes have special meaning in the # underlying SQL code, but Django handles the quoting of them automatically. Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20)) self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Article'), [ '<Article: Article_ with underscore>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Article_'), ['<Article: Article_ with underscore>'] ) Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21)) self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Article'), [ '<Article: Article% with percent sign>', '<Article: Article_ with underscore>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Article%'), ['<Article: Article% with percent sign>'] ) Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22)) self.assertQuerysetEqual( Article.objects.filter(headline__contains='\\'), [r'<Article: Article with \ backslash>'] ) def test_exclude(self): Article.objects.bulk_create([ Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20)), Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21)), Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22)), ]) # exclude() is the opposite of filter() when doing lookups: self.assertQuerysetEqual( Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) self.assertQuerysetEqual( Article.objects.exclude(headline__startswith="Article_"), [ '<Article: Article with \\ backslash>', '<Article: Article% with percent sign>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: 
Article 7>', '<Article: Article 1>', ] ) self.assertQuerysetEqual( Article.objects.exclude(headline="Article 7"), [ '<Article: Article with \\ backslash>', '<Article: Article% with percent sign>', '<Article: Article_ with underscore>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 1>', ] ) def test_none(self): # none() returns a QuerySet that behaves like any other QuerySet object self.assertQuerysetEqual(Article.objects.none(), []) self.assertQuerysetEqual(Article.objects.none().filter(headline__startswith='Article'), []) self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article').none(), []) self.assertEqual(Article.objects.none().count(), 0) self.assertEqual(Article.objects.none().update(headline="This should not take effect"), 0) self.assertQuerysetEqual(Article.objects.none().iterator(), []) def test_in(self): self.assertQuerysetEqual( Article.objects.exclude(id__in=[]), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ] ) def test_in_empty_list(self): self.assertSequenceEqual(Article.objects.filter(id__in=[]), []) def test_in_different_database(self): with self.assertRaisesMessage( ValueError, "Subqueries aren't allowed across different databases. Force the " "inner query to be evaluated using `list(inner_query)`." ): list(Article.objects.filter(id__in=Article.objects.using('other').all())) def test_in_keeps_value_ordering(self): query = Article.objects.filter(slug__in=['a%d' % i for i in range(1, 8)]).values('pk').query self.assertIn(' IN (a1, a2, a3, a4, a5, a6, a7) ', str(query)) def test_in_ignore_none(self): with self.assertNumQueries(1) as ctx: self.assertSequenceEqual( Article.objects.filter(id__in=[None, self.a1.id]), [self.a1], ) sql = ctx.captured_queries[0]['sql'] self.assertIn('IN (%s)' % self.a1.pk, sql) def test_in_ignore_solo_none(self): with self.assertNumQueries(0): self.assertSequenceEqual(Article.objects.filter(id__in=[None]), []) def test_in_ignore_none_with_unhashable_items(self): class UnhashableInt(int): __hash__ = None with self.assertNumQueries(1) as ctx: self.assertSequenceEqual( Article.objects.filter(id__in=[None, UnhashableInt(self.a1.id)]), [self.a1], ) sql = ctx.captured_queries[0]['sql'] self.assertIn('IN (%s)' % self.a1.pk, sql) def test_error_messages(self): # Programming errors are pointed out with nice error messages with self.assertRaisesMessage( FieldError, "Cannot resolve keyword 'pub_date_year' into field. Choices are: " "author, author_id, headline, id, pub_date, slug, tag" ): Article.objects.filter(pub_date_year='2005').count() def test_unsupported_lookups(self): with self.assertRaisesMessage( FieldError, "Unsupported lookup 'starts' for CharField or join on the field " "not permitted, perhaps you meant startswith or istartswith?" ): Article.objects.filter(headline__starts='Article') with self.assertRaisesMessage( FieldError, "Unsupported lookup 'is_null' for DateTimeField or join on the field " "not permitted, perhaps you meant isnull?" ): Article.objects.filter(pub_date__is_null=True) with self.assertRaisesMessage( FieldError, "Unsupported lookup 'gobbledygook' for DateTimeField or join on the field " "not permitted." ): Article.objects.filter(pub_date__gobbledygook='blahblah') def test_relation_nested_lookup_error(self): # An invalid nested lookup on a related field raises a useful error. 
msg = 'Related Field got invalid lookup: editor' with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(author__editor__name='James') msg = 'Related Field got invalid lookup: foo' with self.assertRaisesMessage(FieldError, msg): Tag.objects.filter(articles__foo='bar') def test_regex(self): # Create some articles with a bit more interesting headlines for testing field lookups: for a in Article.objects.all(): a.delete() now = datetime.now() Article.objects.bulk_create([ Article(pub_date=now, headline='f'), Article(pub_date=now, headline='fo'), Article(pub_date=now, headline='foo'), Article(pub_date=now, headline='fooo'), Article(pub_date=now, headline='hey-Foo'), Article(pub_date=now, headline='bar'), Article(pub_date=now, headline='AbBa'), Article(pub_date=now, headline='baz'), Article(pub_date=now, headline='baxZ'), ]) # zero-or-more self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'fo*'), ['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>'] ) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'fo*'), [ '<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>', '<Article: hey-Foo>', ] ) # one-or-more self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'fo+'), ['<Article: fo>', '<Article: foo>', '<Article: fooo>'] ) # wildcard self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'fooo?'), ['<Article: foo>', '<Article: fooo>'] ) # leading anchor self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'^b'), ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'] ) self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'), ['<Article: AbBa>']) # trailing anchor self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'), ['<Article: baz>']) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'z$'), ['<Article: baxZ>', '<Article: baz>'] ) # character sets self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'ba[rz]'), ['<Article: bar>', '<Article: baz>'] ) self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'), ['<Article: baxZ>']) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'ba[RxZ]'), ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'] ) # and more articles: Article.objects.bulk_create([ Article(pub_date=now, headline='foobar'), Article(pub_date=now, headline='foobaz'), Article(pub_date=now, headline='ooF'), Article(pub_date=now, headline='foobarbaz'), Article(pub_date=now, headline='zoocarfaz'), Article(pub_date=now, headline='barfoobaz'), Article(pub_date=now, headline='bazbaRFOO'), ]) # alternation self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'oo(f|b)'), [ '<Article: barfoobaz>', '<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'oo(f|b)'), [ '<Article: barfoobaz>', '<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>', '<Article: ooF>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'^foo(f|b)'), ['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>'] ) # greedy matching self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'b.*az'), [ '<Article: barfoobaz>', '<Article: baz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>', '<Article: foobaz>', ] ) self.assertQuerysetEqual( Article.objects.filter(headline__iregex=r'b.*ar'), [ '<Article: bar>', '<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobar>', 
'<Article: foobarbaz>', ] ) @skipUnlessDBFeature('supports_regex_backreferencing') def test_regex_backreferencing(self): # grouping and backreferences now = datetime.now() Article.objects.bulk_create([ Article(pub_date=now, headline='foobar'), Article(pub_date=now, headline='foobaz'), Article(pub_date=now, headline='ooF'), Article(pub_date=now, headline='foobarbaz'), Article(pub_date=now, headline='zoocarfaz'), Article(pub_date=now, headline='barfoobaz'), Article(pub_date=now, headline='bazbaRFOO'), ]) self.assertQuerysetEqual( Article.objects.filter(headline__regex=r'b(.).*b\1'), ['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>'] ) def test_regex_null(self): """ A regex lookup does not fail on null/None values """ Season.objects.create(year=2012, gt=None) self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), []) def test_regex_non_string(self): """ A regex lookup does not fail on non-string fields """ Season.objects.create(year=2013, gt=444) self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'), ['<Season: 2013>']) def test_regex_non_ascii(self): """ A regex lookup does not trip on non-ASCII characters. """ Player.objects.create(name='\u2660') Player.objects.get(name__regex='\u2660') def test_nonfield_lookups(self): """ A lookup query containing non-fields raises the proper exception. """ msg = "Unsupported lookup 'blahblah' for CharField or join on the field not permitted." with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(headline__blahblah=99) with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(headline__blahblah__exact=99) msg = ( "Cannot resolve keyword 'blahblah' into field. Choices are: " "author, author_id, headline, id, pub_date, slug, tag" ) with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(blahblah=99) def test_lookup_collision(self): """ Genuine field names don't collide with built-in lookup types ('year', 'gt', 'range', 'in' etc.) (#11670). """ # 'gt' is used as a code number for the year, e.g. 111=>2009. season_2009 = Season.objects.create(year=2009, gt=111) season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals") season_2010 = Season.objects.create(year=2010, gt=222) season_2010.games.create(home="Houston Astros", away="Chicago Cubs") season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers") season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals") season_2011 = Season.objects.create(year=2011, gt=333) season_2011.games.create(home="Houston Astros", away="St. 
Louis Cardinals") season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers") hunter_pence = Player.objects.create(name="Hunter Pence") hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010])) pudge = Player.objects.create(name="Ivan Rodriquez") pudge.games.set(Game.objects.filter(season__year=2009)) pedro_feliz = Player.objects.create(name="Pedro Feliz") pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011])) johnson = Player.objects.create(name="Johnson") johnson.games.set(Game.objects.filter(season__year__in=[2011])) # Games in 2010 self.assertEqual(Game.objects.filter(season__year=2010).count(), 3) self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3) self.assertEqual(Game.objects.filter(season__gt=222).count(), 3) self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3) # Games in 2011 self.assertEqual(Game.objects.filter(season__year=2011).count(), 2) self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2) self.assertEqual(Game.objects.filter(season__gt=333).count(), 2) self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2) self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2) self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2) # Games played in 2010 and 2011 self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5) self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5) self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5) self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5) # Players who played in 2009 self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2) # Players who played in 2010 self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1) self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1) self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1) self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1) # Players who played in 2011 self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2) def test_chain_date_time_lookups(self): self.assertQuerysetEqual( Article.objects.filter(pub_date__month__gt=7), ['<Article: Article 5>', '<Article: Article 6>'], ordered=False ) self.assertQuerysetEqual( Article.objects.filter(pub_date__day__gte=27), ['<Article: Article 2>', '<Article: Article 3>', '<Article: Article 4>', '<Article: Article 7>'], ordered=False ) self.assertQuerysetEqual( Article.objects.filter(pub_date__hour__lt=8), ['<Article: Article 1>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 4>', '<Article: Article 7>'], ordered=False ) self.assertQuerysetEqual( Article.objects.filter(pub_date__minute__lte=0), ['<Article: 
Article 1>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 4>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 7>'], ordered=False ) def test_exact_none_transform(self): """Transforms are used for __exact=None.""" Season.objects.create(year=1, nulled_text_field='not null') self.assertFalse(Season.objects.filter(nulled_text_field__isnull=True)) self.assertTrue(Season.objects.filter(nulled_text_field__nulled__isnull=True)) self.assertTrue(Season.objects.filter(nulled_text_field__nulled__exact=None)) self.assertTrue(Season.objects.filter(nulled_text_field__nulled=None)) def test_exact_sliced_queryset_limit_one(self): self.assertCountEqual( Article.objects.filter(author=Author.objects.all()[:1]), [self.a1, self.a2, self.a3, self.a4] ) def test_exact_sliced_queryset_limit_one_offset(self): self.assertCountEqual( Article.objects.filter(author=Author.objects.all()[1:2]), [self.a5, self.a6, self.a7] ) def test_exact_sliced_queryset_not_limited_to_one(self): msg = ( 'The QuerySet value for an exact lookup must be limited to one ' 'result using slicing.' ) with self.assertRaisesMessage(ValueError, msg): list(Article.objects.filter(author=Author.objects.all()[:2])) with self.assertRaisesMessage(ValueError, msg): list(Article.objects.filter(author=Author.objects.all()[1:])) def test_custom_field_none_rhs(self): """ __exact=value is transformed to __isnull=True if Field.get_prep_value() converts value to None. """ season = Season.objects.create(year=2012, nulled_text_field=None) self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull=True)) self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field='')) def test_pattern_lookups_with_substr(self): a = Author.objects.create(name='John Smith', alias='Johx') b = Author.objects.create(name='Rhonda Simpson', alias='sonx') tests = ( ('startswith', [a]), ('istartswith', [a]), ('contains', [a, b]), ('icontains', [a, b]), ('endswith', [b]), ('iendswith', [b]), ) for lookup, result in tests: with self.subTest(lookup=lookup): authors = Author.objects.filter(**{'name__%s' % lookup: Substr('alias', 1, 3)}) self.assertCountEqual(authors, result) def test_custom_lookup_none_rhs(self): """Lookup.can_use_none_as_rhs=True allows None as a lookup value.""" season = Season.objects.create(year=2012, nulled_text_field=None) query = Season.objects.get_queryset().query field = query.model._meta.get_field('nulled_text_field') self.assertIsInstance(query.build_lookup(['isnull_none_rhs'], field, None), IsNullWithNoneAsRHS) self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull_none_rhs=True)) def test_exact_exists(self): qs = Article.objects.filter(pk=OuterRef('pk')) seasons = Season.objects.annotate( pk_exists=Exists(qs), ).filter( pk_exists=Exists(qs), ) self.assertCountEqual(seasons, Season.objects.all()) def test_nested_outerref_lhs(self): tag = Tag.objects.create(name=self.au1.alias) tag.articles.add(self.a1) qs = Tag.objects.annotate( has_author_alias_match=Exists( Article.objects.annotate( author_exists=Exists( Author.objects.filter(alias=OuterRef(OuterRef('name'))) ), ).filter(author_exists=True) ), ) self.assertEqual(qs.get(has_author_alias_match=True), tag) def test_exact_query_rhs_with_selected_columns(self): newest_author = Author.objects.create(name='Author 2') authors_max_ids = Author.objects.filter( name='Author 2', ).values( 'name', ).annotate( max_id=Max('id'), ).values('max_id') authors = Author.objects.filter(id=authors_max_ids[:1]) self.assertEqual(authors.get(), 
newest_author) def test_isnull_non_boolean_value(self): # These tests will catch ValueError in Django 4.0 when using # non-boolean values for an isnull lookup becomes forbidden. # msg = ( # 'The QuerySet value for an isnull lookup must be True or False.' # ) msg = ( 'Using a non-boolean value for an isnull lookup is deprecated, ' 'use True or False instead.' ) tests = [ Author.objects.filter(alias__isnull=1), Article.objects.filter(author__isnull=1), Season.objects.filter(games__isnull=1), Freebie.objects.filter(stock__isnull=1), ] for qs in tests: with self.subTest(qs=qs): with self.assertWarnsMessage(RemovedInDjango40Warning, msg): qs.exists()
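# The in_bulk() assertions above all check the same contract: a mapping from values of a
# unique field to the matching instances, with unknown keys silently dropped. A minimal
# pure-Python sketch of that contract over plain objects (the helper name in_bulk_sketch
# is hypothetical; this is not Django's implementation):
def in_bulk_sketch(rows, id_list=None, field_name='id'):
    """Map unique field values to row objects, like QuerySet.in_bulk()."""
    by_field = {getattr(row, field_name): row for row in rows}
    if id_list is None:
        # No arguments: return everything, keyed by the field.
        return by_field
    # Only requested keys that actually exist are included; missing ids vanish.
    return {key: by_field[key] for key in id_list if key in by_field}


# Example with a stand-in record type:
# import collections
# Row = collections.namedtuple('Row', ['id', 'slug'])
# rows = [Row(1, 'a1'), Row(2, 'a2')]
# in_bulk_sketch(rows, [1, 1000]) == {1: Row(1, 'a1')}
# in_bulk_sketch(rows, ['a2'], field_name='slug') == {'a2': Row(2, 'a2')}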
e6f1a1a3cf13a192933c635e6ed52fc14883ab36a003eed5daa6e297ddcdc8d0
import copy import json import os import pickle import unittest import uuid from django.core.exceptions import DisallowedRedirect from django.core.serializers.json import DjangoJSONEncoder from django.core.signals import request_finished from django.db import close_old_connections from django.http import ( BadHeaderError, HttpResponse, HttpResponseNotAllowed, HttpResponseNotModified, HttpResponsePermanentRedirect, HttpResponseRedirect, JsonResponse, QueryDict, SimpleCookie, StreamingHttpResponse, parse_cookie, ) from django.test import SimpleTestCase from django.utils.functional import lazystr class QueryDictTests(SimpleTestCase): def test_create_with_no_args(self): self.assertEqual(QueryDict(), QueryDict('')) def test_missing_key(self): q = QueryDict() with self.assertRaises(KeyError): q.__getitem__('foo') def test_immutability(self): q = QueryDict() with self.assertRaises(AttributeError): q.__setitem__('something', 'bar') with self.assertRaises(AttributeError): q.setlist('foo', ['bar']) with self.assertRaises(AttributeError): q.appendlist('foo', ['bar']) with self.assertRaises(AttributeError): q.update({'foo': 'bar'}) with self.assertRaises(AttributeError): q.pop('foo') with self.assertRaises(AttributeError): q.popitem() with self.assertRaises(AttributeError): q.clear() def test_immutable_get_with_default(self): q = QueryDict() self.assertEqual(q.get('foo', 'default'), 'default') def test_immutable_basic_operations(self): q = QueryDict() self.assertEqual(q.getlist('foo'), []) self.assertNotIn('foo', q) self.assertEqual(list(q), []) self.assertEqual(list(q.items()), []) self.assertEqual(list(q.lists()), []) self.assertEqual(list(q.keys()), []) self.assertEqual(list(q.values()), []) self.assertEqual(len(q), 0) self.assertEqual(q.urlencode(), '') def test_single_key_value(self): """Test QueryDict with one key/value pair""" q = QueryDict('foo=bar') self.assertEqual(q['foo'], 'bar') with self.assertRaises(KeyError): q.__getitem__('bar') with self.assertRaises(AttributeError): q.__setitem__('something', 'bar') self.assertEqual(q.get('foo', 'default'), 'bar') self.assertEqual(q.get('bar', 'default'), 'default') self.assertEqual(q.getlist('foo'), ['bar']) self.assertEqual(q.getlist('bar'), []) with self.assertRaises(AttributeError): q.setlist('foo', ['bar']) with self.assertRaises(AttributeError): q.appendlist('foo', ['bar']) self.assertIn('foo', q) self.assertNotIn('bar', q) self.assertEqual(list(q), ['foo']) self.assertEqual(list(q.items()), [('foo', 'bar')]) self.assertEqual(list(q.lists()), [('foo', ['bar'])]) self.assertEqual(list(q.keys()), ['foo']) self.assertEqual(list(q.values()), ['bar']) self.assertEqual(len(q), 1) with self.assertRaises(AttributeError): q.update({'foo': 'bar'}) with self.assertRaises(AttributeError): q.pop('foo') with self.assertRaises(AttributeError): q.popitem() with self.assertRaises(AttributeError): q.clear() with self.assertRaises(AttributeError): q.setdefault('foo', 'bar') self.assertEqual(q.urlencode(), 'foo=bar') def test_urlencode(self): q = QueryDict(mutable=True) q['next'] = '/a&b/' self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F') self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/') q = QueryDict(mutable=True) q['next'] = '/t\xebst&key/' self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F') self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/') def test_urlencode_int(self): # Normally QueryDict doesn't contain non-string values but lazily # written tests may make that mistake. 
q = QueryDict(mutable=True) q['a'] = 1 self.assertEqual(q.urlencode(), 'a=1') def test_mutable_copy(self): """A copy of a QueryDict is mutable.""" q = QueryDict().copy() with self.assertRaises(KeyError): q.__getitem__("foo") q['name'] = 'john' self.assertEqual(q['name'], 'john') def test_mutable_delete(self): q = QueryDict(mutable=True) q['name'] = 'john' del q['name'] self.assertNotIn('name', q) def test_basic_mutable_operations(self): q = QueryDict(mutable=True) q['name'] = 'john' self.assertEqual(q.get('foo', 'default'), 'default') self.assertEqual(q.get('name', 'default'), 'john') self.assertEqual(q.getlist('name'), ['john']) self.assertEqual(q.getlist('foo'), []) q.setlist('foo', ['bar', 'baz']) self.assertEqual(q.get('foo', 'default'), 'baz') self.assertEqual(q.getlist('foo'), ['bar', 'baz']) q.appendlist('foo', 'another') self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another']) self.assertEqual(q['foo'], 'another') self.assertIn('foo', q) self.assertCountEqual(q, ['foo', 'name']) self.assertCountEqual(q.items(), [('foo', 'another'), ('name', 'john')]) self.assertCountEqual(q.lists(), [('foo', ['bar', 'baz', 'another']), ('name', ['john'])]) self.assertCountEqual(q.keys(), ['foo', 'name']) self.assertCountEqual(q.values(), ['another', 'john']) q.update({'foo': 'hello'}) self.assertEqual(q['foo'], 'hello') self.assertEqual(q.get('foo', 'not available'), 'hello') self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello']) self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello']) self.assertEqual(q.pop('foo', 'not there'), 'not there') self.assertEqual(q.get('foo', 'not there'), 'not there') self.assertEqual(q.setdefault('foo', 'bar'), 'bar') self.assertEqual(q['foo'], 'bar') self.assertEqual(q.getlist('foo'), ['bar']) self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar']) q.clear() self.assertEqual(len(q), 0) def test_multiple_keys(self): """Test QueryDict with two key/value pairs with same keys.""" q = QueryDict('vote=yes&vote=no') self.assertEqual(q['vote'], 'no') with self.assertRaises(AttributeError): q.__setitem__('something', 'bar') self.assertEqual(q.get('vote', 'default'), 'no') self.assertEqual(q.get('foo', 'default'), 'default') self.assertEqual(q.getlist('vote'), ['yes', 'no']) self.assertEqual(q.getlist('foo'), []) with self.assertRaises(AttributeError): q.setlist('foo', ['bar', 'baz']) with self.assertRaises(AttributeError): q.setlist('foo', ['bar', 'baz']) with self.assertRaises(AttributeError): q.appendlist('foo', ['bar']) self.assertIn('vote', q) self.assertNotIn('foo', q) self.assertEqual(list(q), ['vote']) self.assertEqual(list(q.items()), [('vote', 'no')]) self.assertEqual(list(q.lists()), [('vote', ['yes', 'no'])]) self.assertEqual(list(q.keys()), ['vote']) self.assertEqual(list(q.values()), ['no']) self.assertEqual(len(q), 1) with self.assertRaises(AttributeError): q.update({'foo': 'bar'}) with self.assertRaises(AttributeError): q.pop('foo') with self.assertRaises(AttributeError): q.popitem() with self.assertRaises(AttributeError): q.clear() with self.assertRaises(AttributeError): q.setdefault('foo', 'bar') with self.assertRaises(AttributeError): q.__delitem__('vote') def test_pickle(self): q = QueryDict() q1 = pickle.loads(pickle.dumps(q, 2)) self.assertEqual(q, q1) q = QueryDict('a=b&c=d') q1 = pickle.loads(pickle.dumps(q, 2)) self.assertEqual(q, q1) q = QueryDict('a=b&c=d&a=1') q1 = pickle.loads(pickle.dumps(q, 2)) self.assertEqual(q, q1) def test_update_from_querydict(self): """Regression test for #8278: 
QueryDict.update(QueryDict)""" x = QueryDict("a=1&a=2", mutable=True) y = QueryDict("a=3&a=4") x.update(y) self.assertEqual(x.getlist('a'), ['1', '2', '3', '4']) def test_non_default_encoding(self): """#13572 - QueryDict with a non-default encoding""" q = QueryDict('cur=%A4', encoding='iso-8859-15') self.assertEqual(q.encoding, 'iso-8859-15') self.assertEqual(list(q.items()), [('cur', '€')]) self.assertEqual(q.urlencode(), 'cur=%A4') q = q.copy() self.assertEqual(q.encoding, 'iso-8859-15') self.assertEqual(list(q.items()), [('cur', '€')]) self.assertEqual(q.urlencode(), 'cur=%A4') self.assertEqual(copy.copy(q).encoding, 'iso-8859-15') self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15') def test_querydict_fromkeys(self): self.assertEqual(QueryDict.fromkeys(['key1', 'key2', 'key3']), QueryDict('key1&key2&key3')) def test_fromkeys_with_nonempty_value(self): self.assertEqual( QueryDict.fromkeys(['key1', 'key2', 'key3'], value='val'), QueryDict('key1=val&key2=val&key3=val') ) def test_fromkeys_is_immutable_by_default(self): # Match behavior of __init__() which is also immutable by default. q = QueryDict.fromkeys(['key1', 'key2', 'key3']) with self.assertRaisesMessage(AttributeError, 'This QueryDict instance is immutable'): q['key4'] = 'nope' def test_fromkeys_mutable_override(self): q = QueryDict.fromkeys(['key1', 'key2', 'key3'], mutable=True) q['key4'] = 'yep' self.assertEqual(q, QueryDict('key1&key2&key3&key4=yep')) def test_duplicates_in_fromkeys_iterable(self): self.assertEqual(QueryDict.fromkeys('xyzzy'), QueryDict('x&y&z&z&y')) def test_fromkeys_with_nondefault_encoding(self): key_utf16 = b'\xff\xfe\x8e\x02\xdd\x01\x9e\x02' value_utf16 = b'\xff\xfe\xdd\x01n\x00l\x00P\x02\x8c\x02' q = QueryDict.fromkeys([key_utf16], value=value_utf16, encoding='utf-16') expected = QueryDict('', mutable=True) expected['ʎǝʞ'] = 'ǝnlɐʌ' self.assertEqual(q, expected) def test_fromkeys_empty_iterable(self): self.assertEqual(QueryDict.fromkeys([]), QueryDict('')) def test_fromkeys_noniterable(self): with self.assertRaises(TypeError): QueryDict.fromkeys(0) class HttpResponseTests(SimpleTestCase): def test_headers_type(self): r = HttpResponse() # ASCII strings or bytes values are converted to strings. r.headers['key'] = 'test' self.assertEqual(r.headers['key'], 'test') r.headers['key'] = b'test' self.assertEqual(r.headers['key'], 'test') self.assertIn(b'test', r.serialize_headers()) # Non-ASCII values are serialized to Latin-1. r.headers['key'] = 'café' self.assertIn('café'.encode('latin-1'), r.serialize_headers()) # Other Unicode values are MIME-encoded (there's no way to pass them as # bytes). 
r.headers['key'] = '†' self.assertEqual(r.headers['key'], '=?utf-8?b?4oCg?=') self.assertIn(b'=?utf-8?b?4oCg?=', r.serialize_headers()) # The response also converts string or bytes keys to strings, but requires # them to contain ASCII r = HttpResponse() del r.headers['Content-Type'] r.headers['foo'] = 'bar' headers = list(r.headers.items()) self.assertEqual(len(headers), 1) self.assertEqual(headers[0], ('foo', 'bar')) r = HttpResponse() del r.headers['Content-Type'] r.headers[b'foo'] = 'bar' headers = list(r.headers.items()) self.assertEqual(len(headers), 1) self.assertEqual(headers[0], ('foo', 'bar')) self.assertIsInstance(headers[0][0], str) r = HttpResponse() with self.assertRaises(UnicodeError): r.headers.__setitem__('føø', 'bar') with self.assertRaises(UnicodeError): r.headers.__setitem__('føø'.encode(), 'bar') def test_long_line(self): # Bug #20889: long lines trigger newlines to be added to headers # (which is not allowed due to bug #10188) h = HttpResponse() f = b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88' f = f.decode('utf-8') h.headers['Content-Disposition'] = 'attachment; filename="%s"' % f # This one is triggering https://bugs.python.org/issue20747, that is Python # will itself insert a newline in the header h.headers['Content-Disposition'] = 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"' def test_newlines_in_headers(self): # Bug #10188: Do not allow newlines in headers (CR or LF) r = HttpResponse() with self.assertRaises(BadHeaderError): r.headers.__setitem__('test\rstr', 'test') with self.assertRaises(BadHeaderError): r.headers.__setitem__('test\nstr', 'test') def test_dict_behavior(self): """ Test for bug #14020: Make HttpResponse.get work like dict.get """ r = HttpResponse() self.assertIsNone(r.get('test')) def test_non_string_content(self): # Bug 16494: HttpResponse should behave consistently with non-strings r = HttpResponse(12345) self.assertEqual(r.content, b'12345') # test content via property r = HttpResponse() r.content = 12345 self.assertEqual(r.content, b'12345') def test_memoryview_content(self): r = HttpResponse(memoryview(b'memoryview')) self.assertEqual(r.content, b'memoryview') def test_iter_content(self): r = HttpResponse(['abc', 'def', 'ghi']) self.assertEqual(r.content, b'abcdefghi') # test iter content via property r = HttpResponse() r.content = ['idan', 'alex', 'jacob'] self.assertEqual(r.content, b'idanalexjacob') r = HttpResponse() r.content = [1, 2, 3] self.assertEqual(r.content, b'123') # test odd inputs r = HttpResponse() r.content = ['1', '2', 3, '\u079e'] # '\xde\x9e' == unichr(1950).encode() self.assertEqual(r.content, b'123\xde\x9e') # .content can safely be accessed multiple times. r = HttpResponse(iter(['hello', 'world'])) self.assertEqual(r.content, r.content) self.assertEqual(r.content, b'helloworld') # __iter__ can safely be called multiple times (#20187). self.assertEqual(b''.join(r), b'helloworld') self.assertEqual(b''.join(r), b'helloworld') # Accessing .content still works. self.assertEqual(r.content, b'helloworld') # Accessing .content also works if the response was iterated first. r = HttpResponse(iter(['hello', 'world'])) self.assertEqual(b''.join(r), b'helloworld') self.assertEqual(r.content, b'helloworld') # Additional content can be written to the response. 
r = HttpResponse(iter(['hello', 'world'])) self.assertEqual(r.content, b'helloworld') r.write('!') self.assertEqual(r.content, b'helloworld!') def test_iterator_isnt_rewound(self): # Regression test for #13222 r = HttpResponse('abc') i = iter(r) self.assertEqual(list(i), [b'abc']) self.assertEqual(list(i), []) def test_lazy_content(self): r = HttpResponse(lazystr('helloworld')) self.assertEqual(r.content, b'helloworld') def test_file_interface(self): r = HttpResponse() r.write(b"hello") self.assertEqual(r.tell(), 5) r.write("привет") self.assertEqual(r.tell(), 17) r = HttpResponse(['abc']) r.write('def') self.assertEqual(r.tell(), 6) self.assertEqual(r.content, b'abcdef') # with Content-Encoding header r = HttpResponse() r.headers['Content-Encoding'] = 'winning' r.write(b'abc') r.write(b'def') self.assertEqual(r.content, b'abcdef') def test_stream_interface(self): r = HttpResponse('asdf') self.assertEqual(r.getvalue(), b'asdf') r = HttpResponse() self.assertIs(r.writable(), True) r.writelines(['foo\n', 'bar\n', 'baz\n']) self.assertEqual(r.content, b'foo\nbar\nbaz\n') def test_unsafe_redirect(self): bad_urls = [ 'data:text/html,<script>window.alert("xss")</script>', 'mailto:[email protected]', 'file:///etc/passwd', ] for url in bad_urls: with self.assertRaises(DisallowedRedirect): HttpResponseRedirect(url) with self.assertRaises(DisallowedRedirect): HttpResponsePermanentRedirect(url) def test_header_deletion(self): r = HttpResponse('hello') r.headers['X-Foo'] = 'foo' del r.headers['X-Foo'] self.assertNotIn('X-Foo', r.headers) # del doesn't raise a KeyError on nonexistent headers. del r.headers['X-Foo'] def test_instantiate_with_headers(self): r = HttpResponse('hello', headers={'X-Foo': 'foo'}) self.assertEqual(r.headers['X-Foo'], 'foo') self.assertEqual(r.headers['x-foo'], 'foo') def test_content_type(self): r = HttpResponse('hello', content_type='application/json') self.assertEqual(r.headers['Content-Type'], 'application/json') def test_content_type_headers(self): r = HttpResponse('hello', headers={'Content-Type': 'application/json'}) self.assertEqual(r.headers['Content-Type'], 'application/json') def test_content_type_mutually_exclusive(self): msg = ( "'headers' must not contain 'Content-Type' when the " "'content_type' parameter is provided." ) with self.assertRaisesMessage(ValueError, msg): HttpResponse( 'hello', content_type='application/json', headers={'Content-Type': 'text/csv'}, ) class HttpResponseSubclassesTests(SimpleTestCase): def test_redirect(self): response = HttpResponseRedirect('/redirected/') self.assertEqual(response.status_code, 302) # Standard HttpResponse init args can be used response = HttpResponseRedirect( '/redirected/', content='The resource has temporarily moved', content_type='text/html', ) self.assertContains(response, 'The resource has temporarily moved', status_code=302) self.assertEqual(response.url, response.headers['Location']) def test_redirect_lazy(self): """Make sure HttpResponseRedirect works with lazy strings.""" r = HttpResponseRedirect(lazystr('/redirected/')) self.assertEqual(r.url, '/redirected/') def test_redirect_repr(self): response = HttpResponseRedirect('/redirected/') expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="/redirected/">' self.assertEqual(repr(response), expected) def test_invalid_redirect_repr(self): """ If HttpResponseRedirect raises DisallowedRedirect, its __repr__() should work (in the debug view, for example). 
""" response = HttpResponseRedirect.__new__(HttpResponseRedirect) with self.assertRaisesMessage(DisallowedRedirect, "Unsafe redirect to URL with protocol 'ssh'"): HttpResponseRedirect.__init__(response, 'ssh://foo') expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="ssh://foo">' self.assertEqual(repr(response), expected) def test_not_modified(self): response = HttpResponseNotModified() self.assertEqual(response.status_code, 304) # 304 responses should not have content/content-type with self.assertRaises(AttributeError): response.content = "Hello dear" self.assertNotIn('content-type', response) def test_not_modified_repr(self): response = HttpResponseNotModified() self.assertEqual(repr(response), '<HttpResponseNotModified status_code=304>') def test_not_allowed(self): response = HttpResponseNotAllowed(['GET']) self.assertEqual(response.status_code, 405) # Standard HttpResponse init args can be used response = HttpResponseNotAllowed(['GET'], content='Only the GET method is allowed', content_type='text/html') self.assertContains(response, 'Only the GET method is allowed', status_code=405) def test_not_allowed_repr(self): response = HttpResponseNotAllowed(['GET', 'OPTIONS'], content_type='text/plain') expected = '<HttpResponseNotAllowed [GET, OPTIONS] status_code=405, "text/plain">' self.assertEqual(repr(response), expected) def test_not_allowed_repr_no_content_type(self): response = HttpResponseNotAllowed(('GET', 'POST')) del response.headers['Content-Type'] self.assertEqual(repr(response), '<HttpResponseNotAllowed [GET, POST] status_code=405>') class JsonResponseTests(SimpleTestCase): def test_json_response_non_ascii(self): data = {'key': 'łóżko'} response = JsonResponse(data) self.assertEqual(json.loads(response.content.decode()), data) def test_json_response_raises_type_error_with_default_setting(self): with self.assertRaisesMessage( TypeError, 'In order to allow non-dict objects to be serialized set the ' 'safe parameter to False' ): JsonResponse([1, 2, 3]) def test_json_response_text(self): response = JsonResponse('foobar', safe=False) self.assertEqual(json.loads(response.content.decode()), 'foobar') def test_json_response_list(self): response = JsonResponse(['foo', 'bar'], safe=False) self.assertEqual(json.loads(response.content.decode()), ['foo', 'bar']) def test_json_response_uuid(self): u = uuid.uuid4() response = JsonResponse(u, safe=False) self.assertEqual(json.loads(response.content.decode()), str(u)) def test_json_response_custom_encoder(self): class CustomDjangoJSONEncoder(DjangoJSONEncoder): def encode(self, o): return json.dumps({'foo': 'bar'}) response = JsonResponse({}, encoder=CustomDjangoJSONEncoder) self.assertEqual(json.loads(response.content.decode()), {'foo': 'bar'}) def test_json_response_passing_arguments_to_json_dumps(self): response = JsonResponse({'foo': 'bar'}, json_dumps_params={'indent': 2}) self.assertEqual(response.content.decode(), '{\n "foo": "bar"\n}') class StreamingHttpResponseTests(SimpleTestCase): def test_streaming_response(self): r = StreamingHttpResponse(iter(['hello', 'world'])) # iterating over the response itself yields bytestring chunks. chunks = list(r) self.assertEqual(chunks, [b'hello', b'world']) for chunk in chunks: self.assertIsInstance(chunk, bytes) # and the response can only be iterated once. self.assertEqual(list(r), []) # even when a sequence that can be iterated many times, like a list, # is given as content. 
r = StreamingHttpResponse(['abc', 'def']) self.assertEqual(list(r), [b'abc', b'def']) self.assertEqual(list(r), []) # iterating over strings still yields bytestring chunks. r.streaming_content = iter(['hello', 'café']) chunks = list(r) # '\xc3\xa9' == unichr(233).encode() self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9']) for chunk in chunks: self.assertIsInstance(chunk, bytes) # streaming responses don't have a `content` attribute. self.assertFalse(hasattr(r, 'content')) # and you can't accidentally assign to a `content` attribute. with self.assertRaises(AttributeError): r.content = 'xyz' # but they do have a `streaming_content` attribute. self.assertTrue(hasattr(r, 'streaming_content')) # that exists so we can check if a response is streaming, and wrap or # replace the content iterator. r.streaming_content = iter(['abc', 'def']) r.streaming_content = (chunk.upper() for chunk in r.streaming_content) self.assertEqual(list(r), [b'ABC', b'DEF']) # coercing a streaming response to bytes doesn't return a complete HTTP # message like a regular response does. it only gives us the headers. r = StreamingHttpResponse(iter(['hello', 'world'])) self.assertEqual(bytes(r), b'Content-Type: text/html; charset=utf-8') # and this won't consume its content. self.assertEqual(list(r), [b'hello', b'world']) # additional content cannot be written to the response. r = StreamingHttpResponse(iter(['hello', 'world'])) with self.assertRaises(Exception): r.write('!') # and we can't tell the current position. with self.assertRaises(Exception): r.tell() r = StreamingHttpResponse(iter(['hello', 'world'])) self.assertEqual(r.getvalue(), b'helloworld') class FileCloseTests(SimpleTestCase): def setUp(self): # Disable the request_finished signal during this test # to avoid interfering with the database connection. request_finished.disconnect(close_old_connections) def tearDown(self): request_finished.connect(close_old_connections) def test_response(self): filename = os.path.join(os.path.dirname(__file__), 'abc.txt') # file isn't closed until we close the response. file1 = open(filename) r = HttpResponse(file1) self.assertTrue(file1.closed) r.close() # when multiple file are assigned as content, make sure they are all # closed with the response. file1 = open(filename) file2 = open(filename) r = HttpResponse(file1) r.content = file2 self.assertTrue(file1.closed) self.assertTrue(file2.closed) def test_streaming_response(self): filename = os.path.join(os.path.dirname(__file__), 'abc.txt') # file isn't closed until we close the response. file1 = open(filename) r = StreamingHttpResponse(file1) self.assertFalse(file1.closed) r.close() self.assertTrue(file1.closed) # when multiple file are assigned as content, make sure they are all # closed with the response. 
file1 = open(filename) file2 = open(filename) r = StreamingHttpResponse(file1) r.streaming_content = file2 self.assertFalse(file1.closed) self.assertFalse(file2.closed) r.close() self.assertTrue(file1.closed) self.assertTrue(file2.closed) class CookieTests(unittest.TestCase): def test_encode(self): """Semicolons and commas are encoded.""" c = SimpleCookie() c['test'] = "An,awkward;value" self.assertNotIn(";", c.output().rstrip(';')) # IE compat self.assertNotIn(",", c.output().rstrip(';')) # Safari compat def test_decode(self): """Semicolons and commas are decoded.""" c = SimpleCookie() c['test'] = "An,awkward;value" c2 = SimpleCookie() c2.load(c.output()[12:]) self.assertEqual(c['test'].value, c2['test'].value) c3 = parse_cookie(c.output()[12:]) self.assertEqual(c['test'].value, c3['test']) def test_nonstandard_keys(self): """ A single non-standard cookie name doesn't affect all cookies (#13007). """ self.assertIn('good_cookie', parse_cookie('good_cookie=yes;bad:cookie=yes')) def test_repeated_nonstandard_keys(self): """ A repeated non-standard name doesn't affect all cookies (#15852). """ self.assertIn('good_cookie', parse_cookie('a:=b; a:=c; good_cookie=yes')) def test_python_cookies(self): """ Test cases copied from Python's Lib/test/test_http_cookies.py """ self.assertEqual(parse_cookie('chips=ahoy; vienna=finger'), {'chips': 'ahoy', 'vienna': 'finger'}) # Here parse_cookie() differs from Python's cookie parsing in that it # treats all semicolons as delimiters, even within quotes. self.assertEqual( parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'), {'keebler': '"E=mc2', 'L': '\\"Loves\\"', 'fudge': '\\012', '': '"'} ) # Illegal cookies that have an '=' char in an unquoted value. self.assertEqual(parse_cookie('keebler=E=mc2'), {'keebler': 'E=mc2'}) # Cookies with ':' character in their name. self.assertEqual(parse_cookie('key:term=value:term'), {'key:term': 'value:term'}) # Cookies with '[' and ']'. self.assertEqual(parse_cookie('a=b; c=[; d=r; f=h'), {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'}) def test_cookie_edgecases(self): # Cookies that RFC6265 allows. self.assertEqual(parse_cookie('a=b; Domain=example.com'), {'a': 'b', 'Domain': 'example.com'}) # parse_cookie() has historically kept only the last cookie with the # same name. self.assertEqual(parse_cookie('a=b; h=i; a=c'), {'a': 'c', 'h': 'i'}) def test_invalid_cookies(self): """ Cookie strings that go against RFC6265 but browsers will send if set via document.cookie. """ # Chunks without an equals sign appear as unnamed values per # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 self.assertIn('django_language', parse_cookie('abc=def; unnamed; django_language=en')) # Even a double quote may be an unnamed value. self.assertEqual(parse_cookie('a=b; "; c=d'), {'a': 'b', '': '"', 'c': 'd'}) # Spaces in names and values, and an equals sign in values. self.assertEqual(parse_cookie('a b c=d e = f; gh=i'), {'a b c': 'd e = f', 'gh': 'i'}) # More characters the spec forbids. self.assertEqual(parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'), {'a b,c<>@:/[]?{}': 'd " =e,f g'}) # Unicode characters. The spec only allows ASCII. self.assertEqual(parse_cookie('saint=André Bessette'), {'saint': 'André Bessette'}) # Browsers don't send extra whitespace or semicolons in Cookie headers, # but parse_cookie() should parse whitespace the same way # document.cookie parses whitespace. 
self.assertEqual(parse_cookie(' = b ; ; = ; c = ; '), {'': 'b', 'c': ''}) def test_samesite(self): c = SimpleCookie('name=value; samesite=lax; httponly') self.assertEqual(c['name']['samesite'], 'lax') self.assertIn('SameSite=lax', c.output()) def test_httponly_after_load(self): c = SimpleCookie() c.load("name=val") c['name']['httponly'] = True self.assertTrue(c['name']['httponly']) def test_load_dict(self): c = SimpleCookie() c.load({'name': 'val'}) self.assertEqual(c['name'].value, 'val') def test_pickle(self): rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1' expected_output = 'Set-Cookie: %s' % rawdata C = SimpleCookie() C.load(rawdata) self.assertEqual(C.output(), expected_output) for proto in range(pickle.HIGHEST_PROTOCOL + 1): C1 = pickle.loads(pickle.dumps(C, protocol=proto)) self.assertEqual(C1.output(), expected_output) class HttpResponseHeadersTestCase(SimpleTestCase): """Headers by treating HttpResponse like a dictionary.""" def test_headers(self): response = HttpResponse() response['X-Foo'] = 'bar' self.assertEqual(response['X-Foo'], 'bar') self.assertEqual(response.headers['X-Foo'], 'bar') self.assertIn('X-Foo', response) self.assertIs(response.has_header('X-Foo'), True) del response['X-Foo'] self.assertNotIn('X-Foo', response) self.assertNotIn('X-Foo', response.headers) # del doesn't raise a KeyError on nonexistent headers. del response['X-Foo'] def test_headers_bytestring(self): response = HttpResponse() response['X-Foo'] = b'bar' self.assertEqual(response['X-Foo'], 'bar') self.assertEqual(response.headers['X-Foo'], 'bar') def test_newlines_in_headers(self): response = HttpResponse() with self.assertRaises(BadHeaderError): response['test\rstr'] = 'test' with self.assertRaises(BadHeaderError): response['test\nstr'] = 'test'
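# A minimal sketch, following the comments in StreamingHttpResponseTests above, of
# why `streaming_content` is exposed: calling code can detect a streaming response
# (via the `streaming` flag) and wrap or replace its chunk iterator without
# buffering it. The uppercasing transform is an arbitrary illustrative choice, not
# something the tests require.
def uppercase_streaming_chunks(response):
    if getattr(response, 'streaming', False):
        response.streaming_content = (
            chunk.upper() for chunk in response.streaming_content
        )
    return response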
8f761e51d4ca7f325c6d3bef8ac8b5a418957cd3aee1eae0a6c5c49925d20d4c
from django.contrib import admin from django.contrib.admin.options import ModelAdmin from django.contrib.auth.models import User from django.db.models import F from django.test import RequestFactory, TestCase from .models import ( Band, DynOrderingBandAdmin, Song, SongInlineDefaultOrdering, SongInlineNewOrdering, ) class MockRequest: pass class MockSuperUser: def has_perm(self, perm, obj=None): return True def has_module_perms(self, module): return True request = MockRequest() request.user = MockSuperUser() site = admin.AdminSite() class TestAdminOrdering(TestCase): """ Let's make sure that ModelAdmin.get_queryset uses the ordering we define in ModelAdmin rather that ordering defined in the model's inner Meta class. """ request_factory = RequestFactory() @classmethod def setUpTestData(cls): Band.objects.bulk_create([ Band(name='Aerosmith', bio='', rank=3), Band(name='Radiohead', bio='', rank=1), Band(name='Van Halen', bio='', rank=2), ]) def test_default_ordering(self): """ The default ordering should be by name, as specified in the inner Meta class. """ ma = ModelAdmin(Band, site) names = [b.name for b in ma.get_queryset(request)] self.assertEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names) def test_specified_ordering(self): """ Let's use a custom ModelAdmin that changes the ordering, and make sure it actually changes. """ class BandAdmin(ModelAdmin): ordering = ('rank',) # default ordering is ('name',) ma = BandAdmin(Band, site) names = [b.name for b in ma.get_queryset(request)] self.assertEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names) def test_specified_ordering_by_f_expression(self): class BandAdmin(ModelAdmin): ordering = (F('rank').desc(nulls_last=True),) band_admin = BandAdmin(Band, site) names = [b.name for b in band_admin.get_queryset(request)] self.assertEqual(['Aerosmith', 'Van Halen', 'Radiohead'], names) def test_dynamic_ordering(self): """ Let's use a custom ModelAdmin that changes the ordering dynamically. """ super_user = User.objects.create(username='admin', is_superuser=True) other_user = User.objects.create(username='other') request = self.request_factory.get('/') request.user = super_user ma = DynOrderingBandAdmin(Band, site) names = [b.name for b in ma.get_queryset(request)] self.assertEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names) request.user = other_user names = [b.name for b in ma.get_queryset(request)] self.assertEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names) class TestInlineModelAdminOrdering(TestCase): """ Let's make sure that InlineModelAdmin.get_queryset uses the ordering we define in InlineModelAdmin. """ @classmethod def setUpTestData(cls): cls.band = Band.objects.create(name='Aerosmith', bio='', rank=3) Song.objects.bulk_create([ Song(band=cls.band, name='Pink', duration=235), Song(band=cls.band, name='Dude (Looks Like a Lady)', duration=264), Song(band=cls.band, name='Jaded', duration=214), ]) def test_default_ordering(self): """ The default ordering should be by name, as specified in the inner Meta class. """ inline = SongInlineDefaultOrdering(self.band, site) names = [s.name for s in inline.get_queryset(request)] self.assertEqual(['Dude (Looks Like a Lady)', 'Jaded', 'Pink'], names) def test_specified_ordering(self): """ Let's check with ordering set to something different than the default. 
""" inline = SongInlineNewOrdering(self.band, site) names = [s.name for s in inline.get_queryset(request)] self.assertEqual(['Jaded', 'Pink', 'Dude (Looks Like a Lady)'], names) class TestRelatedFieldsAdminOrdering(TestCase): @classmethod def setUpTestData(cls): cls.b1 = Band.objects.create(name='Pink Floyd', bio='', rank=1) cls.b2 = Band.objects.create(name='Foo Fighters', bio='', rank=5) def setUp(self): # we need to register a custom ModelAdmin (instead of just using # ModelAdmin) because the field creator tries to find the ModelAdmin # for the related model class SongAdmin(admin.ModelAdmin): pass site.register(Song, SongAdmin) def tearDown(self): site.unregister(Song) if Band in site._registry: site.unregister(Band) def check_ordering_of_field_choices(self, correct_ordering): fk_field = site._registry[Song].formfield_for_foreignkey(Song.band.field, request=None) m2m_field = site._registry[Song].formfield_for_manytomany(Song.other_interpreters.field, request=None) self.assertEqual(list(fk_field.queryset), correct_ordering) self.assertEqual(list(m2m_field.queryset), correct_ordering) def test_no_admin_fallback_to_model_ordering(self): # should be ordered by name (as defined by the model) self.check_ordering_of_field_choices([self.b2, self.b1]) def test_admin_with_no_ordering_fallback_to_model_ordering(self): class NoOrderingBandAdmin(admin.ModelAdmin): pass site.register(Band, NoOrderingBandAdmin) # should be ordered by name (as defined by the model) self.check_ordering_of_field_choices([self.b2, self.b1]) def test_admin_ordering_beats_model_ordering(self): class StaticOrderingBandAdmin(admin.ModelAdmin): ordering = ('rank',) site.register(Band, StaticOrderingBandAdmin) # should be ordered by rank (defined by the ModelAdmin) self.check_ordering_of_field_choices([self.b1, self.b2]) def test_custom_queryset_still_wins(self): """Custom queryset has still precedence (#21405)""" class SongAdmin(admin.ModelAdmin): # Exclude one of the two Bands from the querysets def formfield_for_foreignkey(self, db_field, request, **kwargs): if db_field.name == 'band': kwargs["queryset"] = Band.objects.filter(rank__gt=2) return super().formfield_for_foreignkey(db_field, request, **kwargs) def formfield_for_manytomany(self, db_field, request, **kwargs): if db_field.name == 'other_interpreters': kwargs["queryset"] = Band.objects.filter(rank__gt=2) return super().formfield_for_foreignkey(db_field, request, **kwargs) class StaticOrderingBandAdmin(admin.ModelAdmin): ordering = ('rank',) site.unregister(Song) site.register(Song, SongAdmin) site.register(Band, StaticOrderingBandAdmin) self.check_ordering_of_field_choices([self.b2])
2fbee52887db33073b11d4fa915c245eeb16dba6c743df19176936fad917ded0
from django.db import connection, transaction from django.test import TransactionTestCase, skipUnlessDBFeature from .models import Thing class ForcedError(Exception): pass class TestConnectionOnCommit(TransactionTestCase): """ Tests for transaction.on_commit(). Creation/checking of database objects in parallel with callback tracking is to verify that the behavior of the two match in all tested cases. """ available_apps = ['transaction_hooks'] def setUp(self): self.notified = [] def notify(self, id_): if id_ == 'error': raise ForcedError() self.notified.append(id_) def do(self, num): """Create a Thing instance and notify about it.""" Thing.objects.create(num=num) transaction.on_commit(lambda: self.notify(num)) def assertDone(self, nums): self.assertNotified(nums) self.assertEqual(sorted(t.num for t in Thing.objects.all()), sorted(nums)) def assertNotified(self, nums): self.assertEqual(self.notified, nums) def test_executes_immediately_if_no_transaction(self): self.do(1) self.assertDone([1]) def test_delays_execution_until_after_transaction_commit(self): with transaction.atomic(): self.do(1) self.assertNotified([]) self.assertDone([1]) def test_does_not_execute_if_transaction_rolled_back(self): try: with transaction.atomic(): self.do(1) raise ForcedError() except ForcedError: pass self.assertDone([]) def test_executes_only_after_final_transaction_committed(self): with transaction.atomic(): with transaction.atomic(): self.do(1) self.assertNotified([]) self.assertNotified([]) self.assertDone([1]) def test_discards_hooks_from_rolled_back_savepoint(self): with transaction.atomic(): # one successful savepoint with transaction.atomic(): self.do(1) # one failed savepoint try: with transaction.atomic(): self.do(2) raise ForcedError() except ForcedError: pass # another successful savepoint with transaction.atomic(): self.do(3) # only hooks registered during successful savepoints execute self.assertDone([1, 3]) def test_no_hooks_run_from_failed_transaction(self): """If outer transaction fails, no hooks from within it run.""" try: with transaction.atomic(): with transaction.atomic(): self.do(1) raise ForcedError() except ForcedError: pass self.assertDone([]) def test_inner_savepoint_rolled_back_with_outer(self): with transaction.atomic(): try: with transaction.atomic(): with transaction.atomic(): self.do(1) raise ForcedError() except ForcedError: pass self.do(2) self.assertDone([2]) def test_no_savepoints_atomic_merged_with_outer(self): with transaction.atomic(): with transaction.atomic(): self.do(1) try: with transaction.atomic(savepoint=False): raise ForcedError() except ForcedError: pass self.assertDone([]) def test_inner_savepoint_does_not_affect_outer(self): with transaction.atomic(): with transaction.atomic(): self.do(1) try: with transaction.atomic(): raise ForcedError() except ForcedError: pass self.assertDone([1]) def test_runs_hooks_in_order_registered(self): with transaction.atomic(): self.do(1) with transaction.atomic(): self.do(2) self.do(3) self.assertDone([1, 2, 3]) def test_hooks_cleared_after_successful_commit(self): with transaction.atomic(): self.do(1) with transaction.atomic(): self.do(2) self.assertDone([1, 2]) # not [1, 1, 2] def test_hooks_cleared_after_rollback(self): try: with transaction.atomic(): self.do(1) raise ForcedError() except ForcedError: pass with transaction.atomic(): self.do(2) self.assertDone([2]) @skipUnlessDBFeature('test_db_allows_multiple_connections') def test_hooks_cleared_on_reconnect(self): with transaction.atomic(): self.do(1) connection.close() 
connection.connect() with transaction.atomic(): self.do(2) self.assertDone([2]) def test_error_in_hook_doesnt_prevent_clearing_hooks(self): try: with transaction.atomic(): transaction.on_commit(lambda: self.notify('error')) except ForcedError: pass with transaction.atomic(): self.do(1) self.assertDone([1]) def test_db_query_in_hook(self): with transaction.atomic(): Thing.objects.create(num=1) transaction.on_commit( lambda: [self.notify(t.num) for t in Thing.objects.all()] ) self.assertDone([1]) def test_transaction_in_hook(self): def on_commit(): with transaction.atomic(): t = Thing.objects.create(num=1) self.notify(t.num) with transaction.atomic(): transaction.on_commit(on_commit) self.assertDone([1]) def test_hook_in_hook(self): def on_commit(i, add_hook): with transaction.atomic(): if add_hook: transaction.on_commit(lambda: on_commit(i + 10, False)) t = Thing.objects.create(num=i) self.notify(t.num) with transaction.atomic(): transaction.on_commit(lambda: on_commit(1, True)) transaction.on_commit(lambda: on_commit(2, True)) self.assertDone([1, 11, 2, 12]) def test_raises_exception_non_autocommit_mode(self): def should_never_be_called(): raise AssertionError('this function should never be called') try: connection.set_autocommit(False) msg = 'on_commit() cannot be used in manual transaction management' with self.assertRaisesMessage(transaction.TransactionManagementError, msg): transaction.on_commit(should_never_be_called) finally: connection.set_autocommit(True) def test_raises_exception_non_callable(self): msg = "on_commit()'s callback must be a callable." with self.assertRaisesMessage(TypeError, msg): transaction.on_commit(None)
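# A minimal usage sketch of the behavior TestConnectionOnCommit exercises, reusing
# this module's Thing model and transaction import: on_commit() queues a callback
# that runs only after the outermost atomic block commits and is discarded if the
# block rolls back. The `notify` callable stands in for any post-commit side effect.
def create_thing_then_notify(num, notify):
    with transaction.atomic():
        Thing.objects.create(num=num)
        # Queued here; invoked only if the surrounding transaction commits.
        transaction.on_commit(lambda: notify(num))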
6728609b0d1e370495d8d96dd5d4ec947d6bfb67eb05c8e7570d0ae3eadf4654
from django.db.backends.mysql.client import DatabaseClient from django.test import SimpleTestCase class MySqlDbshellCommandTestCase(SimpleTestCase): def test_fails_with_keyerror_on_incomplete_config(self): with self.assertRaises(KeyError): self.get_command_line_arguments({}) def test_basic_params_specified_in_settings(self): self.assertEqual( ['mysql', '--user=someuser', '--password=somepassword', '--host=somehost', '--port=444', 'somedbname'], self.get_command_line_arguments({ 'NAME': 'somedbname', 'USER': 'someuser', 'PASSWORD': 'somepassword', 'HOST': 'somehost', 'PORT': 444, 'OPTIONS': {}, })) def test_options_override_settings_proper_values(self): settings_port = 444 options_port = 555 self.assertNotEqual(settings_port, options_port, 'test pre-req') self.assertEqual( ['mysql', '--user=optionuser', '--password=optionpassword', '--host=optionhost', '--port={}'.format(options_port), 'optiondbname'], self.get_command_line_arguments({ 'NAME': 'settingdbname', 'USER': 'settinguser', 'PASSWORD': 'settingpassword', 'HOST': 'settinghost', 'PORT': settings_port, 'OPTIONS': { 'db': 'optiondbname', 'user': 'optionuser', 'passwd': 'optionpassword', 'host': 'optionhost', 'port': options_port, }, })) def test_options_password(self): self.assertEqual( [ 'mysql', '--user=someuser', '--password=optionpassword', '--host=somehost', '--port=444', 'somedbname', ], self.get_command_line_arguments({ 'NAME': 'somedbname', 'USER': 'someuser', 'PASSWORD': 'settingpassword', 'HOST': 'somehost', 'PORT': 444, 'OPTIONS': {'password': 'optionpassword'}, }), ) def test_options_charset(self): self.assertEqual( [ 'mysql', '--user=someuser', '--password=somepassword', '--host=somehost', '--port=444', '--default-character-set=utf8', 'somedbname', ], self.get_command_line_arguments({ 'NAME': 'somedbname', 'USER': 'someuser', 'PASSWORD': 'somepassword', 'HOST': 'somehost', 'PORT': 444, 'OPTIONS': {'charset': 'utf8'}, }), ) def test_can_connect_using_sockets(self): self.assertEqual( ['mysql', '--user=someuser', '--password=somepassword', '--socket=/path/to/mysql.socket.file', 'somedbname'], self.get_command_line_arguments({ 'NAME': 'somedbname', 'USER': 'someuser', 'PASSWORD': 'somepassword', 'HOST': '/path/to/mysql.socket.file', 'PORT': None, 'OPTIONS': {}, })) def test_ssl_certificate_is_added(self): self.assertEqual( ['mysql', '--user=someuser', '--password=somepassword', '--host=somehost', '--port=444', '--ssl-ca=sslca', '--ssl-cert=sslcert', '--ssl-key=sslkey', 'somedbname'], self.get_command_line_arguments({ 'NAME': 'somedbname', 'USER': 'someuser', 'PASSWORD': 'somepassword', 'HOST': 'somehost', 'PORT': 444, 'OPTIONS': { 'ssl': { 'ca': 'sslca', 'cert': 'sslcert', 'key': 'sslkey', }, }, })) def test_parameters(self): self.assertEqual( ['mysql', 'somedbname', '--help'], self.get_command_line_arguments( { 'NAME': 'somedbname', 'USER': None, 'PASSWORD': None, 'HOST': None, 'PORT': None, 'OPTIONS': {}, }, ['--help'], ), ) def get_command_line_arguments(self, connection_settings, parameters=None): if parameters is None: parameters = [] return DatabaseClient.settings_to_cmd_args(connection_settings, parameters)
1c284a7dcd9a1fc263c0c37247c047219d1ad812424b83c997554cd5dd16d152
from .custom_permissions import CustomPermissionsUser from .custom_user import ( CustomUser, CustomUserWithoutIsActiveField, ExtensionUser, ) from .invalid_models import CustomUserNonUniqueUsername from .is_active import IsActiveTestUser1 from .minimal import MinimalUser from .no_password import NoPasswordUser from .proxy import Proxy, UserProxy from .uuid_pk import UUIDUser from .with_custom_email_field import CustomEmailField from .with_foreign_key import CustomUserWithFK, Email from .with_integer_username import IntegerUsernameUser from .with_last_login_attr import UserWithDisabledLastLoginField from .with_many_to_many import ( CustomUserWithM2M, CustomUserWithM2MThrough, Organization, ) __all__ = ( 'CustomEmailField', 'CustomPermissionsUser', 'CustomUser', 'CustomUserNonUniqueUsername', 'CustomUserWithFK', 'CustomUserWithM2M', 'CustomUserWithM2MThrough', 'CustomUserWithoutIsActiveField', 'Email', 'ExtensionUser', 'IntegerUsernameUser', 'IsActiveTestUser1', 'MinimalUser', 'NoPasswordUser', 'Organization', 'Proxy', 'UUIDUser', 'UserProxy', 'UserWithDisabledLastLoginField', )
b629d3b26c5ad16e03c62533a80f5a8cd355944838069899b7e60b5462fba1bd
from django.contrib.auth.base_user import AbstractBaseUser from django.contrib.auth.models import BaseUserManager from django.db import models class CustomEmailFieldUserManager(BaseUserManager): def create_user(self, username, password, email): user = self.model(username=username) user.set_password(password) user.email_address = email user.save(using=self._db) return user class CustomEmailField(AbstractBaseUser): username = models.CharField(max_length=255) password = models.CharField(max_length=255) email_address = models.EmailField(null=True) is_active = models.BooleanField(default=True) EMAIL_FIELD = 'email_address' USERNAME_FIELD = 'username' objects = CustomEmailFieldUserManager()
529909c439266e83d9009229d870a0ff6a2b34f322f42dcdf93c64ee5e7bfb50
from django.core.management.base import BaseCommand class Command(BaseCommand): def handle(self, **options): self.stdout.write('Working...') self.stdout.flush() self.stdout.write('OK')
7b3fd4d41225877697cce4e7406f01bceaa3db0a6726297354e5ca4a706cba45
from django.core.management.base import BaseCommand class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument( '--append_const', action='append_const', const=42, required=True, ) parser.add_argument('--const', action='store_const', const=31, required=True) parser.add_argument('--count', action='count', required=True) parser.add_argument('--flag_false', action='store_false', required=True) parser.add_argument('--flag_true', action='store_true', required=True) def handle(self, *args, **options): for option, value in options.items(): if value is not None: self.stdout.write('%s=%s' % (option, value))
b1b671a9e0bc98f0d5b472f75ded8b5c6b68005df9db277df5fcbc27015a20a5
from django.core.management.base import BaseCommand class Command(BaseCommand): def add_arguments(self, parser): group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--foo-id', type=int, nargs='?', default=None) group.add_argument('--foo-name', type=str, nargs='?', default=None) group.add_argument('--append_const', action='append_const', const=42) group.add_argument('--const', action='store_const', const=31) group.add_argument('--count', action='count') group.add_argument('--flag_false', action='store_false') group.add_argument('--flag_true', action='store_true') def handle(self, *args, **options): for option, value in options.items(): if value is not None: self.stdout.write('%s=%s' % (option, value))
02ab17e1d7a23004f3b3518e5e335a9a758089e2561be4e7d0e42b2e210cde3e
from django.db import migrations, models from ..fields import ( ArrayField, BigIntegerRangeField, CICharField, CIEmailField, CITextField, DateRangeField, DateTimeRangeField, DecimalRangeField, EnumField, HStoreField, IntegerRangeField, SearchVectorField, ) from ..models import TagField class Migration(migrations.Migration): dependencies = [ ('postgres_tests', '0001_setup_extensions'), ] operations = [ migrations.CreateModel( name='CharArrayModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('field', ArrayField(models.CharField(max_length=10), size=None)), ], options={ 'required_db_vendor': 'postgresql', }, bases=(models.Model,), ), migrations.CreateModel( name='DateTimeArrayModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('datetimes', ArrayField(models.DateTimeField(), size=None)), ('dates', ArrayField(models.DateField(), size=None)), ('times', ArrayField(models.TimeField(), size=None)), ], options={ 'required_db_vendor': 'postgresql', }, bases=(models.Model,), ), migrations.CreateModel( name='HStoreModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('field', HStoreField(blank=True, null=True)), ('array_field', ArrayField(HStoreField(), null=True)), ], options={ 'required_db_vendor': 'postgresql', }, bases=(models.Model,), ), migrations.CreateModel( name='OtherTypesArrayModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('ips', ArrayField(models.GenericIPAddressField(), size=None, default=list)), ('uuids', ArrayField(models.UUIDField(), size=None, default=list)), ('decimals', ArrayField(models.DecimalField(max_digits=5, decimal_places=2), size=None, default=list)), ('tags', ArrayField(TagField(), blank=True, null=True, size=None)), ('json', ArrayField(models.JSONField(default={}), default=[])), ('int_ranges', ArrayField(IntegerRangeField(), null=True, blank=True)), ('bigint_ranges', ArrayField(BigIntegerRangeField(), null=True, blank=True)), ], options={ 'required_db_vendor': 'postgresql', }, bases=(models.Model,), ), migrations.CreateModel( name='IntegerArrayModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('field', ArrayField(models.IntegerField(), size=None)), ], options={ 'required_db_vendor': 'postgresql', }, bases=(models.Model,), ), migrations.CreateModel( name='NestedIntegerArrayModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('field', ArrayField(ArrayField(models.IntegerField(), size=None), size=None)), ], options={ 'required_db_vendor': 'postgresql', }, bases=(models.Model,), ), migrations.CreateModel( name='NullableIntegerArrayModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('field', ArrayField(models.IntegerField(), size=None, null=True, blank=True)), ( 'field_nested', ArrayField(ArrayField(models.IntegerField(), size=None, null=True), size=None, null=True), ), ], options={ 'required_db_vendor': 'postgresql', }, bases=(models.Model,), ), migrations.CreateModel( name='CharFieldModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('field', models.CharField(max_length=16)), ], options=None, bases=None, ), migrations.CreateModel( name='TextFieldModel', fields=[ 
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('field', models.TextField()), ], options=None, bases=None, ), migrations.CreateModel( name='SmallAutoFieldModel', fields=[ ('id', models.SmallAutoField(verbose_name='ID', serialize=False, primary_key=True)), ], options=None, ), migrations.CreateModel( name='BigAutoFieldModel', fields=[ ('id', models.BigAutoField(verbose_name='ID', serialize=False, primary_key=True)), ], options=None, ), migrations.CreateModel( name='Scene', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('scene', models.CharField(max_length=255)), ('setting', models.CharField(max_length=255)), ], options=None, bases=None, ), migrations.CreateModel( name='Character', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=255)), ], options=None, bases=None, ), migrations.CreateModel( name='CITestModel', fields=[ ('name', CICharField(primary_key=True, max_length=255)), ('email', CIEmailField()), ('description', CITextField()), ('array_field', ArrayField(CITextField(), null=True)), ], options={ 'required_db_vendor': 'postgresql', }, bases=None, ), migrations.CreateModel( name='Line', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('scene', models.ForeignKey('postgres_tests.Scene', on_delete=models.SET_NULL)), ('character', models.ForeignKey('postgres_tests.Character', on_delete=models.SET_NULL)), ('dialogue', models.TextField(blank=True, null=True)), ('dialogue_search_vector', SearchVectorField(blank=True, null=True)), ('dialogue_config', models.CharField(max_length=100, blank=True, null=True)), ], options={ 'required_db_vendor': 'postgresql', }, bases=None, ), migrations.CreateModel( name='LineSavedSearch', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('line', models.ForeignKey('postgres_tests.Line', on_delete=models.CASCADE)), ('query', models.CharField(max_length=100)), ], options={ 'required_db_vendor': 'postgresql', }, ), migrations.CreateModel( name='AggregateTestModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('boolean_field', models.BooleanField(null=True)), ('char_field', models.CharField(max_length=30, blank=True)), ('integer_field', models.IntegerField(null=True)), ('json_field', models.JSONField(null=True)), ], options={ 'required_db_vendor': 'postgresql', }, ), migrations.CreateModel( name='StatTestModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('int1', models.IntegerField()), ('int2', models.IntegerField()), ('related_field', models.ForeignKey( 'postgres_tests.AggregateTestModel', models.SET_NULL, null=True, )), ], options={ 'required_db_vendor': 'postgresql', }, ), migrations.CreateModel( name='NowTestModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('when', models.DateTimeField(null=True, default=None)), ] ), migrations.CreateModel( name='UUIDTestModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('uuid', models.UUIDField(default=None, null=True)), ] ), migrations.CreateModel( name='RangesModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, 
auto_created=True, primary_key=True)), ('ints', IntegerRangeField(null=True, blank=True)), ('bigints', BigIntegerRangeField(null=True, blank=True)), ('decimals', DecimalRangeField(null=True, blank=True)), ('timestamps', DateTimeRangeField(null=True, blank=True)), ('timestamps_inner', DateTimeRangeField(null=True, blank=True)), ('dates', DateRangeField(null=True, blank=True)), ('dates_inner', DateRangeField(null=True, blank=True)), ], options={ 'required_db_vendor': 'postgresql' }, bases=(models.Model,) ), migrations.CreateModel( name='RangeLookupsModel', fields=[ ('parent', models.ForeignKey( 'postgres_tests.RangesModel', models.SET_NULL, blank=True, null=True, )), ('integer', models.IntegerField(blank=True, null=True)), ('big_integer', models.BigIntegerField(blank=True, null=True)), ('float', models.FloatField(blank=True, null=True)), ('timestamp', models.DateTimeField(blank=True, null=True)), ('date', models.DateField(blank=True, null=True)), ('small_integer', models.SmallIntegerField(blank=True, null=True)), ('decimal_field', models.DecimalField(max_digits=5, decimal_places=2, blank=True, null=True)), ], options={ 'required_db_vendor': 'postgresql', }, bases=(models.Model,), ), migrations.CreateModel( name='ArrayEnumModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('array_of_enums', ArrayField(EnumField(max_length=20), null=True, blank=True)), ], options={ 'required_db_vendor': 'postgresql', }, bases=(models.Model,), ), migrations.CreateModel( name='Room', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('number', models.IntegerField(unique=True)), ], ), migrations.CreateModel( name='HotelReservation', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('room', models.ForeignKey('postgres_tests.Room', models.CASCADE)), ('datespan', DateRangeField()), ('start', models.DateTimeField()), ('end', models.DateTimeField()), ('cancelled', models.BooleanField(default=False)), ('requirements', models.JSONField(blank=True, null=True)), ], options={ 'required_db_vendor': 'postgresql', }, ), ]
2a279e3cf0891d8ce0645d907397b319409d539c6a69c414c4d7602cdc00ef77
import datetime from django import forms from django.forms import CheckboxSelectMultiple from django.test import override_settings from .base import WidgetTest class CheckboxSelectMultipleTest(WidgetTest): widget = CheckboxSelectMultiple def test_render_value(self): self.check_html(self.widget(choices=self.beatles), 'beatles', ['J'], html=( """<ul> <li><label><input checked type="checkbox" name="beatles" value="J"> John</label></li> <li><label><input type="checkbox" name="beatles" value="P"> Paul</label></li> <li><label><input type="checkbox" name="beatles" value="G"> George</label></li> <li><label><input type="checkbox" name="beatles" value="R"> Ringo</label></li> </ul>""" )) def test_render_value_multiple(self): self.check_html(self.widget(choices=self.beatles), 'beatles', ['J', 'P'], html=( """<ul> <li><label><input checked type="checkbox" name="beatles" value="J"> John</label></li> <li><label><input checked type="checkbox" name="beatles" value="P"> Paul</label></li> <li><label><input type="checkbox" name="beatles" value="G"> George</label></li> <li><label><input type="checkbox" name="beatles" value="R"> Ringo</label></li> </ul>""" )) def test_render_none(self): """ If the value is None, none of the options are selected, even if the choices have an empty option. """ self.check_html(self.widget(choices=(('', 'Unknown'),) + self.beatles), 'beatles', None, html=( """<ul> <li><label><input type="checkbox" name="beatles" value=""> Unknown</label></li> <li><label><input type="checkbox" name="beatles" value="J"> John</label></li> <li><label><input type="checkbox" name="beatles" value="P"> Paul</label></li> <li><label><input type="checkbox" name="beatles" value="G"> George</label></li> <li><label><input type="checkbox" name="beatles" value="R"> Ringo</label></li> </ul>""" )) def test_nested_choices(self): nested_choices = ( ('unknown', 'Unknown'), ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))), ('Video', (('vhs', 'VHS'), ('dvd', 'DVD'))), ) html = """ <ul id="media"> <li> <label for="media_0"><input id="media_0" name="nestchoice" type="checkbox" value="unknown"> Unknown</label> </li> <li>Audio<ul id="media_1"> <li> <label for="media_1_0"> <input checked id="media_1_0" name="nestchoice" type="checkbox" value="vinyl"> Vinyl </label> </li> <li> <label for="media_1_1"><input id="media_1_1" name="nestchoice" type="checkbox" value="cd"> CD</label> </li> </ul></li> <li>Video<ul id="media_2"> <li> <label for="media_2_0"><input id="media_2_0" name="nestchoice" type="checkbox" value="vhs"> VHS</label> </li> <li> <label for="media_2_1"> <input checked id="media_2_1" name="nestchoice" type="checkbox" value="dvd"> DVD </label> </li> </ul></li> </ul> """ self.check_html( self.widget(choices=nested_choices), 'nestchoice', ('vinyl', 'dvd'), attrs={'id': 'media'}, html=html, ) def test_nested_choices_without_id(self): nested_choices = ( ('unknown', 'Unknown'), ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))), ('Video', (('vhs', 'VHS'), ('dvd', 'DVD'))), ) html = """ <ul> <li> <label><input name="nestchoice" type="checkbox" value="unknown"> Unknown</label> </li> <li>Audio<ul> <li> <label> <input checked name="nestchoice" type="checkbox" value="vinyl"> Vinyl </label> </li> <li> <label><input name="nestchoice" type="checkbox" value="cd"> CD</label> </li> </ul></li> <li>Video<ul> <li> <label><input name="nestchoice" type="checkbox" value="vhs"> VHS</label> </li> <li> <label> <input checked name="nestchoice" type="checkbox" value="dvd"> DVD </label> </li> </ul></li> </ul> """ 
self.check_html(self.widget(choices=nested_choices), 'nestchoice', ('vinyl', 'dvd'), html=html) def test_separate_ids(self): """ Each input gets a separate ID. """ choices = [('a', 'A'), ('b', 'B'), ('c', 'C')] html = """ <ul id="abc"> <li> <label for="abc_0"><input checked type="checkbox" name="letters" value="a" id="abc_0"> A</label> </li> <li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1"> B</label></li> <li> <label for="abc_2"><input checked type="checkbox" name="letters" value="c" id="abc_2"> C</label> </li> </ul> """ self.check_html(self.widget(choices=choices), 'letters', ['a', 'c'], attrs={'id': 'abc'}, html=html) def test_separate_ids_constructor(self): """ Each input gets a separate ID when the ID is passed to the constructor. """ widget = CheckboxSelectMultiple(attrs={'id': 'abc'}, choices=[('a', 'A'), ('b', 'B'), ('c', 'C')]) html = """ <ul id="abc"> <li> <label for="abc_0"><input checked type="checkbox" name="letters" value="a" id="abc_0"> A</label> </li> <li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1"> B</label></li> <li> <label for="abc_2"><input checked type="checkbox" name="letters" value="c" id="abc_2"> C</label> </li> </ul> """ self.check_html(widget, 'letters', ['a', 'c'], html=html) @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True) def test_doesnt_localize_input_value(self): choices = [ (1, 'One'), (1000, 'One thousand'), (1000000, 'One million'), ] html = """ <ul> <li><label><input type="checkbox" name="numbers" value="1"> One</label></li> <li><label><input type="checkbox" name="numbers" value="1000"> One thousand</label></li> <li><label><input type="checkbox" name="numbers" value="1000000"> One million</label></li> </ul> """ self.check_html(self.widget(choices=choices), 'numbers', None, html=html) choices = [ (datetime.time(0, 0), 'midnight'), (datetime.time(12, 0), 'noon'), ] html = """ <ul> <li><label><input type="checkbox" name="times" value="00:00:00"> midnight</label></li> <li><label><input type="checkbox" name="times" value="12:00:00"> noon</label></li> </ul> """ self.check_html(self.widget(choices=choices), 'times', None, html=html) def test_use_required_attribute(self): widget = self.widget(choices=self.beatles) # Always False because browser validation would require all checkboxes # to be checked instead of at least one. self.assertIs(widget.use_required_attribute(None), False) self.assertIs(widget.use_required_attribute([]), False) self.assertIs(widget.use_required_attribute(['J', 'P']), False) def test_value_omitted_from_data(self): widget = self.widget(choices=self.beatles) self.assertIs(widget.value_omitted_from_data({}, {}, 'field'), False) self.assertIs(widget.value_omitted_from_data({'field': 'value'}, {}, 'field'), False) def test_label(self): """ CheckboxSelectMultiple doesn't contain 'for="field_0"' in the <label> because clicking that would toggle the first checkbox. """ class TestForm(forms.Form): f = forms.MultipleChoiceField(widget=CheckboxSelectMultiple) bound_field = TestForm()['f'] self.assertEqual(bound_field.field.widget.id_for_label('id'), '') self.assertEqual(bound_field.label_tag(), '<label>F:</label>')
a2a5600a4d4f7c0b4df8e908b22fc1b762dfe6f27e006aaec116d0bf45ab442d
import json import uuid from django.core.serializers.json import DjangoJSONEncoder from django.forms import ( CharField, Form, JSONField, Textarea, TextInput, ValidationError, ) from django.test import SimpleTestCase class JSONFieldTest(SimpleTestCase): def test_valid(self): field = JSONField() value = field.clean('{"a": "b"}') self.assertEqual(value, {'a': 'b'}) def test_valid_empty(self): field = JSONField(required=False) self.assertIsNone(field.clean('')) self.assertIsNone(field.clean(None)) def test_invalid(self): field = JSONField() with self.assertRaisesMessage(ValidationError, 'Enter a valid JSON.'): field.clean('{some badly formed: json}') def test_prepare_value(self): field = JSONField() self.assertEqual(field.prepare_value({'a': 'b'}), '{"a": "b"}') self.assertEqual(field.prepare_value(None), 'null') self.assertEqual(field.prepare_value('foo'), '"foo"') self.assertEqual(field.prepare_value('你好,世界'), '"你好,世界"') self.assertEqual(field.prepare_value({'a': '😀🐱'}), '{"a": "😀🐱"}') self.assertEqual( field.prepare_value(["你好,世界", "jaźń"]), '["你好,世界", "jaźń"]', ) def test_widget(self): field = JSONField() self.assertIsInstance(field.widget, Textarea) def test_custom_widget_kwarg(self): field = JSONField(widget=TextInput) self.assertIsInstance(field.widget, TextInput) def test_custom_widget_attribute(self): """The widget can be overridden with an attribute.""" class CustomJSONField(JSONField): widget = TextInput field = CustomJSONField() self.assertIsInstance(field.widget, TextInput) def test_converted_value(self): field = JSONField(required=False) tests = [ '["a", "b", "c"]', '{"a": 1, "b": 2}', '1', '1.5', '"foo"', 'true', 'false', 'null', ] for json_string in tests: with self.subTest(json_string=json_string): val = field.clean(json_string) self.assertEqual(field.clean(val), val) def test_has_changed(self): field = JSONField() self.assertIs(field.has_changed({'a': True}, '{"a": 1}'), True) self.assertIs(field.has_changed({'a': 1, 'b': 2}, '{"b": 2, "a": 1}'), False) def test_custom_encoder_decoder(self): class CustomDecoder(json.JSONDecoder): def __init__(self, object_hook=None, *args, **kwargs): return super().__init__(object_hook=self.as_uuid, *args, **kwargs) def as_uuid(self, dct): if 'uuid' in dct: dct['uuid'] = uuid.UUID(dct['uuid']) return dct value = {'uuid': uuid.UUID('{c141e152-6550-4172-a784-05448d98204b}')} encoded_value = '{"uuid": "c141e152-6550-4172-a784-05448d98204b"}' field = JSONField(encoder=DjangoJSONEncoder, decoder=CustomDecoder) self.assertEqual(field.prepare_value(value), encoded_value) self.assertEqual(field.clean(encoded_value), value) def test_formfield_disabled(self): class JSONForm(Form): json_field = JSONField(disabled=True) form = JSONForm({'json_field': '["bar"]'}, initial={'json_field': ['foo']}) self.assertIn('[&quot;foo&quot;]</textarea>', form.as_p()) def test_redisplay_wrong_input(self): """ Displaying a bound form (typically due to invalid input). The form should not overquote JSONField inputs. """ class JSONForm(Form): name = CharField(max_length=2) json_field = JSONField() # JSONField input is valid, name is too long. form = JSONForm({'name': 'xyz', 'json_field': '["foo"]'}) self.assertNotIn('json_field', form.errors) self.assertIn('[&quot;foo&quot;]</textarea>', form.as_p()) # Invalid JSONField. form = JSONForm({'name': 'xy', 'json_field': '{"foo"}'}) self.assertEqual(form.errors['json_field'], ['Enter a valid JSON.']) self.assertIn('{&quot;foo&quot;}</textarea>', form.as_p())
9ec70d46c3855a5e2e217c435eabc96eb0a9bd9ebde7cfda1229ac9473078e67
import uuid from django.core.exceptions import ValidationError from django.forms import UUIDField from django.test import SimpleTestCase class UUIDFieldTest(SimpleTestCase): def test_uuidfield_1(self): field = UUIDField() value = field.clean('550e8400e29b41d4a716446655440000') self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000')) def test_clean_value_with_dashes(self): field = UUIDField() value = field.clean('550e8400-e29b-41d4-a716-446655440000') self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000')) def test_uuidfield_2(self): field = UUIDField(required=False) self.assertIsNone(field.clean('')) self.assertIsNone(field.clean(None)) def test_uuidfield_3(self): field = UUIDField() with self.assertRaisesMessage(ValidationError, 'Enter a valid UUID.'): field.clean('550e8400') def test_uuidfield_4(self): field = UUIDField() value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000')) self.assertEqual(value, '550e8400-e29b-41d4-a716-446655440000')
9966f210aaf8c8b6382ff444bd71f81f13e3087ef6ab6b074a53f6f94ac8d995
from django.core.exceptions import ValidationError from django.db import models from django.forms import ChoiceField, Form from django.test import SimpleTestCase from . import FormFieldAssertionsMixin class ChoiceFieldTest(FormFieldAssertionsMixin, SimpleTestCase): def test_choicefield_1(self): f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')]) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean('') with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(None) self.assertEqual('1', f.clean(1)) self.assertEqual('1', f.clean('1')) msg = "'Select a valid choice. 3 is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean('3') def test_choicefield_2(self): f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False) self.assertEqual('', f.clean('')) self.assertEqual('', f.clean(None)) self.assertEqual('1', f.clean(1)) self.assertEqual('1', f.clean('1')) msg = "'Select a valid choice. 3 is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean('3') def test_choicefield_3(self): f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')]) self.assertEqual('J', f.clean('J')) msg = "'Select a valid choice. John is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean('John') def test_choicefield_4(self): f = ChoiceField( choices=[ ('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other'), ] ) self.assertEqual('1', f.clean(1)) self.assertEqual('1', f.clean('1')) self.assertEqual('3', f.clean(3)) self.assertEqual('3', f.clean('3')) self.assertEqual('5', f.clean(5)) self.assertEqual('5', f.clean('5')) msg = "'Select a valid choice. 6 is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean('6') def test_choicefield_choices_default(self): f = ChoiceField() self.assertEqual(f.choices, []) def test_choicefield_callable(self): def choices(): return [('J', 'John'), ('P', 'Paul')] f = ChoiceField(choices=choices) self.assertEqual('J', f.clean('J')) def test_choicefield_callable_may_evaluate_to_different_values(self): choices = [] def choices_as_callable(): return choices class ChoiceFieldForm(Form): choicefield = ChoiceField(choices=choices_as_callable) choices = [('J', 'John')] form = ChoiceFieldForm() self.assertEqual([('J', 'John')], list(form.fields['choicefield'].choices)) choices = [('P', 'Paul')] form = ChoiceFieldForm() self.assertEqual([('P', 'Paul')], list(form.fields['choicefield'].choices)) def test_choicefield_disabled(self): f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')], disabled=True) self.assertWidgetRendersTo( f, '<select id="id_f" name="f" disabled><option value="J">John</option>' '<option value="P">Paul</option></select>' ) def test_choicefield_enumeration(self): class FirstNames(models.TextChoices): JOHN = 'J', 'John' PAUL = 'P', 'Paul' f = ChoiceField(choices=FirstNames.choices) self.assertEqual(f.clean('J'), 'J') msg = "'Select a valid choice. 3 is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean('3')
7808310bd3fbf33232551eb8268e8cecd4b7557883e0b1d70ad5bfc87622789c
from django.core.exceptions import ValidationError
from django.forms import URLField
from django.test import SimpleTestCase

from . import FormFieldAssertionsMixin


class URLFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
    def test_urlfield_1(self):
        f = URLField()
        self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" required>')
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean('')
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean(None)
        self.assertEqual('http://localhost', f.clean('http://localhost'))
        self.assertEqual('http://example.com', f.clean('http://example.com'))
        self.assertEqual('http://example.com.', f.clean('http://example.com.'))
        self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
        self.assertEqual('http://www.example.com:8000/test', f.clean('http://www.example.com:8000/test'))
        self.assertEqual('http://valid-with-hyphens.com', f.clean('valid-with-hyphens.com'))
        self.assertEqual('http://subdomain.domain.com', f.clean('subdomain.domain.com'))
        self.assertEqual('http://200.8.9.10', f.clean('http://200.8.9.10'))
        self.assertEqual('http://200.8.9.10:8000/test', f.clean('http://200.8.9.10:8000/test'))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('foo')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://example')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://example.')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('com.')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('.')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://.com')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://invalid-.com')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://-invalid.com')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://inv-.alid-.com')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://inv-.-alid.com')
        self.assertEqual('http://valid-----hyphens.com', f.clean('http://valid-----hyphens.com'))
        self.assertEqual(
            'http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah',
            f.clean('http://some.idn.xyzäöüßabc.domain.com:123/blah')
        )
        self.assertEqual(
            'http://www.example.com/s/http://code.djangoproject.com/ticket/13804',
            f.clean('www.example.com/s/http://code.djangoproject.com/ticket/13804')
        )
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('[a')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://[a')

    def test_url_regex_ticket11198(self):
        f = URLField()
        # hangs "forever" if catastrophic backtracking in ticket:#11198 not fixed
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://%s' % ("X" * 200,))

        # a second test, to make sure the problem is really addressed, even on
        # domains that don't fail the domain label length check in the regex
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://%s' % ("X" * 60,))

    def test_urlfield_2(self):
        f = URLField(required=False)
        self.assertEqual('', f.clean(''))
        self.assertEqual('', f.clean(None))
        self.assertEqual('http://example.com', f.clean('http://example.com'))
        self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('foo')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://example')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://example.')
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean('http://.com')

    def test_urlfield_5(self):
        f = URLField(min_length=15, max_length=20)
        self.assertWidgetRendersTo(f, '<input id="id_f" type="url" name="f" maxlength="20" minlength="15" required>')
        with self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 15 characters (it has 12).'"):
            f.clean('http://f.com')
        self.assertEqual('http://example.com', f.clean('http://example.com'))
        with self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 37).'"):
            f.clean('http://abcdefghijklmnopqrstuvwxyz.com')

    def test_urlfield_6(self):
        f = URLField(required=False)
        self.assertEqual('http://example.com', f.clean('example.com'))
        self.assertEqual('', f.clean(''))
        self.assertEqual('https://example.com', f.clean('https://example.com'))

    def test_urlfield_7(self):
        f = URLField()
        self.assertEqual('http://example.com', f.clean('http://example.com'))
        self.assertEqual('http://example.com/test', f.clean('http://example.com/test'))
        self.assertEqual(
            'http://example.com?some_param=some_value',
            f.clean('http://example.com?some_param=some_value')
        )

    def test_urlfield_9(self):
        f = URLField()
        urls = (
            'http://עברית.idn.icann.org/',
            'http://sãopaulo.com/',
            'http://sãopaulo.com.br/',
            'http://пример.испытание/',
            'http://مثال.إختبار/',
            'http://例子.测试/',
            'http://例子.測試/',
            'http://उदाहरण.परीक्षा/',
            'http://例え.テスト/',
            'http://مثال.آزمایشی/',
            'http://실례.테스트/',
            'http://العربية.idn.icann.org/',
        )
        for url in urls:
            with self.subTest(url=url):
                # Valid IDN
                self.assertEqual(url, f.clean(url))

    def test_urlfield_10(self):
        """URLField correctly validates IPv6 (#18779)."""
        f = URLField()
        urls = (
            'http://[12:34::3a53]/',
            'http://[a34:9238::]:8080/',
        )
        for url in urls:
            with self.subTest(url=url):
                self.assertEqual(url, f.clean(url))

    def test_urlfield_not_string(self):
        f = URLField(required=False)
        with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
            f.clean(23)

    def test_urlfield_normalization(self):
        f = URLField()
        self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')

    def test_urlfield_strip_on_none_value(self):
        f = URLField(required=False, empty_value=None)
        self.assertIsNone(f.clean(''))
        self.assertIsNone(f.clean(None))

    def test_urlfield_unable_to_set_strip_kwarg(self):
        msg = "__init__() got multiple values for keyword argument 'strip'"
        with self.assertRaisesMessage(TypeError, msg):
            URLField(strip=False)
bfa286bb7f66012d116a8e0ab1272206113946fefafced9ee39eafb6bcedac76
from django.core.exceptions import ValidationError
from django.forms import EmailField
from django.test import SimpleTestCase

from . import FormFieldAssertionsMixin


class EmailFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
    def test_emailfield_1(self):
        f = EmailField()
        self.assertWidgetRendersTo(f, '<input type="email" name="f" id="id_f" required>')
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean('')
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean(None)
        self.assertEqual('[email protected]', f.clean('[email protected]'))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'"):
            f.clean('foo')
        self.assertEqual(
            '[email protected]\xe4\xf6\xfc\xdfabc.part.com',
            f.clean('[email protected]äöüßabc.part.com')
        )

    def test_email_regexp_for_performance(self):
        f = EmailField()
        # Check for runaway regex security problem. This will take a long time
        # if the security fix isn't in place.
        addr = '[email protected]'
        self.assertEqual(addr, f.clean(addr))

    def test_emailfield_not_required(self):
        f = EmailField(required=False)
        self.assertEqual('', f.clean(''))
        self.assertEqual('', f.clean(None))
        self.assertEqual('[email protected]', f.clean('[email protected]'))
        self.assertEqual('[email protected]', f.clean(' [email protected] \t \t '))
        with self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'"):
            f.clean('foo')

    def test_emailfield_min_max_length(self):
        f = EmailField(min_length=10, max_length=15)
        self.assertWidgetRendersTo(
            f,
            '<input id="id_f" type="email" name="f" maxlength="15" minlength="10" required>',
        )
        with self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 9).'"):
            f.clean('[email protected]')
        self.assertEqual('[email protected]', f.clean('[email protected]'))
        with self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 15 characters (it has 20).'"):
            f.clean('[email protected]')

    def test_emailfield_strip_on_none_value(self):
        f = EmailField(required=False, empty_value=None)
        self.assertIsNone(f.clean(''))
        self.assertIsNone(f.clean(None))

    def test_emailfield_unable_to_set_strip_kwarg(self):
        msg = "__init__() got multiple values for keyword argument 'strip'"
        with self.assertRaisesMessage(TypeError, msg):
            EmailField(strip=False)
ef99b4a1d0a6a25afe6d508fd5c4289435ff30f9025d85e6746845ef435e7caa
from django.forms import SlugField
from django.test import SimpleTestCase


class SlugFieldTest(SimpleTestCase):
    def test_slugfield_normalization(self):
        f = SlugField()
        self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')

    def test_slugfield_unicode_normalization(self):
        f = SlugField(allow_unicode=True)
        self.assertEqual(f.clean('a'), 'a')
        self.assertEqual(f.clean('1'), '1')
        self.assertEqual(f.clean('a1'), 'a1')
        self.assertEqual(f.clean('你好'), '你好')
        self.assertEqual(f.clean(' 你-好 '), '你-好')
        self.assertEqual(f.clean('ıçğüş'), 'ıçğüş')
        self.assertEqual(f.clean('foo-ıç-bar'), 'foo-ıç-bar')

    def test_empty_value(self):
        f = SlugField(required=False)
        self.assertEqual(f.clean(''), '')
        self.assertEqual(f.clean(None), '')
        f = SlugField(required=False, empty_value=None)
        self.assertIsNone(f.clean(''))
        self.assertIsNone(f.clean(None))