import ctypes
import itertools
import json
import pickle
import random
from binascii import a2b_hex
from io import BytesIO
from unittest import mock, skipIf

from django.contrib.gis import gdal
from django.contrib.gis.geos import (
    GeometryCollection, GEOSException, GEOSGeometry, LinearRing, LineString,
    MultiLineString, MultiPoint, MultiPolygon, Point, Polygon, fromfile,
    fromstr,
)
from django.contrib.gis.geos.libgeos import geos_version_tuple
from django.contrib.gis.shortcuts import numpy
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase

from ..test_data import TestDataMixin


class GEOSTest(SimpleTestCase, TestDataMixin):

    def test_wkt(self):
        "Testing WKT output."
        for g in self.geometries.wkt_out:
            geom = fromstr(g.wkt)
            if geom.hasz:
                self.assertEqual(g.ewkt, geom.wkt)

    def test_hex(self):
        "Testing HEX output."
        for g in self.geometries.hex_wkt:
            geom = fromstr(g.wkt)
            self.assertEqual(g.hex, geom.hex.decode())

    def test_hexewkb(self):
        "Testing (HEX)EWKB output."
        # For testing HEX(EWKB).
        ogc_hex = b'01010000000000000000000000000000000000F03F'
        ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
        hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
        hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'

        pnt_2d = Point(0, 1, srid=4326)
        pnt_3d = Point(0, 1, 2, srid=4326)

        # OGC-compliant HEX will not have SRID value.
        self.assertEqual(ogc_hex, pnt_2d.hex)
        self.assertEqual(ogc_hex_3d, pnt_3d.hex)

        # HEXEWKB should be appropriate for its dimension -- have to use a
        # WKBWriter w/dimension set accordingly, else GEOS will insert
        # garbage into 3D coordinate if there is none.
        self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
        self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
        self.assertIs(GEOSGeometry(hexewkb_3d).hasz, True)

        # Same for EWKB.
        self.assertEqual(memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
        self.assertEqual(memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)

        # Redundant sanity check.
        self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)

    def test_kml(self):
        "Testing KML output."
        for tg in self.geometries.wkt_out:
            geom = fromstr(tg.wkt)
            kml = getattr(tg, 'kml', False)
            if kml:
                self.assertEqual(kml, geom.kml)

    def test_errors(self):
        "Testing the Error handlers."
        # string-based
        for err in self.geometries.errors:
            with self.assertRaises((GEOSException, ValueError)):
                fromstr(err.wkt)

        # Bad WKB
        with self.assertRaises(GEOSException):
            GEOSGeometry(memoryview(b'0'))

        class NotAGeometry:
            pass

        # Some other object
        with self.assertRaises(TypeError):
            GEOSGeometry(NotAGeometry())
        # None
        with self.assertRaises(TypeError):
            GEOSGeometry(None)

    def test_wkb(self):
        "Testing WKB output."
        for g in self.geometries.hex_wkt:
            geom = fromstr(g.wkt)
            wkb = geom.wkb
            self.assertEqual(wkb.hex().upper(), g.hex)

    def test_create_hex(self):
        "Testing creation from HEX."
        for g in self.geometries.hex_wkt:
            geom_h = GEOSGeometry(g.hex)
            # We need to do this so decimal places get normalized.
            geom_t = fromstr(g.wkt)
            self.assertEqual(geom_t.wkt, geom_h.wkt)

    def test_create_wkb(self):
        "Testing creation from WKB."
        for g in self.geometries.hex_wkt:
            wkb = memoryview(bytes.fromhex(g.hex))
            geom_h = GEOSGeometry(wkb)
            # We need to do this so decimal places get normalized.
            geom_t = fromstr(g.wkt)
            self.assertEqual(geom_t.wkt, geom_h.wkt)

    def test_ewkt(self):
        "Testing EWKT."
        srids = (-1, 32140)
        for srid in srids:
            for p in self.geometries.polygons:
                ewkt = 'SRID=%d;%s' % (srid, p.wkt)
                poly = fromstr(ewkt)
                self.assertEqual(srid, poly.srid)
                self.assertEqual(srid, poly.shell.srid)
                self.assertEqual(srid, fromstr(poly.ewkt).srid)  # Checking export

    def test_json(self):
        "Testing GeoJSON input/output (via GDAL)."
        for g in self.geometries.json_geoms:
            geom = GEOSGeometry(g.wkt)
            if not hasattr(g, 'not_equal'):
                # Loading the JSON to prevent decimal differences.
                self.assertEqual(json.loads(g.json), json.loads(geom.json))
                self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
            self.assertEqual(GEOSGeometry(g.wkt, 4326), GEOSGeometry(geom.json))

    def test_json_srid(self):
        geojson_data = {
            "type": "Point",
            "coordinates": [2, 49],
            "crs": {
                "type": "name",
                "properties": {
                    "name": "urn:ogc:def:crs:EPSG::4322"
                }
            }
        }
        self.assertEqual(GEOSGeometry(json.dumps(geojson_data)), Point(2, 49, srid=4322))

    def test_fromfile(self):
        "Testing the fromfile() factory."
        ref_pnt = GEOSGeometry('POINT(5 23)')

        wkt_f = BytesIO()
        wkt_f.write(ref_pnt.wkt.encode())
        wkb_f = BytesIO()
        wkb_f.write(bytes(ref_pnt.wkb))

        # Other tests use `fromfile()` on string filenames so those
        # aren't tested here.
        for fh in (wkt_f, wkb_f):
            fh.seek(0)
            pnt = fromfile(fh)
            self.assertEqual(ref_pnt, pnt)

    def test_eq(self):
        "Testing equivalence."
        p = fromstr('POINT(5 23)')
        self.assertEqual(p, p.wkt)
        self.assertNotEqual(p, 'foo')
        ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
        self.assertEqual(ls, ls.wkt)
        self.assertNotEqual(p, 'bar')
        self.assertEqual(p, 'POINT(5.0 23.0)')
        # An error shouldn't be raised on equivalence testing with
        # an invalid type.
        for g in (p, ls):
            self.assertIsNotNone(g)
            self.assertNotEqual(g, {'foo': 'bar'})
            self.assertIsNot(g, False)

    def test_hash(self):
        point_1 = Point(5, 23)
        point_2 = Point(5, 23, srid=4326)
        point_3 = Point(5, 23, srid=32632)
        multipoint_1 = MultiPoint(point_1, srid=4326)
        multipoint_2 = MultiPoint(point_2)
        multipoint_3 = MultiPoint(point_3)

        self.assertNotEqual(hash(point_1), hash(point_2))
        self.assertNotEqual(hash(point_1), hash(point_3))
        self.assertNotEqual(hash(point_2), hash(point_3))

        self.assertNotEqual(hash(multipoint_1), hash(multipoint_2))
        self.assertEqual(hash(multipoint_2), hash(multipoint_3))

        self.assertNotEqual(hash(multipoint_1), hash(point_1))
        self.assertNotEqual(hash(multipoint_2), hash(point_2))
        self.assertNotEqual(hash(multipoint_3), hash(point_3))

    def test_eq_with_srid(self):
        "Testing non-equivalence with different srids."
        p0 = Point(5, 23)
        p1 = Point(5, 23, srid=4326)
        p2 = Point(5, 23, srid=32632)
        # GEOS
        self.assertNotEqual(p0, p1)
        self.assertNotEqual(p1, p2)
        # EWKT
        self.assertNotEqual(p0, p1.ewkt)
        self.assertNotEqual(p1, p0.ewkt)
        self.assertNotEqual(p1, p2.ewkt)
        # Equivalence with matching SRIDs
        self.assertEqual(p2, p2)
        self.assertEqual(p2, p2.ewkt)
        # WKT contains no SRID so will not equal
        self.assertNotEqual(p2, p2.wkt)
        # SRID of 0
        self.assertEqual(p0, 'SRID=0;POINT (5 23)')
        self.assertNotEqual(p1, 'SRID=0;POINT (5 23)')

    def test_points(self):
        "Testing Point objects."
        prev = fromstr('POINT(0 0)')
        for p in self.geometries.points:
            # Creating the point from the WKT
            pnt = fromstr(p.wkt)
            self.assertEqual(pnt.geom_type, 'Point')
            self.assertEqual(pnt.geom_typeid, 0)
            self.assertEqual(pnt.dims, 0)
            self.assertEqual(p.x, pnt.x)
            self.assertEqual(p.y, pnt.y)
            self.assertEqual(pnt, fromstr(p.wkt))
            self.assertIs(pnt == prev, False)  # Use assertIs() to test __eq__.
            # Making sure that the point's X, Y components are what we expect
            self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
            self.assertAlmostEqual(p.y, pnt.tuple[1], 9)

            # Testing the third dimension, and getting the tuple arguments
            if hasattr(p, 'z'):
                self.assertIs(pnt.hasz, True)
                self.assertEqual(p.z, pnt.z)
                self.assertEqual(p.z, pnt.tuple[2], 9)
                tup_args = (p.x, p.y, p.z)
                set_tup1 = (2.71, 3.14, 5.23)
                set_tup2 = (5.23, 2.71, 3.14)
            else:
                self.assertIs(pnt.hasz, False)
                self.assertIsNone(pnt.z)
                tup_args = (p.x, p.y)
                set_tup1 = (2.71, 3.14)
                set_tup2 = (3.14, 2.71)

            # Centroid operation on point should be point itself
            self.assertEqual(p.centroid, pnt.centroid.tuple)

            # Now testing the different constructors
            pnt2 = Point(tup_args)  # e.g., Point((1, 2))
            pnt3 = Point(*tup_args)  # e.g., Point(1, 2)
            self.assertEqual(pnt, pnt2)
            self.assertEqual(pnt, pnt3)

            # Now testing setting the x and y
            pnt.y = 3.14
            pnt.x = 2.71
            self.assertEqual(3.14, pnt.y)
            self.assertEqual(2.71, pnt.x)

            # Setting via the tuple/coords property
            pnt.tuple = set_tup1
            self.assertEqual(set_tup1, pnt.tuple)
            pnt.coords = set_tup2
            self.assertEqual(set_tup2, pnt.coords)

            prev = pnt  # setting the previous geometry

    def test_point_reverse(self):
        point = GEOSGeometry('POINT(144.963 -37.8143)', 4326)
        self.assertEqual(point.srid, 4326)
        point.reverse()
        self.assertEqual(point.ewkt, 'SRID=4326;POINT (-37.8143 144.963)')

    def test_multipoints(self):
        "Testing MultiPoint objects."
        for mp in self.geometries.multipoints:
            mpnt = fromstr(mp.wkt)
            self.assertEqual(mpnt.geom_type, 'MultiPoint')
            self.assertEqual(mpnt.geom_typeid, 4)
            self.assertEqual(mpnt.dims, 0)

            self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
            self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)

            with self.assertRaises(IndexError):
                mpnt.__getitem__(len(mpnt))
            self.assertEqual(mp.centroid, mpnt.centroid.tuple)
            self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
            for p in mpnt:
                self.assertEqual(p.geom_type, 'Point')
                self.assertEqual(p.geom_typeid, 0)
                self.assertIs(p.empty, False)
                self.assertIs(p.valid, True)

    def test_linestring(self):
        "Testing LineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.linestrings:
            ls = fromstr(l.wkt)
            self.assertEqual(ls.geom_type, 'LineString')
            self.assertEqual(ls.geom_typeid, 1)
            self.assertEqual(ls.dims, 1)
            self.assertIs(ls.empty, False)
            self.assertIs(ls.ring, False)
            if hasattr(l, 'centroid'):
                self.assertEqual(l.centroid, ls.centroid.tuple)
            if hasattr(l, 'tup'):
                self.assertEqual(l.tup, ls.tuple)

            self.assertEqual(ls, fromstr(l.wkt))
            self.assertIs(ls == prev, False)  # Use assertIs() to test __eq__.
            with self.assertRaises(IndexError):
                ls.__getitem__(len(ls))
            prev = ls

            # Creating a LineString from a tuple, list, and numpy array
            self.assertEqual(ls, LineString(ls.tuple))  # tuple
            self.assertEqual(ls, LineString(*ls.tuple))  # as individual arguments
            self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple]))  # as list
            # Point individual arguments
            self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
            if numpy:
                self.assertEqual(ls, LineString(numpy.array(ls.tuple)))  # as numpy array

        with self.assertRaisesMessage(TypeError, 'Each coordinate should be a sequence (list or tuple)'):
            LineString((0, 0))

        with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
            LineString([(0, 0)])

        if numpy:
            with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
                LineString(numpy.array([(0, 0)]))

        with mock.patch('django.contrib.gis.geos.linestring.numpy', False):
            with self.assertRaisesMessage(TypeError, 'Invalid initialization input for LineStrings.'):
                LineString('wrong input')

        # Test __iter__().
        self.assertEqual(list(LineString((0, 0), (1, 1), (2, 2))), [(0, 0), (1, 1), (2, 2)])

    def test_linestring_reverse(self):
        line = GEOSGeometry('LINESTRING(144.963 -37.8143,151.2607 -33.887)', 4326)
        self.assertEqual(line.srid, 4326)
        line.reverse()
        self.assertEqual(line.ewkt, 'SRID=4326;LINESTRING (151.2607 -33.887, 144.963 -37.8143)')

    def _test_is_counterclockwise(self):
        lr = LinearRing((0, 0), (1, 0), (0, 1), (0, 0))
        self.assertIs(lr.is_counterclockwise, True)
        lr.reverse()
        self.assertIs(lr.is_counterclockwise, False)
        msg = 'Orientation of an empty LinearRing cannot be determined.'
        with self.assertRaisesMessage(ValueError, msg):
            LinearRing().is_counterclockwise

    @skipIf(geos_version_tuple() < (3, 7), 'GEOS >= 3.7.0 is required')
    def test_is_counterclockwise(self):
        self._test_is_counterclockwise()

    @skipIf(geos_version_tuple() < (3, 7), 'GEOS >= 3.7.0 is required')
    def test_is_counterclockwise_geos_error(self):
        with mock.patch('django.contrib.gis.geos.prototypes.cs_is_ccw') as mocked:
            mocked.return_value = 0
            mocked.func_name = 'GEOSCoordSeq_isCCW'
            msg = 'Error encountered in GEOS C function "GEOSCoordSeq_isCCW".'
            with self.assertRaisesMessage(GEOSException, msg):
                LinearRing((0, 0), (1, 0), (0, 1), (0, 0)).is_counterclockwise

    @mock.patch('django.contrib.gis.geos.libgeos.geos_version', lambda: b'3.6.9')
    def test_is_counterclockwise_fallback(self):
        self._test_is_counterclockwise()

    def test_multilinestring(self):
        "Testing MultiLineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.multilinestrings:
            ml = fromstr(l.wkt)
            self.assertEqual(ml.geom_type, 'MultiLineString')
            self.assertEqual(ml.geom_typeid, 5)
            self.assertEqual(ml.dims, 1)

            self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
            self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)

            self.assertEqual(ml, fromstr(l.wkt))
            self.assertIs(ml == prev, False)  # Use assertIs() to test __eq__.
            prev = ml

            for ls in ml:
                self.assertEqual(ls.geom_type, 'LineString')
                self.assertEqual(ls.geom_typeid, 1)
                self.assertIs(ls.empty, False)

            with self.assertRaises(IndexError):
                ml.__getitem__(len(ml))
            self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
            self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))

    def test_linearring(self):
        "Testing LinearRing objects."
        for rr in self.geometries.linearrings:
            lr = fromstr(rr.wkt)
            self.assertEqual(lr.geom_type, 'LinearRing')
            self.assertEqual(lr.geom_typeid, 2)
            self.assertEqual(lr.dims, 1)
            self.assertEqual(rr.n_p, len(lr))
            self.assertIs(lr.valid, True)
            self.assertIs(lr.empty, False)

            # Creating a LinearRing from a tuple, list, and numpy array
            self.assertEqual(lr, LinearRing(lr.tuple))
            self.assertEqual(lr, LinearRing(*lr.tuple))
            self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
            if numpy:
                self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))

        with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 3.'):
            LinearRing((0, 0), (1, 1), (0, 0))

        with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
            LinearRing([(0, 0)])

        if numpy:
            with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
                LinearRing(numpy.array([(0, 0)]))

    def test_linearring_json(self):
        self.assertJSONEqual(
            LinearRing((0, 0), (0, 1), (1, 1), (0, 0)).json,
            '{"coordinates": [[0, 0], [0, 1], [1, 1], [0, 0]], "type": "LineString"}',
        )

    def test_polygons_from_bbox(self):
        "Testing `from_bbox` class method."
        bbox = (-180, -90, 180, 90)
        p = Polygon.from_bbox(bbox)
        self.assertEqual(bbox, p.extent)

        # Testing numerical precision
        x = 3.14159265358979323
        bbox = (0, 0, 1, x)
        p = Polygon.from_bbox(bbox)
        y = p.extent[-1]
        self.assertEqual(format(x, '.13f'), format(y, '.13f'))

    def test_polygons(self):
        "Testing Polygon objects."

        prev = fromstr('POINT(0 0)')
        for p in self.geometries.polygons:
            # Creating the Polygon, testing its properties.
            poly = fromstr(p.wkt)
            self.assertEqual(poly.geom_type, 'Polygon')
            self.assertEqual(poly.geom_typeid, 3)
            self.assertEqual(poly.dims, 2)
            self.assertIs(poly.empty, False)
            self.assertIs(poly.ring, False)
            self.assertEqual(p.n_i, poly.num_interior_rings)
            self.assertEqual(p.n_i + 1, len(poly))  # Testing __len__
            self.assertEqual(p.n_p, poly.num_points)

            # Area & Centroid
            self.assertAlmostEqual(p.area, poly.area, 9)
            self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
            self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)

            # Testing the geometry equivalence
            self.assertEqual(poly, fromstr(p.wkt))
            # Should not be equal to previous geometry
            self.assertIs(poly == prev, False)  # Use assertIs() to test __eq__.
            self.assertIs(poly != prev, True)  # Use assertIs() to test __ne__.

            # Testing the exterior ring
            ring = poly.exterior_ring
            self.assertEqual(ring.geom_type, 'LinearRing')
            self.assertEqual(ring.geom_typeid, 2)
            if p.ext_ring_cs:
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple)  # Testing __getitem__

            # Testing __getitem__ and __setitem__ on invalid indices
            with self.assertRaises(IndexError):
                poly.__getitem__(len(poly))
            with self.assertRaises(IndexError):
                poly.__setitem__(len(poly), False)
            with self.assertRaises(IndexError):
                poly.__getitem__(-1 * len(poly) - 1)

            # Testing __iter__
            for r in poly:
                self.assertEqual(r.geom_type, 'LinearRing')
                self.assertEqual(r.geom_typeid, 2)

            # Testing polygon construction.
            with self.assertRaises(TypeError):
                Polygon(0, [1, 2, 3])
            with self.assertRaises(TypeError):
                Polygon('foo')

            # Polygon(shell, (hole1, ... holeN))
            ext_ring, *int_rings = poly
            self.assertEqual(poly, Polygon(ext_ring, int_rings))

            # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
            ring_tuples = tuple(r.tuple for r in poly)
            self.assertEqual(poly, Polygon(*ring_tuples))

            # Constructing with tuples of LinearRings.
            self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
            self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)

    def test_polygons_templates(self):
        # Accessing Polygon attributes in templates should work.
        engine = Engine()
        template = engine.from_string('{{ polygons.0.wkt }}')
        polygons = [fromstr(p.wkt) for p in self.geometries.multipolygons[:2]]
        content = template.render(Context({'polygons': polygons}))
        self.assertIn('MULTIPOLYGON (((100', content)

    def test_polygon_comparison(self):
        p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
        p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
        self.assertGreater(p1, p2)
        self.assertLess(p2, p1)

        p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
        p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
        self.assertGreater(p4, p3)
        self.assertLess(p3, p4)

    def test_multipolygons(self):
        "Testing MultiPolygon objects."
        fromstr('POINT (0 0)')
        for mp in self.geometries.multipolygons:
            mpoly = fromstr(mp.wkt)
            self.assertEqual(mpoly.geom_type, 'MultiPolygon')
            self.assertEqual(mpoly.geom_typeid, 6)
            self.assertEqual(mpoly.dims, 2)
            self.assertEqual(mp.valid, mpoly.valid)

            if mp.valid:
                self.assertEqual(mp.num_geom, mpoly.num_geom)
                self.assertEqual(mp.n_p, mpoly.num_coords)
                self.assertEqual(mp.num_geom, len(mpoly))
                with self.assertRaises(IndexError):
                    mpoly.__getitem__(len(mpoly))
                for p in mpoly:
                    self.assertEqual(p.geom_type, 'Polygon')
                    self.assertEqual(p.geom_typeid, 3)
                    self.assertIs(p.valid, True)
                self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)

    def test_memory_hijinks(self):
        "Testing Geometry __del__() on rings and polygons."
        # #### Memory issues with rings and polygons

        # These tests are needed to ensure sanity with writable geometries.

        # Getting a polygon with interior rings, and pulling out the interior rings
        poly = fromstr(self.geometries.polygons[1].wkt)
        ring1 = poly[0]
        ring2 = poly[1]

        # These deletes should be 'harmless' since they are done on child geometries
        del ring1
        del ring2
        ring1 = poly[0]
        ring2 = poly[1]

        # Deleting the polygon
        del poly

        # Access to these rings is OK since they are clones.
        str(ring1)
        str(ring2)

    def test_coord_seq(self):
        "Testing Coordinate Sequence objects."
        for p in self.geometries.polygons:
            if p.ext_ring_cs:
                # Constructing the polygon and getting the coordinate sequence
                poly = fromstr(p.wkt)
                cs = poly.exterior_ring.coord_seq

                self.assertEqual(p.ext_ring_cs, cs.tuple)  # done in the Polygon test too.
                self.assertEqual(len(p.ext_ring_cs), len(cs))  # Making sure __len__ works

                # Checks __getitem__ and __setitem__
                for i in range(len(p.ext_ring_cs)):
                    c1 = p.ext_ring_cs[i]  # Expected value
                    c2 = cs[i]  # Value from coordseq
                    self.assertEqual(c1, c2)

                    # Constructing the test value to set the coordinate sequence with
                    if len(c1) == 2:
                        tset = (5, 23)
                    else:
                        tset = (5, 23, 8)
                    cs[i] = tset

                    # Making sure every set point matches what we expect
                    for j in range(len(tset)):
                        cs[i] = tset
                        self.assertEqual(tset[j], cs[i][j])

    def test_relate_pattern(self):
        "Testing relate() and relate_pattern()."
        g = fromstr('POINT (0 0)')
        with self.assertRaises(GEOSException):
            g.relate_pattern(0, 'invalid pattern, yo')
        for rg in self.geometries.relate_geoms:
            a = fromstr(rg.wkt_a)
            b = fromstr(rg.wkt_b)
            self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
            self.assertEqual(rg.pattern, a.relate(b))

    def test_intersection(self):
        "Testing intersects() and intersection()."
        for i in range(len(self.geometries.topology_geoms)):
            a = fromstr(self.geometries.topology_geoms[i].wkt_a)
            b = fromstr(self.geometries.topology_geoms[i].wkt_b)
            i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
            self.assertIs(a.intersects(b), True)
            i2 = a.intersection(b)
            self.assertEqual(i1, i2)
            self.assertEqual(i1, a & b)  # __and__ is intersection operator
            a &= b  # testing __iand__
            self.assertEqual(i1, a)

    def test_union(self):
        "Testing union()."
        for i in range(len(self.geometries.topology_geoms)):
            a = fromstr(self.geometries.topology_geoms[i].wkt_a)
            b = fromstr(self.geometries.topology_geoms[i].wkt_b)
            u1 = fromstr(self.geometries.union_geoms[i].wkt)
            u2 = a.union(b)
            self.assertEqual(u1, u2)
            self.assertEqual(u1, a | b)  # __or__ is union operator
            a |= b  # testing __ior__
            self.assertEqual(u1, a)

    def test_unary_union(self):
        "Testing unary_union."
        for i in range(len(self.geometries.topology_geoms)):
            a = fromstr(self.geometries.topology_geoms[i].wkt_a)
            b = fromstr(self.geometries.topology_geoms[i].wkt_b)
            u1 = fromstr(self.geometries.union_geoms[i].wkt)
            u2 = GeometryCollection(a, b).unary_union
            self.assertTrue(u1.equals(u2))

    def test_difference(self):
        "Testing difference()."
        for i in range(len(self.geometries.topology_geoms)):
            a = fromstr(self.geometries.topology_geoms[i].wkt_a)
            b = fromstr(self.geometries.topology_geoms[i].wkt_b)
            d1 = fromstr(self.geometries.diff_geoms[i].wkt)
            d2 = a.difference(b)
            self.assertEqual(d1, d2)
            self.assertEqual(d1, a - b)  # __sub__ is difference operator
            a -= b  # testing __isub__
            self.assertEqual(d1, a)

    def test_symdifference(self):
        "Testing sym_difference()."
        for i in range(len(self.geometries.topology_geoms)):
            a = fromstr(self.geometries.topology_geoms[i].wkt_a)
            b = fromstr(self.geometries.topology_geoms[i].wkt_b)
            d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
            d2 = a.sym_difference(b)
            self.assertEqual(d1, d2)
            self.assertEqual(d1, a ^ b)  # __xor__ is symmetric difference operator
            a ^= b  # testing __ixor__
            self.assertEqual(d1, a)

    def test_buffer(self):
        bg = self.geometries.buffer_geoms[0]
        g = fromstr(bg.wkt)

        # Can't use a floating-point for the number of quadsegs.
        with self.assertRaises(ctypes.ArgumentError):
            g.buffer(bg.width, quadsegs=1.1)

        self._test_buffer(self.geometries.buffer_geoms, 'buffer')

    def test_buffer_with_style(self):
        bg = self.geometries.buffer_with_style_geoms[0]
        g = fromstr(bg.wkt)

        # Can't use a floating-point for the number of quadsegs.
        with self.assertRaises(ctypes.ArgumentError):
            g.buffer_with_style(bg.width, quadsegs=1.1)

        # Can't use a floating-point for the end cap style.
        with self.assertRaises(ctypes.ArgumentError):
            g.buffer_with_style(bg.width, end_cap_style=1.2)
        # Can't use an end cap style that is not in the enum.
        with self.assertRaises(GEOSException):
            g.buffer_with_style(bg.width, end_cap_style=55)

        # Can't use a floating-point for the join style.
        with self.assertRaises(ctypes.ArgumentError):
            g.buffer_with_style(bg.width, join_style=1.3)
        # Can't use a join style that is not in the enum.
        with self.assertRaises(GEOSException):
            g.buffer_with_style(bg.width, join_style=66)

        self._test_buffer(
            itertools.chain(self.geometries.buffer_geoms, self.geometries.buffer_with_style_geoms),
            'buffer_with_style',
        )

    def _test_buffer(self, geometries, buffer_method_name):
        for bg in geometries:
            g = fromstr(bg.wkt)

            # The buffer we expect
            exp_buf = fromstr(bg.buffer_wkt)

            # Constructing our buffer
            buf_kwargs = {
                kwarg_name: getattr(bg, kwarg_name)
                for kwarg_name in ('width', 'quadsegs', 'end_cap_style', 'join_style', 'mitre_limit')
                if hasattr(bg, kwarg_name)
            }
            buf = getattr(g, buffer_method_name)(**buf_kwargs)
            self.assertEqual(exp_buf.num_coords, buf.num_coords)
            self.assertEqual(len(exp_buf), len(buf))

            # Now assuring that each point in the buffer is almost equal
            for j in range(len(exp_buf)):
                exp_ring = exp_buf[j]
                buf_ring = buf[j]
                self.assertEqual(len(exp_ring), len(buf_ring))
                for k in range(len(exp_ring)):
                    # Asserting the X, Y of each point are almost equal (due to floating point imprecision)
                    self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                    self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)

    def test_covers(self):
        poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))
        self.assertTrue(poly.covers(Point(5, 5)))
        self.assertFalse(poly.covers(Point(100, 100)))

    def test_closed(self):
        ls_closed = LineString((0, 0), (1, 1), (0, 0))
        ls_not_closed = LineString((0, 0), (1, 1))
        self.assertFalse(ls_not_closed.closed)
        self.assertTrue(ls_closed.closed)

    def test_srid(self):
        "Testing the SRID property and keyword."
        # Testing SRID keyword on Point
        pnt = Point(5, 23, srid=4326)
        self.assertEqual(4326, pnt.srid)
        pnt.srid = 3084
        self.assertEqual(3084, pnt.srid)
        with self.assertRaises(ctypes.ArgumentError):
            pnt.srid = '4326'

        # Testing SRID keyword on fromstr(), and on Polygon rings.
        poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
        self.assertEqual(4269, poly.srid)
        for ring in poly:
            self.assertEqual(4269, ring.srid)
        poly.srid = 4326
        self.assertEqual(4326, poly.shell.srid)

        # Testing SRID keyword on GeometryCollection
        gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
        self.assertEqual(32021, gc.srid)
        for i in range(len(gc)):
            self.assertEqual(32021, gc[i].srid)

        # GEOS may get the SRID from HEXEWKB
        # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
        # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
        hex = '0101000020E610000000000000000014400000000000003740'
        p1 = fromstr(hex)
        self.assertEqual(4326, p1.srid)

        p2 = fromstr(p1.hex)
        self.assertIsNone(p2.srid)
        p3 = fromstr(p1.hex, srid=-1)  # -1 is intended.
        self.assertEqual(-1, p3.srid)

        # Testing that geometry SRID could be set to its own value
        pnt_wo_srid = Point(1, 1)
        pnt_wo_srid.srid = pnt_wo_srid.srid

        # Input geometries that have an SRID.
        self.assertEqual(GEOSGeometry(pnt.ewkt, srid=pnt.srid).srid, pnt.srid)
        self.assertEqual(GEOSGeometry(pnt.ewkb, srid=pnt.srid).srid, pnt.srid)
        with self.assertRaisesMessage(ValueError, 'Input geometry already has SRID: %d.' % pnt.srid):
            GEOSGeometry(pnt.ewkt, srid=1)
        with self.assertRaisesMessage(ValueError, 'Input geometry already has SRID: %d.' % pnt.srid):
            GEOSGeometry(pnt.ewkb, srid=1)

    def test_custom_srid(self):
        """Test with a null srid and a srid unknown to GDAL."""
        for srid in [None, 999999]:
            pnt = Point(111200, 220900, srid=srid)
            self.assertTrue(pnt.ewkt.startswith(("SRID=%s;" % srid if srid else '') + "POINT (111200"))
            self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
            self.assertIsNone(pnt.srs)

            # Test conversion from custom to a known srid
            c2w = gdal.CoordTransform(
                gdal.SpatialReference(
                    '+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
                    '+datum=WGS84 +units=m +no_defs'
                ),
                gdal.SpatialReference(4326))
            new_pnt = pnt.transform(c2w, clone=True)
            self.assertEqual(new_pnt.srid, 4326)
            self.assertAlmostEqual(new_pnt.x, 1, 3)
            self.assertAlmostEqual(new_pnt.y, 2, 3)

    def test_mutable_geometries(self):
        "Testing the mutability of Polygons and Geometry Collections."
        # ### Testing the mutability of Polygons ###
        for p in self.geometries.polygons:
            poly = fromstr(p.wkt)

            # Should only be able to use __setitem__ with LinearRing geometries.
            with self.assertRaises(TypeError):
                poly.__setitem__(0, LineString((1, 1), (2, 2)))

            # Constructing the new shell by adding 500 to every point in the old shell.
            shell_tup = poly.shell.tuple
            new_coords = []
            for point in shell_tup:
                new_coords.append((point[0] + 500., point[1] + 500.))
            new_shell = LinearRing(*tuple(new_coords))

            # Assigning polygon's exterior ring w/the new shell
            poly.exterior_ring = new_shell
            str(new_shell)  # new shell is still accessible
            self.assertEqual(poly.exterior_ring, new_shell)
            self.assertEqual(poly[0], new_shell)

        # ### Testing the mutability of Geometry Collections
        for tg in self.geometries.multipoints:
            mp = fromstr(tg.wkt)
            for i in range(len(mp)):
                # Creating a random point.
                pnt = mp[i]
                new = Point(random.randint(21, 100), random.randint(21, 100))
                # Testing the assignment
                mp[i] = new
                str(new)  # what was used for the assignment is still accessible
                self.assertEqual(mp[i], new)
                self.assertEqual(mp[i].wkt, new.wkt)
                self.assertNotEqual(pnt, mp[i])

        # MultiPolygons involve much more memory management because each
        # Polygon within the collection has its own rings.
        for tg in self.geometries.multipolygons:
            mpoly = fromstr(tg.wkt)
            for i in range(len(mpoly)):
                poly = mpoly[i]
                old_poly = mpoly[i]
                # Offsetting each ring in the polygon by 500.
                for j in range(len(poly)):
                    r = poly[j]
                    for k in range(len(r)):
                        r[k] = (r[k][0] + 500., r[k][1] + 500.)
                    poly[j] = r

                self.assertNotEqual(mpoly[i], poly)
                # Testing the assignment
                mpoly[i] = poly
                str(poly)  # Still accessible
                self.assertEqual(mpoly[i], poly)
                self.assertNotEqual(mpoly[i], old_poly)

        # Extreme (!!) __setitem__ -- no longer works, have to detect
        # in the first object that __setitem__ is called in the subsequent
        # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
        # mpoly[0][0][0] = (3.14, 2.71)
        # self.assertEqual((3.14, 2.71), mpoly[0][0][0])
        # Doing it more slowly..
        # self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
        # del mpoly

    def test_point_list_assignment(self):
        p = Point(0, 0)

        p[:] = (1, 2, 3)
        self.assertEqual(p, Point(1, 2, 3))

        p[:] = ()
        self.assertEqual(p.wkt, Point())

        p[:] = (1, 2)
        self.assertEqual(p.wkt, Point(1, 2))

        with self.assertRaises(ValueError):
            p[:] = (1,)
        with self.assertRaises(ValueError):
            p[:] = (1, 2, 3, 4, 5)

    def test_linestring_list_assignment(self):
        ls = LineString((0, 0), (1, 1))

        ls[:] = ()
        self.assertEqual(ls, LineString())

        ls[:] = ((0, 0), (1, 1), (2, 2))
        self.assertEqual(ls, LineString((0, 0), (1, 1), (2, 2)))

        with self.assertRaises(ValueError):
            ls[:] = (1,)

    def test_linearring_list_assignment(self):
        ls = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))

        ls[:] = ()
        self.assertEqual(ls, LinearRing())

        ls[:] = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))
        self.assertEqual(ls, LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))

        with self.assertRaises(ValueError):
            ls[:] = ((0, 0), (1, 1), (2, 2))

    def test_polygon_list_assignment(self):
        pol = Polygon()

        pol[:] = (((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),)
        self.assertEqual(pol, Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),))

        pol[:] = ()
        self.assertEqual(pol, Polygon())

    def test_geometry_collection_list_assignment(self):
        p = Point()
        gc = GeometryCollection()

        gc[:] = [p]
        self.assertEqual(gc, GeometryCollection(p))

        gc[:] = ()
        self.assertEqual(gc, GeometryCollection())

    def test_threed(self):
        "Testing three-dimensional geometries."
        # Testing a 3D Point
        pnt = Point(2, 3, 8)
        self.assertEqual((2., 3., 8.), pnt.coords)
        with self.assertRaises(TypeError):
            pnt.tuple = (1., 2.)
        pnt.coords = (1., 2., 3.)
        self.assertEqual((1., 2., 3.), pnt.coords)

        # Testing a 3D LineString
        ls = LineString((2., 3., 8.), (50., 250., -117.))
        self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
        with self.assertRaises(TypeError):
            ls.__setitem__(0, (1., 2.))
        ls[0] = (1., 2., 3.)
        self.assertEqual((1., 2., 3.), ls[0])

    def test_distance(self):
        "Testing the distance() function."
        # Distance to self should be 0.
        pnt = Point(0, 0)
        self.assertEqual(0.0, pnt.distance(Point(0, 0)))

        # Distance should be 1
        self.assertEqual(1.0, pnt.distance(Point(0, 1)))

        # Distance should be ~ sqrt(2)
        self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)

        # Distances are from the closest vertex in each geometry --
        # should be 3 (distance from (2, 2) to (5, 2)).
        ls1 = LineString((0, 0), (1, 1), (2, 2))
        ls2 = LineString((5, 2), (6, 1), (7, 0))
        self.assertEqual(3, ls1.distance(ls2))

    def test_length(self):
        "Testing the length property."
        # Points have 0 length.
        pnt = Point(0, 0)
        self.assertEqual(0.0, pnt.length)

        # Should be ~ sqrt(2)
        ls = LineString((0, 0), (1, 1))
        self.assertAlmostEqual(1.41421356237, ls.length, 11)

        # Should be circumference of Polygon
        poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
        self.assertEqual(4.0, poly.length)

        # Should be sum of each element's length in collection.
        mpoly = MultiPolygon(poly.clone(), poly)
        self.assertEqual(8.0, mpoly.length)

    def test_emptyCollections(self):
        "Testing empty geometries and collections."
        geoms = [
            GeometryCollection([]),
            fromstr('GEOMETRYCOLLECTION EMPTY'),
            GeometryCollection(),
            fromstr('POINT EMPTY'),
            Point(),
            fromstr('LINESTRING EMPTY'),
            LineString(),
            fromstr('POLYGON EMPTY'),
            Polygon(),
            fromstr('MULTILINESTRING EMPTY'),
            MultiLineString(),
            fromstr('MULTIPOLYGON EMPTY'),
            MultiPolygon(()),
            MultiPolygon(),
        ]
        if numpy:
            geoms.append(LineString(numpy.array([])))

        for g in geoms:
            self.assertIs(g.empty, True)

            # Testing len() and num_geom.
            if isinstance(g, Polygon):
                self.assertEqual(1, len(g))  # Has one empty linear ring
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g[0]))
            elif isinstance(g, (Point, LineString)):
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g))
            else:
                self.assertEqual(0, g.num_geom)
                self.assertEqual(0, len(g))

            # Testing __getitem__ (doesn't work on Point or Polygon)
            if isinstance(g, Point):
                with self.assertRaises(IndexError):
                    g.x
            elif isinstance(g, Polygon):
                lr = g.shell
                self.assertEqual('LINEARRING EMPTY', lr.wkt)
                self.assertEqual(0, len(lr))
                self.assertIs(lr.empty, True)
                with self.assertRaises(IndexError):
                    lr.__getitem__(0)
            else:
                with self.assertRaises(IndexError):
                    g.__getitem__(0)

    def test_collection_dims(self):
        gc = GeometryCollection([])
        self.assertEqual(gc.dims, -1)

        gc = GeometryCollection(Point(0, 0))
        self.assertEqual(gc.dims, 0)

        gc = GeometryCollection(LineString((0, 0), (1, 1)), Point(0, 0))
        self.assertEqual(gc.dims, 1)

        gc = GeometryCollection(
            LineString((0, 0), (1, 1)),
            Polygon(((0, 0), (0, 1), (1, 1), (0, 0))),
            Point(0, 0),
        )
        self.assertEqual(gc.dims, 2)

    def test_collections_of_collections(self):
        "Testing GeometryCollection handling of other collections."
        # Creating a GeometryCollection WKT string composed of other
        # collections and polygons.
        coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
        coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
        coll.extend(p.wkt for p in self.geometries.polygons)
        coll.extend(mp.wkt for mp in self.geometries.multipoints)
        gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)

        # Should construct ok from WKT
        gc1 = GEOSGeometry(gc_wkt)

        # Should also construct ok from individual geometry arguments.
        gc2 = GeometryCollection(*tuple(g for g in gc1))

        # And, they should be equal.
        self.assertEqual(gc1, gc2)

    def test_gdal(self):
        "Testing `ogr` and `srs` properties."
        g1 = fromstr('POINT(5 23)')
        self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
        self.assertIsNone(g1.srs)

        g1_3d = fromstr('POINT(5 23 8)')
        self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
        self.assertEqual(g1_3d.ogr.z, 8)

        g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
        self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
        self.assertIsInstance(g2.srs, gdal.SpatialReference)
        self.assertEqual(g2.hex, g2.ogr.hex)
        self.assertEqual('WGS 84', g2.srs.name)

    def test_copy(self):
        "Testing use with the Python `copy` module."
        import copy
        poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
        cpy1 = copy.copy(poly)
        cpy2 = copy.deepcopy(poly)
        self.assertNotEqual(poly._ptr, cpy1._ptr)
        self.assertNotEqual(poly._ptr, cpy2._ptr)

    def test_transform(self):
        "Testing `transform` method."
        orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)

        # Using a srid, a SpatialReference object, and a CoordTransform object
        # for transformations.
        t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
        t1.transform(trans.srid)
        t2.transform(gdal.SpatialReference('EPSG:2774'))
        ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
        t3.transform(ct)

        # Testing use of the `clone` keyword.
        k1 = orig.clone()
        k2 = k1.transform(trans.srid, clone=True)
        self.assertEqual(k1, orig)
        self.assertNotEqual(k1, k2)

        prec = 3
        for p in (t1, t2, t3, k2):
            self.assertAlmostEqual(trans.x, p.x, prec)
            self.assertAlmostEqual(trans.y, p.y, prec)

    def test_transform_3d(self):
        p3d = GEOSGeometry('POINT (5 23 100)', 4326)
        p3d.transform(2774)
        self.assertAlmostEqual(p3d.z, 100, 3)

    def test_transform_noop(self):
        """ Testing `transform` method (SRID match) """
        # transform() should no-op if source & dest SRIDs match,
        # regardless of whether GDAL is available.
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        gt = g.tuple
        g.transform(4326)
        self.assertEqual(g.tuple, gt)
        self.assertEqual(g.srid, 4326)

        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        g1 = g.transform(4326, clone=True)
        self.assertEqual(g1.tuple, g.tuple)
        self.assertEqual(g1.srid, 4326)
        self.assertIsNot(g1, g, "Clone didn't happen")

    def test_transform_nosrid(self):
        """ Testing `transform` method (no SRID or negative SRID) """
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
        with self.assertRaises(GEOSException):
            g.transform(2774)

        g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
        with self.assertRaises(GEOSException):
            g.transform(2774, clone=True)

        g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
        with self.assertRaises(GEOSException):
            g.transform(2774)

        g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
        with self.assertRaises(GEOSException):
            g.transform(2774, clone=True)

    def test_extent(self):
        "Testing `extent` method."
        # The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
        mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
        self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
        pnt = Point(5.23, 17.8)
        # Extent of points is just the point itself repeated.
        self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
        # Testing on the 'real world' Polygon.
        poly = fromstr(self.geometries.polygons[3].wkt)
        ring = poly.shell
        x, y = ring.x, ring.y
        xmin, ymin = min(x), min(y)
        xmax, ymax = max(x), max(y)
        self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)

    def test_pickle(self):
        "Testing pickling and unpickling support."
        # Creating a list of test geometries for pickling,
        # and setting the SRID on some of them.
        def get_geoms(lst, srid=None):
            return [GEOSGeometry(tg.wkt, srid) for tg in lst]
        tgeoms = get_geoms(self.geometries.points)
        tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
        tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
        tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))
        tgeoms.append(Point(srid=4326))
        tgeoms.append(Point())
        for geom in tgeoms:
            s1 = pickle.dumps(geom)
            g1 = pickle.loads(s1)
            self.assertEqual(geom, g1)
            self.assertEqual(geom.srid, g1.srid)

    def test_prepared(self):
        "Testing PreparedGeometry support."
        # Creating a simple multipolygon and getting a prepared version.
        mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
        prep = mpoly.prepared

        # A set of test points.
        pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
        for pnt in pnts:
            # Results should be the same (but faster)
            self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
            self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
            self.assertEqual(mpoly.covers(pnt), prep.covers(pnt))

        self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
        self.assertTrue(prep.disjoint(Point(-5, -5)))
        poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
        self.assertTrue(prep.overlaps(poly))
        poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
        self.assertTrue(prep.touches(poly))
        poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
        self.assertTrue(prep.within(poly))

        # Original geometry deletion should not crash the prepared one (#21662)
        del mpoly
        self.assertTrue(prep.covers(Point(5, 5)))

    def test_line_merge(self):
        "Testing line merge support"
        ref_geoms = (
            fromstr('LINESTRING(1 1, 1 1, 3 3)'),
            fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
        )
        ref_merged = (
            fromstr('LINESTRING(1 1, 3 3)'),
            fromstr('LINESTRING (1 1, 3 3, 4 2)'),
        )
        for geom, merged in zip(ref_geoms, ref_merged):
            self.assertEqual(merged, geom.merged)

    def test_valid_reason(self):
        "Testing IsValidReason support"

        g = GEOSGeometry("POINT(0 0)")
        self.assertTrue(g.valid)
        self.assertIsInstance(g.valid_reason, str)
        self.assertEqual(g.valid_reason, "Valid Geometry")

        g = GEOSGeometry("LINESTRING(0 0, 0 0)")

        self.assertFalse(g.valid)
        self.assertIsInstance(g.valid_reason, str)
        self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))

    def test_linearref(self):
        "Testing linear referencing"

        ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
        mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')

        self.assertEqual(ls.project(Point(0, 20)), 10.0)
        self.assertEqual(ls.project(Point(7, 6)), 24)
        self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)

        self.assertEqual(ls.interpolate(10), Point(0, 10))
        self.assertEqual(ls.interpolate(24), Point(10, 6))
        self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))

        self.assertEqual(mls.project(Point(0, 20)), 10)
        self.assertEqual(mls.project(Point(7, 6)), 16)

        self.assertEqual(mls.interpolate(9), Point(0, 9))
        self.assertEqual(mls.interpolate(17), Point(10, 7))

    def test_deconstructible(self):
        """
        Geometry classes should be deconstructible.
""" point = Point(4.337844, 50.827537, srid=4326) path, args, kwargs = point.deconstruct() self.assertEqual(path, 'django.contrib.gis.geos.point.Point') self.assertEqual(args, (4.337844, 50.827537)) self.assertEqual(kwargs, {'srid': 4326}) ls = LineString(((0, 0), (1, 1))) path, args, kwargs = ls.deconstruct() self.assertEqual(path, 'django.contrib.gis.geos.linestring.LineString') self.assertEqual(args, (((0, 0), (1, 1)),)) self.assertEqual(kwargs, {}) ls2 = LineString([Point(0, 0), Point(1, 1)], srid=4326) path, args, kwargs = ls2.deconstruct() self.assertEqual(path, 'django.contrib.gis.geos.linestring.LineString') self.assertEqual(args, ([Point(0, 0), Point(1, 1)],)) self.assertEqual(kwargs, {'srid': 4326}) ext_coords = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)) int_coords = ((0.4, 0.4), (0.4, 0.6), (0.6, 0.6), (0.6, 0.4), (0.4, 0.4)) poly = Polygon(ext_coords, int_coords) path, args, kwargs = poly.deconstruct() self.assertEqual(path, 'django.contrib.gis.geos.polygon.Polygon') self.assertEqual(args, (ext_coords, int_coords)) self.assertEqual(kwargs, {}) lr = LinearRing((0, 0), (0, 1), (1, 1), (0, 0)) path, args, kwargs = lr.deconstruct() self.assertEqual(path, 'django.contrib.gis.geos.linestring.LinearRing') self.assertEqual(args, ((0, 0), (0, 1), (1, 1), (0, 0))) self.assertEqual(kwargs, {}) mp = MultiPoint(Point(0, 0), Point(1, 1)) path, args, kwargs = mp.deconstruct() self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiPoint') self.assertEqual(args, (Point(0, 0), Point(1, 1))) self.assertEqual(kwargs, {}) ls1 = LineString((0, 0), (1, 1)) ls2 = LineString((2, 2), (3, 3)) mls = MultiLineString(ls1, ls2) path, args, kwargs = mls.deconstruct() self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiLineString') self.assertEqual(args, (ls1, ls2)) self.assertEqual(kwargs, {}) p1 = Polygon(((0, 0), (0, 1), (1, 1), (0, 0))) p2 = Polygon(((1, 1), (1, 2), (2, 2), (1, 1))) mp = MultiPolygon(p1, p2) path, args, kwargs = mp.deconstruct() self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiPolygon') self.assertEqual(args, (p1, p2)) self.assertEqual(kwargs, {}) poly = Polygon(((0, 0), (0, 1), (1, 1), (0, 0))) gc = GeometryCollection(Point(0, 0), MultiPoint(Point(0, 0), Point(1, 1)), poly) path, args, kwargs = gc.deconstruct() self.assertEqual(path, 'django.contrib.gis.geos.collections.GeometryCollection') self.assertEqual(args, (Point(0, 0), MultiPoint(Point(0, 0), Point(1, 1)), poly)) self.assertEqual(kwargs, {}) def test_subclassing(self): """ GEOSGeometry subclass may itself be subclassed without being forced-cast to the parent class during `__init__`. """ class ExtendedPolygon(Polygon): def __init__(self, *args, data=0, **kwargs): super().__init__(*args, **kwargs) self._data = data def __str__(self): return "EXT_POLYGON - data: %d - %s" % (self._data, self.wkt) ext_poly = ExtendedPolygon(((0, 0), (0, 1), (1, 1), (0, 0)), data=3) self.assertEqual(type(ext_poly), ExtendedPolygon) # ExtendedPolygon.__str__ should be called (instead of Polygon.__str__). 
        self.assertEqual(str(ext_poly), "EXT_POLYGON - data: 3 - POLYGON ((0 0, 0 1, 1 1, 0 0))")
        self.assertJSONEqual(
            ext_poly.json,
            '{"coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]], "type": "Polygon"}',
        )

    def test_geos_version_tuple(self):
        versions = (
            (b'3.0.0rc4-CAPI-1.3.3', (3, 0, 0)),
            (b'3.0.0-CAPI-1.4.1', (3, 0, 0)),
            (b'3.4.0dev-CAPI-1.8.0', (3, 4, 0)),
            (b'3.4.0dev-CAPI-1.8.0 r0', (3, 4, 0)),
            (b'3.6.2-CAPI-1.10.2 4d2925d6', (3, 6, 2)),
        )
        for version_string, version_tuple in versions:
            with self.subTest(version_string=version_string):
                with mock.patch('django.contrib.gis.geos.libgeos.geos_version', lambda: version_string):
                    self.assertEqual(geos_version_tuple(), version_tuple)

    def test_from_gml(self):
        self.assertEqual(
            GEOSGeometry('POINT(0 0)'),
            GEOSGeometry.from_gml(
                '<gml:Point gml:id="p21" srsName="http://www.opengis.net/def/crs/EPSG/0/4326">'
                ' <gml:pos srsDimension="2">0 0</gml:pos>'
                '</gml:Point>'
            ),
        )

    def test_from_ewkt(self):
        self.assertEqual(GEOSGeometry.from_ewkt('SRID=1;POINT(1 1)'), Point(1, 1, srid=1))
        self.assertEqual(GEOSGeometry.from_ewkt('POINT(1 1)'), Point(1, 1))

    def test_from_ewkt_empty_string(self):
        msg = 'Expected WKT but got an empty string.'
        with self.assertRaisesMessage(ValueError, msg):
            GEOSGeometry.from_ewkt('')
        with self.assertRaisesMessage(ValueError, msg):
            GEOSGeometry.from_ewkt('SRID=1;')

    def test_from_ewkt_invalid_srid(self):
        msg = 'EWKT has invalid SRID part.'
        with self.assertRaisesMessage(ValueError, msg):
            GEOSGeometry.from_ewkt('SRUD=1;POINT(1 1)')
        with self.assertRaisesMessage(ValueError, msg):
            GEOSGeometry.from_ewkt('SRID=WGS84;POINT(1 1)')

    def test_fromstr_scientific_wkt(self):
        self.assertEqual(GEOSGeometry('POINT(1.0e-1 1.0e+1)'), Point(.1, 10))

    def test_normalize(self):
        g = MultiPoint(Point(0, 0), Point(2, 2), Point(1, 1))
        self.assertIsNone(g.normalize())
        self.assertTrue(g.equals_exact(MultiPoint(Point(2, 2), Point(1, 1), Point(0, 0))))

    def test_empty_point(self):
        p = Point(srid=4326)
        self.assertEqual(p.ogr.ewkt, p.ewkt)

        self.assertEqual(p.transform(2774, clone=True), Point(srid=2774))
        p.transform(2774)
        self.assertEqual(p, Point(srid=2774))

    def test_linestring_iter(self):
        ls = LineString((0, 0), (1, 1))
        it = iter(ls)
        # Step into CoordSeq iterator.
        next(it)
        ls[:] = []
        with self.assertRaises(IndexError):
            next(it)


import os
import shutil
import struct
import tempfile

from django.contrib.gis.gdal import GDAL_VERSION, GDALRaster
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.raster.band import GDALBand
from django.contrib.gis.shortcuts import numpy
from django.test import SimpleTestCase

from ..data.rasters.textrasters import JSON_RASTER


class GDALRasterTests(SimpleTestCase):
    """
    Test a GDALRaster instance created from a file (GeoTiff).
    """
    def setUp(self):
        self.rs_path = os.path.join(os.path.dirname(__file__), '../data/rasters/raster.tif')
        self.rs = GDALRaster(self.rs_path)

    def test_rs_name_repr(self):
        self.assertEqual(self.rs_path, self.rs.name)
        self.assertRegex(repr(self.rs), r"<Raster object at 0x\w+>")

    def test_rs_driver(self):
        self.assertEqual(self.rs.driver.name, 'GTiff')

    def test_rs_size(self):
        self.assertEqual(self.rs.width, 163)
        self.assertEqual(self.rs.height, 174)

    def test_rs_srs(self):
        self.assertEqual(self.rs.srs.srid, 3086)
        self.assertEqual(self.rs.srs.units, (1.0, 'metre'))

    def test_rs_srid(self):
        rast = GDALRaster({
            'width': 16,
            'height': 16,
            'srid': 4326,
        })
        self.assertEqual(rast.srid, 4326)
        rast.srid = 3086
        self.assertEqual(rast.srid, 3086)

    def test_geotransform_and_friends(self):
        # Assert correct values for file based raster
        self.assertEqual(
            self.rs.geotransform,
            [511700.4680706557, 100.0, 0.0, 435103.3771231986, 0.0, -100.0]
        )
        self.assertEqual(self.rs.origin, [511700.4680706557, 435103.3771231986])
        self.assertEqual(self.rs.origin.x, 511700.4680706557)
        self.assertEqual(self.rs.origin.y, 435103.3771231986)
        self.assertEqual(self.rs.scale, [100.0, -100.0])
        self.assertEqual(self.rs.scale.x, 100.0)
        self.assertEqual(self.rs.scale.y, -100.0)
        self.assertEqual(self.rs.skew, [0, 0])
        self.assertEqual(self.rs.skew.x, 0)
        self.assertEqual(self.rs.skew.y, 0)

        # Create in-memory rasters and change gtvalues
        rsmem = GDALRaster(JSON_RASTER)
        # geotransform accepts both floats and ints
        rsmem.geotransform = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
        self.assertEqual(rsmem.geotransform, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
        rsmem.geotransform = range(6)
        self.assertEqual(rsmem.geotransform, [float(x) for x in range(6)])
        self.assertEqual(rsmem.origin, [0, 3])
        self.assertEqual(rsmem.origin.x, 0)
        self.assertEqual(rsmem.origin.y, 3)
        self.assertEqual(rsmem.scale, [1, 5])
        self.assertEqual(rsmem.scale.x, 1)
        self.assertEqual(rsmem.scale.y, 5)
        self.assertEqual(rsmem.skew, [2, 4])
        self.assertEqual(rsmem.skew.x, 2)
        self.assertEqual(rsmem.skew.y, 4)
        self.assertEqual(rsmem.width, 5)
        self.assertEqual(rsmem.height, 5)

    def test_geotransform_bad_inputs(self):
        rsmem = GDALRaster(JSON_RASTER)
        error_geotransforms = [
            [1, 2],
            [1, 2, 3, 4, 5, 'foo'],
            [1, 2, 3, 4, 5, 6, 'foo'],
        ]
        msg = 'Geotransform must consist of 6 numeric values.'
        for geotransform in error_geotransforms:
            with self.subTest(i=geotransform), self.assertRaisesMessage(ValueError, msg):
                rsmem.geotransform = geotransform

    def test_rs_extent(self):
        self.assertEqual(
            self.rs.extent,
            (511700.4680706557, 417703.3771231986, 528000.4680706557, 435103.3771231986)
        )

    def test_rs_bands(self):
        self.assertEqual(len(self.rs.bands), 1)
        self.assertIsInstance(self.rs.bands[0], GDALBand)

    def test_memory_based_raster_creation(self):
        # Create uint8 raster with full pixel data range (0-255)
        rast = GDALRaster({
            'datatype': 1,
            'width': 16,
            'height': 16,
            'srid': 4326,
            'bands': [{
                'data': range(256),
                'nodata_value': 255,
            }],
        })

        # Get array from raster
        result = rast.bands[0].data()
        if numpy:
            result = result.flatten().tolist()

        # Assert data is same as original input
        self.assertEqual(result, list(range(256)))

    def test_file_based_raster_creation(self):
        # Prepare tempfile
        rstfile = tempfile.NamedTemporaryFile(suffix='.tif')

        # Create file-based raster from scratch
        GDALRaster({
            'datatype': self.rs.bands[0].datatype(),
            'driver': 'tif',
            'name': rstfile.name,
            'width': 163,
            'height': 174,
            'nr_of_bands': 1,
            'srid': self.rs.srs.wkt,
            'origin': (self.rs.origin.x, self.rs.origin.y),
            'scale': (self.rs.scale.x, self.rs.scale.y),
            'skew': (self.rs.skew.x, self.rs.skew.y),
            'bands': [{
                'data': self.rs.bands[0].data(),
                'nodata_value': self.rs.bands[0].nodata_value,
            }],
        })

        # Reload newly created raster from file
        restored_raster = GDALRaster(rstfile.name)
        self.assertEqual(restored_raster.srs.wkt, self.rs.srs.wkt)
        self.assertEqual(restored_raster.geotransform, self.rs.geotransform)
        if numpy:
            numpy.testing.assert_equal(
                restored_raster.bands[0].data(),
                self.rs.bands[0].data()
            )
        else:
            self.assertEqual(restored_raster.bands[0].data(), self.rs.bands[0].data())

    def test_nonexistent_file(self):
        msg = 'Unable to read raster source input "nonexistent.tif".'
        with self.assertRaisesMessage(GDALException, msg):
            GDALRaster('nonexistent.tif')

    def test_vsi_raster_creation(self):
        # Open a raster as a file object.
        with open(self.rs_path, 'rb') as dat:
            # Instantiate a raster from the file binary buffer.
            vsimem = GDALRaster(dat.read())

        # The data of the in-memory file is equal to the source file.
        result = vsimem.bands[0].data()
        target = self.rs.bands[0].data()
        if numpy:
            result = result.flatten().tolist()
            target = target.flatten().tolist()
        self.assertEqual(result, target)

    def test_vsi_raster_deletion(self):
        path = '/vsimem/raster.tif'
        # Create a vsi-based raster from scratch.
        vsimem = GDALRaster({
            'name': path,
            'driver': 'tif',
            'width': 4,
            'height': 4,
            'srid': 4326,
            'bands': [{
                'data': range(16),
            }],
        })
        # The virtual file exists.
        rst = GDALRaster(path)
        self.assertEqual(rst.width, 4)
        # Delete GDALRaster.
        del vsimem
        del rst
        # The virtual file has been removed.
        msg = 'Could not open the datasource at "/vsimem/raster.tif"'
        with self.assertRaisesMessage(GDALException, msg):
            GDALRaster(path)

    def test_vsi_invalid_buffer_error(self):
        msg = 'Failed creating VSI raster from the input buffer.'
        with self.assertRaisesMessage(GDALException, msg):
            GDALRaster(b'not-a-raster-buffer')

    def test_vsi_buffer_property(self):
        # Create a vsi-based raster from scratch.
        rast = GDALRaster({
            'name': '/vsimem/raster.tif',
            'driver': 'tif',
            'width': 4,
            'height': 4,
            'srid': 4326,
            'bands': [{
                'data': range(16),
            }],
        })
        # Do a round trip from raster to buffer to raster.
        result = GDALRaster(rast.vsi_buffer).bands[0].data()
        if numpy:
            result = result.flatten().tolist()
        # Band data of the round-tripped raster is equal to the original input.
        self.assertEqual(result, list(range(16)))

        # The vsi buffer is None for rasters that are not vsi-based.
        self.assertIsNone(self.rs.vsi_buffer)

    def test_offset_size_and_shape_on_raster_creation(self):
        rast = GDALRaster({
            'datatype': 1,
            'width': 4,
            'height': 4,
            'srid': 4326,
            'bands': [{
                'data': (1,),
                'offset': (1, 1),
                'size': (2, 2),
                'shape': (1, 1),
                'nodata_value': 2,
            }],
        })

        # Get array from raster.
        result = rast.bands[0].data()
        if numpy:
            result = result.flatten().tolist()

        # Band data is equal to nodata value except on input block of ones.
        self.assertEqual(
            result,
            [2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 2]
        )

    def test_set_nodata_value_on_raster_creation(self):
        # Create raster filled with nodata values.
        rast = GDALRaster({
            'datatype': 1,
            'width': 2,
            'height': 2,
            'srid': 4326,
            'bands': [{'nodata_value': 23}],
        })
        # Get array from raster.
        result = rast.bands[0].data()
        if numpy:
            result = result.flatten().tolist()
        # All band data is equal to nodata value.
        self.assertEqual(result, [23] * 4)

    def test_set_nodata_none_on_raster_creation(self):
        if GDAL_VERSION < (2, 1):
            self.skipTest("GDAL >= 2.1 is required for this test.")
        # Create raster without data and without nodata value.
        rast = GDALRaster({
            'datatype': 1,
            'width': 2,
            'height': 2,
            'srid': 4326,
            'bands': [{'nodata_value': None}],
        })
        # Get array from raster.
        result = rast.bands[0].data()
        if numpy:
            result = result.flatten().tolist()
        # Band data is equal to zero because no nodata value has been specified.
        self.assertEqual(result, [0] * 4)

    def test_raster_metadata_property(self):
        data = self.rs.metadata
        self.assertEqual(data['DEFAULT'], {'AREA_OR_POINT': 'Area'})
        self.assertEqual(data['IMAGE_STRUCTURE'], {'INTERLEAVE': 'BAND'})

        # Create file-based raster from scratch
        source = GDALRaster({
            'datatype': 1,
            'width': 2,
            'height': 2,
            'srid': 4326,
            'bands': [{'data': range(4), 'nodata_value': 99}],
        })

        # Set metadata on raster and on a band.
        metadata = {
            'DEFAULT': {'OWNER': 'Django', 'VERSION': '1.0', 'AREA_OR_POINT': 'Point'},
        }
        source.metadata = metadata
        source.bands[0].metadata = metadata
        self.assertEqual(source.metadata['DEFAULT'], metadata['DEFAULT'])
        self.assertEqual(source.bands[0].metadata['DEFAULT'], metadata['DEFAULT'])

        # Update metadata on raster.
        metadata = {
            'DEFAULT': {'VERSION': '2.0'},
        }
        source.metadata = metadata
        self.assertEqual(source.metadata['DEFAULT']['VERSION'], '2.0')

        # Remove metadata on raster.
        metadata = {
            'DEFAULT': {'OWNER': None},
        }
        source.metadata = metadata
        self.assertNotIn('OWNER', source.metadata['DEFAULT'])

    def test_raster_info_accessor(self):
        if GDAL_VERSION < (2, 1):
            msg = 'GDAL ≥ 2.1 is required for using the info property.'
            with self.assertRaisesMessage(ValueError, msg):
                self.rs.info
            return

        gdalinfo = """
        Driver: GTiff/GeoTIFF
        Files: {}
        Size is 163, 174
        Coordinate System is:
        PROJCS["NAD83 / Florida GDL Albers",
            GEOGCS["NAD83",
                DATUM["North_American_Datum_1983",
                    SPHEROID["GRS 1980",6378137,298.257222101,
                        AUTHORITY["EPSG","7019"]],
                    TOWGS84[0,0,0,0,0,0,0],
                    AUTHORITY["EPSG","6269"]],
                PRIMEM["Greenwich",0,
                    AUTHORITY["EPSG","8901"]],
                UNIT["degree",0.0174532925199433,
                    AUTHORITY["EPSG","9122"]],
                AUTHORITY["EPSG","4269"]],
            PROJECTION["Albers_Conic_Equal_Area"],
            PARAMETER["standard_parallel_1",24],
            PARAMETER["standard_parallel_2",31.5],
            PARAMETER["latitude_of_center",24],
            PARAMETER["longitude_of_center",-84],
            PARAMETER["false_easting",400000],
            PARAMETER["false_northing",0],
            UNIT["metre",1,
                AUTHORITY["EPSG","9001"]],
            AXIS["X",EAST],
            AXIS["Y",NORTH],
            AUTHORITY["EPSG","3086"]]
        Origin = (511700.468070655711927,435103.377123198588379)
        Pixel Size = (100.000000000000000,-100.000000000000000)
        Metadata:
          AREA_OR_POINT=Area
        Image Structure Metadata:
          INTERLEAVE=BAND
        Corner Coordinates:
        Upper Left  (  511700.468,  435103.377) ( 82d51'46.16"W, 27d55' 1.53"N)
        Lower Left  (  511700.468,  417703.377) ( 82d51'52.04"W, 27d45'37.50"N)
        Upper Right (  528000.468,  435103.377) ( 82d41'48.81"W, 27d54'56.30"N)
        Lower Right (  528000.468,  417703.377) ( 82d41'55.54"W, 27d45'32.28"N)
        Center      (  519850.468,  426403.377) ( 82d46'50.64"W, 27d50'16.99"N)
        Band 1 Block=163x50 Type=Byte, ColorInterp=Gray
          NoData Value=15
        """.format(self.rs_path)
        # Data
        info_dyn = [line.strip() for line in self.rs.info.split('\n') if line.strip() != '']
        info_ref = [line.strip() for line in gdalinfo.split('\n') if line.strip() != '']
        self.assertEqual(info_dyn, info_ref)

    def test_compressed_file_based_raster_creation(self):
        rstfile = tempfile.NamedTemporaryFile(suffix='.tif')

        # Make a compressed copy of an existing raster.
        compressed = self.rs.warp({'papsz_options': {'compress': 'packbits'}, 'name': rstfile.name})

        # Check physically if compression worked.
        self.assertLess(os.path.getsize(compressed.name), os.path.getsize(self.rs.name))

        # Create file-based raster with options from scratch.
        compressed = GDALRaster({
            'datatype': 1,
            'driver': 'tif',
            'name': rstfile.name,
            'width': 40,
            'height': 40,
            'srid': 3086,
            'origin': (500000, 400000),
            'scale': (100, -100),
            'skew': (0, 0),
            'bands': [{
                'data': range(40 ^ 2),
                'nodata_value': 255,
            }],
            'papsz_options': {
                'compress': 'packbits',
                'pixeltype': 'signedbyte',
                'blockxsize': 23,
                'blockysize': 23,
            }
        })

        # Check if options used on creation are stored in metadata.
        # Reopening the raster ensures that all metadata has been written
        # to the file.
compressed = GDALRaster(compressed.name) self.assertEqual(compressed.metadata['IMAGE_STRUCTURE']['COMPRESSION'], 'PACKBITS',) self.assertEqual(compressed.bands[0].metadata['IMAGE_STRUCTURE']['PIXELTYPE'], 'SIGNEDBYTE') if GDAL_VERSION >= (2, 1): self.assertIn('Block=40x23', compressed.info) def test_raster_warp(self): # Create in memory raster source = GDALRaster({ 'datatype': 1, 'driver': 'MEM', 'name': 'sourceraster', 'width': 4, 'height': 4, 'nr_of_bands': 1, 'srid': 3086, 'origin': (500000, 400000), 'scale': (100, -100), 'skew': (0, 0), 'bands': [{ 'data': range(16), 'nodata_value': 255, }], }) # Test altering the scale, width, and height of a raster data = { 'scale': [200, -200], 'width': 2, 'height': 2, } target = source.warp(data) self.assertEqual(target.width, data['width']) self.assertEqual(target.height, data['height']) self.assertEqual(target.scale, data['scale']) self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype()) self.assertEqual(target.name, 'sourceraster_copy.MEM') result = target.bands[0].data() if numpy: result = result.flatten().tolist() self.assertEqual(result, [5, 7, 13, 15]) # Test altering the name and datatype (to float) data = { 'name': '/path/to/targetraster.tif', 'datatype': 6, } target = source.warp(data) self.assertEqual(target.bands[0].datatype(), 6) self.assertEqual(target.name, '/path/to/targetraster.tif') self.assertEqual(target.driver.name, 'MEM') result = target.bands[0].data() if numpy: result = result.flatten().tolist() self.assertEqual( result, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0] ) def test_raster_warp_nodata_zone(self): # Create in memory raster. source = GDALRaster({ 'datatype': 1, 'driver': 'MEM', 'width': 4, 'height': 4, 'srid': 3086, 'origin': (500000, 400000), 'scale': (100, -100), 'skew': (0, 0), 'bands': [{ 'data': range(16), 'nodata_value': 23, }], }) # Warp raster onto a location that does not cover any pixels of the original. result = source.warp({'origin': (200000, 200000)}).bands[0].data() if numpy: result = result.flatten().tolist() # The result is an empty raster filled with the correct nodata value. self.assertEqual(result, [23] * 16) def test_raster_transform(self): # Prepare tempfile and nodata value rstfile = tempfile.NamedTemporaryFile(suffix='.tif') ndv = 99 # Create in file based raster source = GDALRaster({ 'datatype': 1, 'driver': 'tif', 'name': rstfile.name, 'width': 5, 'height': 5, 'nr_of_bands': 1, 'srid': 4326, 'origin': (-5, 5), 'scale': (2, -2), 'skew': (0, 0), 'bands': [{ 'data': range(25), 'nodata_value': ndv, }], }) # Transform raster into srid 4326. target = source.transform(3086) # Reload data from disk target = GDALRaster(target.name) self.assertEqual(target.srs.srid, 3086) self.assertEqual(target.width, 7) self.assertEqual(target.height, 7) self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype()) self.assertAlmostEqual(target.origin[0], 9124842.791079799, 3) self.assertAlmostEqual(target.origin[1], 1589911.6476407414, 3) self.assertAlmostEqual(target.scale[0], 223824.82664250192, 3) self.assertAlmostEqual(target.scale[1], -223824.82664250192, 3) self.assertEqual(target.skew, [0, 0]) result = target.bands[0].data() if numpy: result = result.flatten().tolist() # The reprojection of a raster that spans over a large area # skews the data matrix and might introduce nodata values. 
self.assertEqual( result, [ ndv, ndv, ndv, ndv, 4, ndv, ndv, ndv, ndv, 2, 3, 9, ndv, ndv, ndv, 1, 2, 8, 13, 19, ndv, 0, 6, 6, 12, 18, 18, 24, ndv, 10, 11, 16, 22, 23, ndv, ndv, ndv, 15, 21, 22, ndv, ndv, ndv, ndv, 20, ndv, ndv, ndv, ndv, ] ) class GDALBandTests(SimpleTestCase): rs_path = os.path.join(os.path.dirname(__file__), '../data/rasters/raster.tif') def test_band_data(self): rs = GDALRaster(self.rs_path) band = rs.bands[0] self.assertEqual(band.width, 163) self.assertEqual(band.height, 174) self.assertEqual(band.description, '') self.assertEqual(band.datatype(), 1) self.assertEqual(band.datatype(as_string=True), 'GDT_Byte') self.assertEqual(band.color_interp(), 1) self.assertEqual(band.color_interp(as_string=True), 'GCI_GrayIndex') self.assertEqual(band.nodata_value, 15) if numpy: data = band.data() assert_array = numpy.loadtxt( os.path.join(os.path.dirname(__file__), '../data/rasters/raster.numpy.txt') ) numpy.testing.assert_equal(data, assert_array) self.assertEqual(data.shape, (band.height, band.width)) def test_band_statistics(self): with tempfile.TemporaryDirectory() as tmp_dir: rs_path = os.path.join(tmp_dir, 'raster.tif') shutil.copyfile(self.rs_path, rs_path) rs = GDALRaster(rs_path) band = rs.bands[0] pam_file = rs_path + '.aux.xml' smin, smax, smean, sstd = band.statistics(approximate=True) self.assertEqual(smin, 0) self.assertEqual(smax, 9) self.assertAlmostEqual(smean, 2.842331288343558) self.assertAlmostEqual(sstd, 2.3965567248965356) smin, smax, smean, sstd = band.statistics(approximate=False, refresh=True) self.assertEqual(smin, 0) self.assertEqual(smax, 9) self.assertAlmostEqual(smean, 2.828326634228898) self.assertAlmostEqual(sstd, 2.4260526986669095) self.assertEqual(band.min, 0) self.assertEqual(band.max, 9) self.assertAlmostEqual(band.mean, 2.828326634228898) self.assertAlmostEqual(band.std, 2.4260526986669095) # Statistics are persisted into PAM file on band close rs = band = None self.assertTrue(os.path.isfile(pam_file)) def test_read_mode_error(self): # Open raster in read mode rs = GDALRaster(self.rs_path, write=False) band = rs.bands[0] # Setting attributes in write mode raises exception in the _flush method with self.assertRaises(GDALException): setattr(band, 'nodata_value', 10) def test_band_data_setters(self): # Create in-memory raster and get band rsmem = GDALRaster({ 'datatype': 1, 'driver': 'MEM', 'name': 'mem_rst', 'width': 10, 'height': 10, 'nr_of_bands': 1, 'srid': 4326, }) bandmem = rsmem.bands[0] # Set nodata value bandmem.nodata_value = 99 self.assertEqual(bandmem.nodata_value, 99) # Set data for entire dataset bandmem.data(range(100)) if numpy: numpy.testing.assert_equal(bandmem.data(), numpy.arange(100).reshape(10, 10)) else: self.assertEqual(bandmem.data(), list(range(100))) # Prepare data for setting values in subsequent tests block = list(range(100, 104)) packed_block = struct.pack('<' + 'B B B B', *block) # Set data from list bandmem.data(block, (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from packed block bandmem.data(packed_block, (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from bytes bandmem.data(bytes(packed_block), (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, 
numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from bytearray bandmem.data(bytearray(packed_block), (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from memoryview bandmem.data(memoryview(packed_block), (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from numpy array if numpy: bandmem.data(numpy.array(block, dtype='int8').reshape(2, 2), (1, 1), (2, 2)) numpy.testing.assert_equal( bandmem.data(offset=(1, 1), size=(2, 2)), numpy.array(block).reshape(2, 2) ) # Test json input data rsmemjson = GDALRaster(JSON_RASTER) bandmemjson = rsmemjson.bands[0] if numpy: numpy.testing.assert_equal( bandmemjson.data(), numpy.array(range(25)).reshape(5, 5) ) else: self.assertEqual(bandmemjson.data(), list(range(25))) def test_band_statistics_automatic_refresh(self): rsmem = GDALRaster({ 'srid': 4326, 'width': 2, 'height': 2, 'bands': [{'data': [0] * 4, 'nodata_value': 99}], }) band = rsmem.bands[0] # Populate statistics cache self.assertEqual(band.statistics(), (0, 0, 0, 0)) # Change data band.data([1, 1, 0, 0]) # Statistics are properly updated self.assertEqual(band.statistics(), (0.0, 1.0, 0.5, 0.5)) # Change nodata_value band.nodata_value = 0 # Statistics are properly updated self.assertEqual(band.statistics(), (1.0, 1.0, 1.0, 0.0)) def test_band_statistics_empty_band(self): rsmem = GDALRaster({ 'srid': 4326, 'width': 1, 'height': 1, 'bands': [{'data': [0], 'nodata_value': 0}], }) self.assertEqual(rsmem.bands[0].statistics(), (None, None, None, None)) def test_band_delete_nodata(self): rsmem = GDALRaster({ 'srid': 4326, 'width': 1, 'height': 1, 'bands': [{'data': [0], 'nodata_value': 1}], }) if GDAL_VERSION < (2, 1): msg = 'GDAL >= 2.1 required to delete nodata values.' with self.assertRaisesMessage(ValueError, msg): rsmem.bands[0].nodata_value = None else: rsmem.bands[0].nodata_value = None self.assertIsNone(rsmem.bands[0].nodata_value) def test_band_data_replication(self): band = GDALRaster({ 'srid': 4326, 'width': 3, 'height': 3, 'bands': [{'data': range(10, 19), 'nodata_value': 0}], }).bands[0] # Variations for input (data, shape, expected result). combos = ( ([1], (1, 1), [1] * 9), (range(3), (1, 3), [0, 0, 0, 1, 1, 1, 2, 2, 2]), (range(3), (3, 1), [0, 1, 2, 0, 1, 2, 0, 1, 2]), ) for combo in combos: band.data(combo[0], shape=combo[1]) if numpy: numpy.testing.assert_equal(band.data(), numpy.array(combo[2]).reshape(3, 3)) else: self.assertEqual(band.data(), list(combo[2]))
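

# Editor's sketch (not part of the test suite): a minimal illustration of the
# band.data() read/write pattern the tests above exercise. The raster
# parameters are illustrative assumptions; requires GDAL.
def _band_data_demo():
    from django.contrib.gis.gdal import GDALRaster
    demo = GDALRaster({
        'srid': 4326,
        'width': 4,
        'height': 4,
        'bands': [{'data': range(16)}],
    })
    band = demo.bands[0]
    # Read a 2x2 block at offset (1, 1): a numpy array if numpy is installed,
    # otherwise a list of pixel values.
    block = band.data(offset=(1, 1), size=(2, 2))
    # Overwrite the same block, then broadcast a single value over it by
    # passing a smaller shape (the input is replicated to fill the block).
    band.data([9, 9, 9, 9], (1, 1), (2, 2))
    band.data([0], offset=(1, 1), size=(2, 2), shape=(1, 1))
    return block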
import unittest from django.contrib.gis.gdal import ( GDAL_VERSION, gdal_full_version, gdal_version, ) class GDALTest(unittest.TestCase): def test_gdal_version(self): if GDAL_VERSION: self.assertEqual(gdal_version(), ('%s.%s.%s' % GDAL_VERSION).encode()) else: self.assertIn(b'.', gdal_version()) def test_gdal_full_version(self): full_version = gdal_full_version() self.assertIn(gdal_version(), full_version) self.assertTrue(full_version.startswith(b'GDAL'))
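

# Editor's note: GDAL_VERSION is the parsed tuple form of the version string
# that gdal_version() returns as bytes, which is why the test above formats
# the tuple back into b'X.Y.Z' before comparing. A small sketch:
def _version_demo():
    # e.g. gdal_version() == b'2.4.0' and GDAL_VERSION == (2, 4, 0); the
    # tuple form supports feature gates such as GDAL_VERSION >= (2, 1).
    if GDAL_VERSION:
        return '.'.join(str(part) for part in GDAL_VERSION)
    return gdal_version().decode()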
from django.contrib.gis.db import models from django.db import migrations from django.db.models import deletion class Migration(migrations.Migration): dependencies = [ ('rasterapp', '0001_setup_extensions'), ] operations = [ migrations.CreateModel( name='RasterModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('rast', models.fields.RasterField( blank=True, null=True, srid=4326, verbose_name='A Verbose Raster Name', )), ('rastprojected', models.fields.RasterField( null=True, srid=3086, verbose_name='A Projected Raster Table', )), ('geom', models.fields.PointField(null=True, srid=4326)), ], options={ 'required_db_features': ['supports_raster'], }, ), migrations.CreateModel( name='RasterRelatedModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('rastermodel', models.ForeignKey( on_delete=deletion.CASCADE, to='rasterapp.rastermodel', )), ], options={ 'required_db_features': ['supports_raster'], }, ), ]
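

# Editor's sketch (hypothetical shell usage, not part of the migration): once
# this migration has run on a backend with supports_raster, a GDALRaster can
# be assigned directly to the RasterField declared above.
#
#     from django.contrib.gis.gdal import GDALRaster
#     rast = GDALRaster({
#         'srid': 4326, 'width': 2, 'height': 2,
#         'bands': [{'data': range(4)}],
#     })
#     RasterModel.objects.create(rast=rast)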
from django.db import connection, migrations if connection.features.supports_raster: from django.contrib.postgres.operations import CreateExtension pg_version = connection.ops.postgis_version_tuple() class Migration(migrations.Migration): # PostGIS 3+ requires postgis_raster extension. if pg_version[1:] >= (3,): operations = [ CreateExtension('postgis_raster'), ] else: operations = [] else: class Migration(migrations.Migration): operations = []
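

# Editor's note: postgis_version_tuple() returns the raw version string
# followed by its numeric components, so pg_version[1:] above compares only
# the (major, minor, micro) part. An illustrative value:
#
#     pg_version = ('3.1.4', 3, 1, 4)
#     pg_version[1:] >= (3,)  # True -> the postgis_raster extension is needed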
from django.contrib.gis.db import models from django.db import connection, migrations ops = [ migrations.CreateModel( name='Neighborhood', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100, unique=True)), ('geom', models.MultiPolygonField(srid=4326)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='Household', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('neighborhood', models.ForeignKey( 'gis_migrations.Neighborhood', models.SET_NULL, to_field='id', null=True, )), ('address', models.CharField(max_length=100)), ('zip_code', models.IntegerField(null=True, blank=True)), ('geom', models.PointField(srid=4326, geography=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='Family', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100, unique=True)), ], options={ }, bases=(models.Model,), ), migrations.AddField( model_name='household', name='family', field=models.ForeignKey('gis_migrations.Family', models.SET_NULL, blank=True, null=True), preserve_default=True, ) ] if connection.features.supports_raster: ops += [ migrations.CreateModel( name='Heatmap', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100, unique=True)), ('rast', models.fields.RasterField(srid=4326)), ], options={ }, bases=(models.Model,), ), ] class Migration(migrations.Migration): """ Used for gis-specific migration tests. """ dependencies = [ ('gis_migrations', '0001_setup_extensions'), ] operations = ops
from django.db import models from django.db.models.fields.related import ReverseManyToOneDescriptor from django.db.models.lookups import StartsWith from django.db.models.query_utils import PathInfo class CustomForeignObjectRel(models.ForeignObjectRel): """ Define some extra Field methods so this Rel acts more like a Field, which lets us use ReverseManyToOneDescriptor in both directions. """ @property def foreign_related_fields(self): return tuple(lhs_field for lhs_field, rhs_field in self.field.related_fields) def get_attname(self): return self.name class StartsWithRelation(models.ForeignObject): """ A ForeignObject that uses StartsWith operator in its joins instead of the default equality operator. This is logically a many-to-many relation and creates a ReverseManyToOneDescriptor in both directions. """ auto_created = False many_to_many = False many_to_one = True one_to_many = False one_to_one = False rel_class = CustomForeignObjectRel def __init__(self, *args, **kwargs): kwargs['on_delete'] = models.DO_NOTHING super().__init__(*args, **kwargs) @property def field(self): """ Makes ReverseManyToOneDescriptor work in both directions. """ return self.remote_field def get_extra_restriction(self, where_class, alias, related_alias): to_field = self.remote_field.model._meta.get_field(self.to_fields[0]) from_field = self.model._meta.get_field(self.from_fields[0]) return StartsWith(to_field.get_col(alias), from_field.get_col(related_alias)) def get_joining_columns(self, reverse_join=False): return () def get_path_info(self, filtered_relation=None): to_opts = self.remote_field.model._meta from_opts = self.model._meta return [PathInfo( from_opts=from_opts, to_opts=to_opts, target_fields=(to_opts.pk,), join_field=self, m2m=False, direct=False, filtered_relation=filtered_relation, )] def get_reverse_path_info(self, filtered_relation=None): to_opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo( from_opts=from_opts, to_opts=to_opts, target_fields=(to_opts.pk,), join_field=self.remote_field, m2m=False, direct=False, filtered_relation=filtered_relation, )] def contribute_to_class(self, cls, name, private_only=False): super().contribute_to_class(cls, name, private_only) setattr(cls, self.name, ReverseManyToOneDescriptor(self)) class BrokenContainsRelation(StartsWithRelation): """ This model is designed to yield no join conditions and raise an exception in ``Join.as_sql()``. """ def get_extra_restriction(self, where_class, alias, related_alias): return None class SlugPage(models.Model): slug = models.CharField(max_length=20, unique=True) descendants = StartsWithRelation( 'self', from_fields=['slug'], to_fields=['slug'], related_name='ascendants', ) containers = BrokenContainsRelation( 'self', from_fields=['slug'], to_fields=['slug'], ) class Meta: ordering = ['slug'] def __str__(self): return 'SlugPage %s' % self.slug
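

# Editor's sketch (hypothetical shell session) of the relation defined above.
# With slugs such as 'a', 'a/a', and 'a/b', the STARTSWITH join makes every
# page a descendant of each of its slug prefixes, itself included:
#
#     SlugPage.objects.bulk_create(SlugPage(slug=s) for s in ['a', 'a/a', 'a/b', 'x'])
#     a = SlugPage.objects.get(slug='a')
#     [p.slug for p in a.descendants.all()]
#     # ['a', 'a/a', 'a/b']
#     [p.slug for p in SlugPage.objects.get(slug='a/a').ascendants.all()]
#     # ['a', 'a/a']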
from django.db import models class Address(models.Model): company = models.CharField(max_length=1) customer_id = models.IntegerField() class Meta: unique_together = [ ('company', 'customer_id'), ] class Customer(models.Model): company = models.CharField(max_length=1) customer_id = models.IntegerField() address = models.ForeignObject( Address, models.CASCADE, null=True, # order mismatches the Contact ForeignObject. from_fields=['company', 'customer_id'], to_fields=['company', 'customer_id'], ) class Meta: unique_together = [ ('company', 'customer_id'), ] class Contact(models.Model): company_code = models.CharField(max_length=1) customer_code = models.IntegerField() customer = models.ForeignObject( Customer, models.CASCADE, related_name='contacts', to_fields=['customer_id', 'company'], from_fields=['customer_code', 'company_code'], )
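

# Editor's sketch (hypothetical shell session) of the composite-key join
# above: Contact.customer resolves through the two-column match
# (customer_code, company_code) -> (customer_id, company), with no surrogate
# foreign key column involved.
#
#     address = Address.objects.create(company='a', customer_id=1)
#     customer = Customer.objects.create(company='a', customer_id=1, address=address)
#     contact = Contact.objects.create(company_code='a', customer_code=1)
#     contact.customer == customer   # True
#     customer.contacts.count()      # 1, via related_name='contacts'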
import unittest from django.db import NotSupportedError, connection from django.db.models import CharField from django.db.models.functions import SHA224 from django.test import TestCase from django.test.utils import register_lookup from ..models import Author class SHA224Tests(TestCase): @classmethod def setUpTestData(cls): Author.objects.bulk_create([ Author(alias='John Smith'), Author(alias='Jordan Élena'), Author(alias='皇帝'), Author(alias=''), Author(alias=None), ]) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support SHA224.") def test_basic(self): authors = Author.objects.annotate( sha224_alias=SHA224('alias'), ).values_list('sha224_alias', flat=True).order_by('pk') self.assertSequenceEqual( authors, [ 'a61303c220731168452cb6acf3759438b1523e768f464e3704e12f70', '2297904883e78183cb118fc3dc21a610d60daada7b6ebdbc85139f4d', 'eba942746e5855121d9d8f79e27dfdebed81adc85b6bf41591203080', 'd14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f', 'd14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f' if connection.features.interprets_empty_strings_as_nulls else None, ], ) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support SHA224.") def test_transform(self): with register_lookup(CharField, SHA224): authors = Author.objects.filter( alias__sha224='a61303c220731168452cb6acf3759438b1523e768f464e3704e12f70', ).values_list('alias', flat=True) self.assertSequenceEqual(authors, ['John Smith']) @unittest.skipUnless(connection.vendor == 'oracle', "Oracle doesn't support SHA224.") def test_unsupported(self): msg = 'SHA224 is not supported on Oracle.' with self.assertRaisesMessage(NotSupportedError, msg): Author.objects.annotate(sha224_alias=SHA224('alias')).first()
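

# Editor's cross-check (a sketch, independent of any database backend): the
# expected values above are plain SHA-224 hexdigests of the UTF-8 encoded
# alias, so hashlib can reproduce them directly.
def _sha224_reference(alias):
    import hashlib
    return hashlib.sha224(alias.encode()).hexdigest()
# _sha224_reference('') == 'd14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f'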
import datetime import decimal import unittest from django.db import connection, models from django.db.models.functions import Cast from django.test import ( TestCase, ignore_warnings, override_settings, skipUnlessDBFeature, ) from ..models import Author, DTModel, Fan, FloatModel class CastTests(TestCase): @classmethod def setUpTestData(self): Author.objects.create(name='Bob', age=1, alias='1') def test_cast_from_value(self): numbers = Author.objects.annotate(cast_integer=Cast(models.Value('0'), models.IntegerField())) self.assertEqual(numbers.get().cast_integer, 0) def test_cast_from_field(self): numbers = Author.objects.annotate(cast_string=Cast('age', models.CharField(max_length=255)),) self.assertEqual(numbers.get().cast_string, '1') def test_cast_to_char_field_without_max_length(self): numbers = Author.objects.annotate(cast_string=Cast('age', models.CharField())) self.assertEqual(numbers.get().cast_string, '1') # Silence "Truncated incorrect CHAR(1) value: 'Bob'". @ignore_warnings(module='django.db.backends.mysql.base') @skipUnlessDBFeature('supports_cast_with_precision') def test_cast_to_char_field_with_max_length(self): names = Author.objects.annotate(cast_string=Cast('name', models.CharField(max_length=1))) self.assertEqual(names.get().cast_string, 'B') @skipUnlessDBFeature('supports_cast_with_precision') def test_cast_to_decimal_field(self): FloatModel.objects.create(f1=-1.934, f2=3.467) float_obj = FloatModel.objects.annotate( cast_f1_decimal=Cast('f1', models.DecimalField(max_digits=8, decimal_places=2)), cast_f2_decimal=Cast('f2', models.DecimalField(max_digits=8, decimal_places=1)), ).get() self.assertEqual(float_obj.cast_f1_decimal, decimal.Decimal('-1.93')) self.assertEqual(float_obj.cast_f2_decimal, decimal.Decimal('3.5')) author_obj = Author.objects.annotate( cast_alias_decimal=Cast('alias', models.DecimalField(max_digits=8, decimal_places=2)), ).get() self.assertEqual(author_obj.cast_alias_decimal, decimal.Decimal('1')) def test_cast_to_integer(self): for field_class in ( models.AutoField, models.BigAutoField, models.SmallAutoField, models.IntegerField, models.BigIntegerField, models.SmallIntegerField, models.PositiveBigIntegerField, models.PositiveIntegerField, models.PositiveSmallIntegerField, ): with self.subTest(field_class=field_class): numbers = Author.objects.annotate(cast_int=Cast('alias', field_class())) self.assertEqual(numbers.get().cast_int, 1) def test_cast_from_db_datetime_to_date(self): dt_value = datetime.datetime(2018, 9, 28, 12, 42, 10, 234567) DTModel.objects.create(start_datetime=dt_value) dtm = DTModel.objects.annotate( start_datetime_as_date=Cast('start_datetime', models.DateField()) ).first() self.assertEqual(dtm.start_datetime_as_date, datetime.date(2018, 9, 28)) def test_cast_from_db_datetime_to_time(self): dt_value = datetime.datetime(2018, 9, 28, 12, 42, 10, 234567) DTModel.objects.create(start_datetime=dt_value) dtm = DTModel.objects.annotate( start_datetime_as_time=Cast('start_datetime', models.TimeField()) ).first() rounded_ms = int(round(.234567, connection.features.time_cast_precision) * 10**6) self.assertEqual(dtm.start_datetime_as_time, datetime.time(12, 42, 10, rounded_ms)) def test_cast_from_db_date_to_datetime(self): dt_value = datetime.date(2018, 9, 28) DTModel.objects.create(start_date=dt_value) dtm = DTModel.objects.annotate(start_as_datetime=Cast('start_date', models.DateTimeField())).first() self.assertEqual(dtm.start_as_datetime, datetime.datetime(2018, 9, 28, 0, 0, 0, 0)) def test_cast_from_db_datetime_to_date_group_by(self): 
author = Author.objects.create(name='John Smith', age=45) dt_value = datetime.datetime(2018, 9, 28, 12, 42, 10, 234567) Fan.objects.create(name='Margaret', age=50, author=author, fan_since=dt_value) fans = Fan.objects.values('author').annotate( fan_for_day=Cast('fan_since', models.DateField()), fans=models.Count('*') ).values() self.assertEqual(fans[0]['fan_for_day'], datetime.date(2018, 9, 28)) self.assertEqual(fans[0]['fans'], 1) def test_cast_from_python_to_date(self): today = datetime.date.today() dates = Author.objects.annotate(cast_date=Cast(today, models.DateField())) self.assertEqual(dates.get().cast_date, today) def test_cast_from_python_to_datetime(self): now = datetime.datetime.now() dates = Author.objects.annotate(cast_datetime=Cast(now, models.DateTimeField())) time_precision = datetime.timedelta( microseconds=10**(6 - connection.features.time_cast_precision) ) self.assertAlmostEqual(dates.get().cast_datetime, now, delta=time_precision) def test_cast_from_python(self): numbers = Author.objects.annotate(cast_float=Cast(decimal.Decimal(0.125), models.FloatField())) cast_float = numbers.get().cast_float self.assertIsInstance(cast_float, float) self.assertEqual(cast_float, 0.125) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL test') @override_settings(DEBUG=True) def test_expression_wrapped_with_parentheses_on_postgresql(self): """ The SQL for the Cast expression is wrapped with parentheses in case it's a complex expression. """ list(Author.objects.annotate(cast_float=Cast(models.Avg('age'), models.FloatField()))) self.assertIn('(AVG("db_functions_author"."age"))::double precision', connection.queries[-1]['sql']) def test_cast_to_text_field(self): self.assertEqual(Author.objects.values_list(Cast('age', models.TextField()), flat=True).get(), '1')
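

# Editor's sketch of the general pattern these tests cover: wrap any field,
# value, or expression in Cast() with a target output field, then read the
# converted value back as the matching Python type.
def _cast_demo():
    return Author.objects.annotate(
        age_as_text=Cast('age', models.CharField(max_length=10)),
        age_as_float=Cast('age', models.FloatField()),
    ).values_list('age_as_text', 'age_as_float').first()  # e.g. ('1', 1.0)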
import os from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.http import HttpResponse, HttpResponsePermanentRedirect from django.middleware.locale import LocaleMiddleware from django.template import Context, Template from django.test import SimpleTestCase, override_settings from django.test.client import RequestFactory from django.test.utils import override_script_prefix from django.urls import clear_url_caches, reverse, translate_url from django.utils import translation class PermanentRedirectLocaleMiddleWare(LocaleMiddleware): response_redirect_class = HttpResponsePermanentRedirect @override_settings( USE_I18N=True, LOCALE_PATHS=[ os.path.join(os.path.dirname(__file__), 'locale'), ], LANGUAGE_CODE='en-us', LANGUAGES=[ ('nl', 'Dutch'), ('en', 'English'), ('pt-br', 'Brazilian Portuguese'), ], MIDDLEWARE=[ 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', ], ROOT_URLCONF='i18n.patterns.urls.default', TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')], 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.i18n', ], }, }], ) class URLTestCaseBase(SimpleTestCase): """ TestCase base-class for the URL tests. """ def setUp(self): # Make sure the cache is empty before we are doing our tests. clear_url_caches() def tearDown(self): # Make sure we will leave an empty cache for other testcases. clear_url_caches() class URLPrefixTests(URLTestCaseBase): """ Tests if the `i18n_patterns` is adding the prefix correctly. """ def test_not_prefixed(self): with translation.override('en'): self.assertEqual(reverse('not-prefixed'), '/not-prefixed/') self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/') with translation.override('nl'): self.assertEqual(reverse('not-prefixed'), '/not-prefixed/') self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/') def test_prefixed(self): with translation.override('en'): self.assertEqual(reverse('prefixed'), '/en/prefixed/') with translation.override('nl'): self.assertEqual(reverse('prefixed'), '/nl/prefixed/') with translation.override(None): self.assertEqual(reverse('prefixed'), '/%s/prefixed/' % settings.LANGUAGE_CODE) @override_settings(ROOT_URLCONF='i18n.patterns.urls.wrong') def test_invalid_prefix_use(self): msg = 'Using i18n_patterns in an included URLconf is not allowed.' 
with self.assertRaisesMessage(ImproperlyConfigured, msg): reverse('account:register') @override_settings(ROOT_URLCONF='i18n.patterns.urls.disabled') class URLDisabledTests(URLTestCaseBase): @override_settings(USE_I18N=False) def test_prefixed_i18n_disabled(self): with translation.override('en'): self.assertEqual(reverse('prefixed'), '/prefixed/') with translation.override('nl'): self.assertEqual(reverse('prefixed'), '/prefixed/') class RequestURLConfTests(SimpleTestCase): @override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused') def test_request_urlconf_considered(self): request = RequestFactory().get('/nl/') request.urlconf = 'i18n.patterns.urls.default' middleware = LocaleMiddleware(lambda req: HttpResponse()) with translation.override('nl'): middleware.process_request(request) self.assertEqual(request.LANGUAGE_CODE, 'nl') @override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused') class PathUnusedTests(URLTestCaseBase): """ If no i18n_patterns is used in root URLconfs, then no language activation activation happens based on url prefix. """ def test_no_lang_activate(self): response = self.client.get('/nl/foo/') self.assertEqual(response.status_code, 200) self.assertEqual(response['content-language'], 'en') self.assertEqual(response.context['LANGUAGE_CODE'], 'en') class URLTranslationTests(URLTestCaseBase): """ Tests if the pattern-strings are translated correctly (within the `i18n_patterns` and the normal `patterns` function). """ def test_no_prefix_translated(self): with translation.override('en'): self.assertEqual(reverse('no-prefix-translated'), '/translated/') self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/translated/yeah/') with translation.override('nl'): self.assertEqual(reverse('no-prefix-translated'), '/vertaald/') self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/vertaald/yeah/') with translation.override('pt-br'): self.assertEqual(reverse('no-prefix-translated'), '/traduzidos/') self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/traduzidos/yeah/') def test_users_url(self): with translation.override('en'): self.assertEqual(reverse('users'), '/en/users/') with translation.override('nl'): self.assertEqual(reverse('users'), '/nl/gebruikers/') self.assertEqual(reverse('prefixed_xml'), '/nl/prefixed.xml') with translation.override('pt-br'): self.assertEqual(reverse('users'), '/pt-br/usuarios/') def test_translate_url_utility(self): with translation.override('en'): self.assertEqual(translate_url('/en/nonexistent/', 'nl'), '/en/nonexistent/') self.assertEqual(translate_url('/en/users/', 'nl'), '/nl/gebruikers/') # Namespaced URL self.assertEqual(translate_url('/en/account/register/', 'nl'), '/nl/profiel/registreren/') # path() URL pattern self.assertEqual(translate_url('/en/account/register-as-path/', 'nl'), '/nl/profiel/registreren-als-pad/') self.assertEqual(translation.get_language(), 'en') # URL with parameters. self.assertEqual( translate_url('/en/with-arguments/regular-argument/', 'nl'), '/nl/with-arguments/regular-argument/', ) self.assertEqual( translate_url('/en/with-arguments/regular-argument/optional.html', 'nl'), '/nl/with-arguments/regular-argument/optional.html', ) with translation.override('nl'): self.assertEqual(translate_url('/nl/gebruikers/', 'en'), '/en/users/') self.assertEqual(translation.get_language(), 'nl') class URLNamespaceTests(URLTestCaseBase): """ Tests if the translations are still working within namespaces. 
""" def test_account_register(self): with translation.override('en'): self.assertEqual(reverse('account:register'), '/en/account/register/') self.assertEqual(reverse('account:register-as-path'), '/en/account/register-as-path/') with translation.override('nl'): self.assertEqual(reverse('account:register'), '/nl/profiel/registreren/') self.assertEqual(reverse('account:register-as-path'), '/nl/profiel/registreren-als-pad/') class URLRedirectTests(URLTestCaseBase): """ Tests if the user gets redirected to the right URL when there is no language-prefix in the request URL. """ def test_no_prefix_response(self): response = self.client.get('/not-prefixed/') self.assertEqual(response.status_code, 200) def test_en_redirect(self): response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en') self.assertRedirects(response, '/en/account/register/') response = self.client.get(response['location']) self.assertEqual(response.status_code, 200) def test_en_redirect_wrong_url(self): response = self.client.get('/profiel/registreren/', HTTP_ACCEPT_LANGUAGE='en') self.assertEqual(response.status_code, 404) def test_nl_redirect(self): response = self.client.get('/profiel/registreren/', HTTP_ACCEPT_LANGUAGE='nl') self.assertRedirects(response, '/nl/profiel/registreren/') response = self.client.get(response['location']) self.assertEqual(response.status_code, 200) def test_nl_redirect_wrong_url(self): response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl') self.assertEqual(response.status_code, 404) def test_pt_br_redirect(self): response = self.client.get('/conta/registre-se/', HTTP_ACCEPT_LANGUAGE='pt-br') self.assertRedirects(response, '/pt-br/conta/registre-se/') response = self.client.get(response['location']) self.assertEqual(response.status_code, 200) def test_pl_pl_redirect(self): # language from outside of the supported LANGUAGES list response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='pl-pl') self.assertRedirects(response, '/en/account/register/') response = self.client.get(response['location']) self.assertEqual(response.status_code, 200) @override_settings( MIDDLEWARE=[ 'i18n.patterns.tests.PermanentRedirectLocaleMiddleWare', 'django.middleware.common.CommonMiddleware', ], ) def test_custom_redirect_class(self): response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en') self.assertRedirects(response, '/en/account/register/', 301) class URLVaryAcceptLanguageTests(URLTestCaseBase): """ 'Accept-Language' is not added to the Vary header when using prefixed URLs. """ def test_no_prefix_response(self): response = self.client.get('/not-prefixed/') self.assertEqual(response.status_code, 200) self.assertEqual(response.get('Vary'), 'Accept-Language') def test_en_redirect(self): response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en') self.assertRedirects(response, '/en/account/register/') self.assertFalse(response.get('Vary')) response = self.client.get(response['location']) self.assertEqual(response.status_code, 200) self.assertFalse(response.get('Vary')) class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase): """ Tests the redirect when the requested URL doesn't end with a slash (`settings.APPEND_SLASH=True`). 
""" def test_not_prefixed_redirect(self): response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en') self.assertRedirects(response, '/not-prefixed/', 301) def test_en_redirect(self): response = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en', follow=True) # We only want one redirect, bypassing CommonMiddleware self.assertEqual(response.redirect_chain, [('/en/account/register/', 302)]) self.assertRedirects(response, '/en/account/register/', 302) response = self.client.get('/prefixed.xml', HTTP_ACCEPT_LANGUAGE='en', follow=True) self.assertRedirects(response, '/en/prefixed.xml', 302) class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase): """ Tests the redirect when the requested URL doesn't end with a slash (`settings.APPEND_SLASH=False`). """ @override_settings(APPEND_SLASH=False) def test_not_prefixed_redirect(self): response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en') self.assertEqual(response.status_code, 404) @override_settings(APPEND_SLASH=False) def test_en_redirect(self): response = self.client.get('/account/register-without-slash', HTTP_ACCEPT_LANGUAGE='en') self.assertRedirects(response, '/en/account/register-without-slash', 302) response = self.client.get(response['location']) self.assertEqual(response.status_code, 200) class URLResponseTests(URLTestCaseBase): """Tests if the response has the correct language code.""" def test_not_prefixed_with_prefix(self): response = self.client.get('/en/not-prefixed/') self.assertEqual(response.status_code, 404) def test_en_url(self): response = self.client.get('/en/account/register/') self.assertEqual(response.status_code, 200) self.assertEqual(response['content-language'], 'en') self.assertEqual(response.context['LANGUAGE_CODE'], 'en') def test_nl_url(self): response = self.client.get('/nl/profiel/registreren/') self.assertEqual(response.status_code, 200) self.assertEqual(response['content-language'], 'nl') self.assertEqual(response.context['LANGUAGE_CODE'], 'nl') def test_wrong_en_prefix(self): response = self.client.get('/en/profiel/registreren/') self.assertEqual(response.status_code, 404) def test_wrong_nl_prefix(self): response = self.client.get('/nl/account/register/') self.assertEqual(response.status_code, 404) def test_pt_br_url(self): response = self.client.get('/pt-br/conta/registre-se/') self.assertEqual(response.status_code, 200) self.assertEqual(response['content-language'], 'pt-br') self.assertEqual(response.context['LANGUAGE_CODE'], 'pt-br') def test_en_path(self): response = self.client.get('/en/account/register-as-path/') self.assertEqual(response.status_code, 200) self.assertEqual(response['content-language'], 'en') self.assertEqual(response.context['LANGUAGE_CODE'], 'en') def test_nl_path(self): response = self.client.get('/nl/profiel/registreren-als-pad/') self.assertEqual(response.status_code, 200) self.assertEqual(response['content-language'], 'nl') self.assertEqual(response.context['LANGUAGE_CODE'], 'nl') class URLRedirectWithScriptAliasTests(URLTestCaseBase): """ #21579 - LocaleMiddleware should respect the script prefix. """ def test_language_prefix_with_script_prefix(self): prefix = '/script_prefix' with override_script_prefix(prefix): response = self.client.get('/prefixed/', HTTP_ACCEPT_LANGUAGE='en', SCRIPT_NAME=prefix) self.assertRedirects(response, '%s/en/prefixed/' % prefix, target_status_code=404) class URLTagTests(URLTestCaseBase): """ Test if the language tag works. 
""" def test_strings_only(self): t = Template("""{% load i18n %} {% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %} {% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""") self.assertEqual(t.render(Context({})).strip().split(), ['/vertaald/', '/traduzidos/']) def test_context(self): ctx = Context({'lang1': 'nl', 'lang2': 'pt-br'}) tpl = Template("""{% load i18n %} {% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %} {% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""") self.assertEqual(tpl.render(ctx).strip().split(), ['/vertaald/', '/traduzidos/']) def test_args(self): tpl = Template("""{% load i18n %} {% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %} {% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""") self.assertEqual(tpl.render(Context({})).strip().split(), ['/vertaald/apo/', '/traduzidos/apo/']) def test_kwargs(self): tpl = Template("""{% load i18n %} {% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %} {% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""") self.assertEqual(tpl.render(Context({})).strip().split(), ['/vertaald/apo/', '/traduzidos/apo/'])
""" Sphinx plugins for Django documentation. """ import json import os import re from docutils import nodes from docutils.parsers.rst import Directive from docutils.statemachine import ViewList from sphinx import addnodes from sphinx.builders.html import StandaloneHTMLBuilder from sphinx.directives.code import CodeBlock from sphinx.domains.std import Cmdoption from sphinx.errors import ExtensionError, SphinxError from sphinx.util import logging from sphinx.util.console import bold, red from sphinx.writers.html import HTMLTranslator logger = logging.getLogger(__name__) # RE for option descriptions without a '--' prefix simple_option_desc_re = re.compile( r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)') def setup(app): app.add_crossref_type( directivename="setting", rolename="setting", indextemplate="pair: %s; setting", ) app.add_crossref_type( directivename="templatetag", rolename="ttag", indextemplate="pair: %s; template tag" ) app.add_crossref_type( directivename="templatefilter", rolename="tfilter", indextemplate="pair: %s; template filter" ) app.add_crossref_type( directivename="fieldlookup", rolename="lookup", indextemplate="pair: %s; field lookup type", ) app.add_object_type( directivename="django-admin", rolename="djadmin", indextemplate="pair: %s; django-admin command", parse_node=parse_django_admin_node, ) app.add_directive('django-admin-option', Cmdoption) app.add_config_value('django_next_version', '0.0', True) app.add_directive('versionadded', VersionDirective) app.add_directive('versionchanged', VersionDirective) app.add_builder(DjangoStandaloneHTMLBuilder) app.set_translator('djangohtml', DjangoHTMLTranslator) app.set_translator('json', DjangoHTMLTranslator) app.add_node( ConsoleNode, html=(visit_console_html, None), latex=(visit_console_dummy, depart_console_dummy), man=(visit_console_dummy, depart_console_dummy), text=(visit_console_dummy, depart_console_dummy), texinfo=(visit_console_dummy, depart_console_dummy), ) app.add_directive('console', ConsoleDirective) app.connect('html-page-context', html_page_context_hook) app.add_role('default-role-error', default_role_error) return {'parallel_read_safe': True} class VersionDirective(Directive): has_content = True required_arguments = 1 optional_arguments = 1 final_argument_whitespace = True option_spec = {} def run(self): if len(self.arguments) > 1: msg = """Only one argument accepted for directive '{directive_name}::'. Comments should be provided as content, not as an extra argument.""".format(directive_name=self.name) raise self.error(msg) env = self.state.document.settings.env ret = [] node = addnodes.versionmodified() ret.append(node) if self.arguments[0] == env.config.django_next_version: node['version'] = "Development version" else: node['version'] = self.arguments[0] node['type'] = self.name if self.content: self.state.nested_parse(self.content, self.content_offset, node) try: env.get_domain('changeset').note_changeset(node) except ExtensionError: # Sphinx < 1.8: Domain 'changeset' is not registered env.note_versionchange(node['type'], node['version'], node, self.lineno) return ret class DjangoHTMLTranslator(HTMLTranslator): """ Django-specific reST to HTML tweaks. """ # Don't use border=1, which docutils does by default. 
def visit_table(self, node): self.context.append(self.compact_p) self.compact_p = True self._table_row_index = 0 # Needed by Sphinx self.body.append(self.starttag(node, 'table', CLASS='docutils')) def depart_table(self, node): self.compact_p = self.context.pop() self.body.append('</table>\n') def visit_desc_parameterlist(self, node): self.body.append('(') # by default sphinx puts <big> around the "(" self.first_param = 1 self.optional_param_level = 0 self.param_separator = node.child_text_separator self.required_params_left = sum(isinstance(c, addnodes.desc_parameter) for c in node.children) def depart_desc_parameterlist(self, node): self.body.append(')') # # Turn the "new in version" stuff (versionadded/versionchanged) into a # better callout -- the Sphinx default is just a little span, # which is a bit less obvious that I'd like. # # FIXME: these messages are all hardcoded in English. We need to change # that to accommodate other language docs, but I can't work out how to make # that work. # version_text = { 'versionchanged': 'Changed in Django %s', 'versionadded': 'New in Django %s', } def visit_versionmodified(self, node): self.body.append( self.starttag(node, 'div', CLASS=node['type']) ) version_text = self.version_text.get(node['type']) if version_text: title = "%s%s" % ( version_text % node['version'], ":" if len(node) else "." ) self.body.append('<span class="title">%s</span> ' % title) def depart_versionmodified(self, node): self.body.append("</div>\n") # Give each section a unique ID -- nice for custom CSS hooks def visit_section(self, node): old_ids = node.get('ids', []) node['ids'] = ['s-' + i for i in old_ids] node['ids'].extend(old_ids) super().visit_section(node) node['ids'] = old_ids def parse_django_admin_node(env, sig, signode): command = sig.split(' ')[0] env.ref_context['std:program'] = command title = "django-admin %s" % sig signode += addnodes.desc_name(title, title) return command class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder): """ Subclass to add some extra things we need. """ name = 'djangohtml' def finish(self): super().finish() logger.info(bold("writing templatebuiltins.js...")) xrefs = self.env.domaindata["std"]["objects"] templatebuiltins = { "ttags": [ n for ((t, n), (k, a)) in xrefs.items() if t == "templatetag" and k == "ref/templates/builtins" ], "tfilters": [ n for ((t, n), (k, a)) in xrefs.items() if t == "templatefilter" and k == "ref/templates/builtins" ], } outfilename = os.path.join(self.outdir, "templatebuiltins.js") with open(outfilename, 'w') as fp: fp.write('var django_template_builtins = ') json.dump(templatebuiltins, fp) fp.write(';\n') class ConsoleNode(nodes.literal_block): """ Custom node to override the visit/depart event handlers at registration time. Wrap a literal_block object and defer to it. """ tagname = 'ConsoleNode' def __init__(self, litblk_obj): self.wrapped = litblk_obj def __getattr__(self, attr): if attr == 'wrapped': return self.__dict__.wrapped return getattr(self.wrapped, attr) def visit_console_dummy(self, node): """Defer to the corresponding parent's handler.""" self.visit_literal_block(node) def depart_console_dummy(self, node): """Defer to the corresponding parent's handler.""" self.depart_literal_block(node) def visit_console_html(self, node): """Generate HTML for the console directive.""" if self.builder.name in ('djangohtml', 'json') and node['win_console_text']: # Put a mark on the document object signaling the fact the directive # has been used on it. 
self.document._console_directive_used_flag = True uid = node['uid'] self.body.append('''\ <div class="console-block" id="console-block-%(id)s"> <input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" checked> <label for="c-tab-%(id)s-unix" title="Linux/macOS">&#xf17c/&#xf179</label> <input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s"> <label for="c-tab-%(id)s-win" title="Windows">&#xf17a</label> <section class="c-content-unix" id="c-content-%(id)s-unix">\n''' % {'id': uid}) try: self.visit_literal_block(node) except nodes.SkipNode: pass self.body.append('</section>\n') self.body.append('<section class="c-content-win" id="c-content-%(id)s-win">\n' % {'id': uid}) win_text = node['win_console_text'] highlight_args = {'force': True} linenos = node.get('linenos', False) def warner(msg): self.builder.warn(msg, (self.builder.current_docname, node.line)) highlighted = self.highlighter.highlight_block( win_text, 'doscon', warn=warner, linenos=linenos, **highlight_args ) self.body.append(highlighted) self.body.append('</section>\n') self.body.append('</div>\n') raise nodes.SkipNode else: self.visit_literal_block(node) class ConsoleDirective(CodeBlock): """ A reStructuredText directive which renders a two-tab code block in which the second tab shows a Windows command line equivalent of the usual Unix-oriented examples. """ required_arguments = 0 # The 'doscon' Pygments formatter needs a prompt like this. '>' alone # won't do it because then it simply paints the whole command line as a # grey comment with no highlighting at all. WIN_PROMPT = r'...\> ' def run(self): def args_to_win(cmdline): changed = False out = [] for token in cmdline.split(): if token[:2] == './': token = token[2:] changed = True elif token[:2] == '~/': token = '%HOMEPATH%\\' + token[2:] changed = True elif token == 'make': token = 'make.bat' changed = True if '://' not in token and 'git' not in cmdline: out.append(token.replace('/', '\\')) changed = True else: out.append(token) if changed: return ' '.join(out) return cmdline def cmdline_to_win(line): if line.startswith('# '): return 'REM ' + args_to_win(line[2:]) if line.startswith('$ # '): return 'REM ' + args_to_win(line[4:]) if line.startswith('$ ./manage.py'): return 'manage.py ' + args_to_win(line[13:]) if line.startswith('$ manage.py'): return 'manage.py ' + args_to_win(line[11:]) if line.startswith('$ ./runtests.py'): return 'runtests.py ' + args_to_win(line[15:]) if line.startswith('$ ./'): return args_to_win(line[4:]) if line.startswith('$ python3'): return 'py ' + args_to_win(line[9:]) if line.startswith('$ python'): return 'py ' + args_to_win(line[8:]) if line.startswith('$ '): return args_to_win(line[2:]) return None def code_block_to_win(content): bchanged = False lines = [] for line in content: modline = cmdline_to_win(line) if modline is None: lines.append(line) else: lines.append(self.WIN_PROMPT + modline) bchanged = True if bchanged: return ViewList(lines) return None env = self.state.document.settings.env self.arguments = ['console'] lit_blk_obj = super().run()[0] # Only do work when the djangohtml HTML Sphinx builder is being used, # invoke the default behavior for the rest. if env.app.builder.name not in ('djangohtml', 'json'): return [lit_blk_obj] lit_blk_obj['uid'] = str(env.new_serialno('console')) # Only add the tabbed UI if there is actually a Windows-specific # version of the CLI example. 
win_content = code_block_to_win(self.content) if win_content is None: lit_blk_obj['win_console_text'] = None else: self.content = win_content lit_blk_obj['win_console_text'] = super().run()[0].rawsource # Replace the literal_node object returned by Sphinx's CodeBlock with # the ConsoleNode wrapper. return [ConsoleNode(lit_blk_obj)] def html_page_context_hook(app, pagename, templatename, context, doctree): # Put a bool on the context used to render the template. It's used to # control inclusion of console-tabs.css and activation of the JavaScript. # This way it's include only from HTML files rendered from reST files where # the ConsoleDirective is used. context['include_console_assets'] = getattr(doctree, '_console_directive_used_flag', False) def default_role_error( name, rawtext, text, lineno, inliner, options=None, content=None ): msg = ( "Default role used (`single backticks`) at line %s: %s. Did you mean " "to use two backticks for ``code``, or miss an underscore for a " "`link`_ ?" % (lineno, rawtext) ) raise SphinxError(red(msg))
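

# Editor's note: the reST consumed by ConsoleDirective looks like the sketch
# below. cmdline_to_win() rewrites the Unix command for the Windows tab, so
# '$ python -m pip install Django' renders there as
# '...\> py -m pip install Django':
#
#     .. console::
#
#         $ python -m pip install Django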
""" Django's standard crypto functions and utilities. """ import hashlib import hmac import secrets from django.conf import settings from django.utils.encoding import force_bytes def salted_hmac(key_salt, value, secret=None): """ Return the HMAC-SHA1 of 'value', using a key generated from key_salt and a secret (which defaults to settings.SECRET_KEY). A different key_salt should be passed in for every application of HMAC. """ if secret is None: secret = settings.SECRET_KEY key_salt = force_bytes(key_salt) secret = force_bytes(secret) # We need to generate a derived key from our base key. We can do this by # passing the key_salt and our base key through a pseudo-random function and # SHA1 works nicely. key = hashlib.sha1(key_salt + secret).digest() # If len(key_salt + secret) > block size of the hash algorithm, the above # line is redundant and could be replaced by key = key_salt + secret, since # the hmac module does the same thing for keys longer than the block size. # However, we need to ensure that we *always* do this. return hmac.new(key, msg=force_bytes(value), digestmod=hashlib.sha1) def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'): """ Return a securely generated random string. The default length of 12 with the a-z, A-Z, 0-9 character set returns a 71-bit value. log_2((26+26+10)^12) =~ 71 bits """ return ''.join(secrets.choice(allowed_chars) for i in range(length)) def constant_time_compare(val1, val2): """Return True if the two strings are equal, False otherwise.""" return secrets.compare_digest(force_bytes(val1), force_bytes(val2)) def pbkdf2(password, salt, iterations, dklen=0, digest=None): """Return the hash of password using pbkdf2.""" if digest is None: digest = hashlib.sha256 dklen = dklen or None password = force_bytes(password) salt = force_bytes(salt) return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)
import copy import inspect import warnings from functools import partialmethod from itertools import chain from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models import ( NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value, ) from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.functions import Coalesce from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import make_model_tuple from django.utils.encoding import force_str from django.utils.hashable import make_hashable from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ from django.utils.version import get_version class Deferred: def __repr__(self): return '<Deferred field>' def __str__(self): return '<Deferred field>' DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type(name, bases, { '__module__': module, '__qualname__': '%s.%s' % (attached_to.__qualname__, name), }) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, 'contribute_to_class') class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop('__module__') new_attrs = {'__module__': module} classcell = attrs.pop('__classcell__', None) if classcell is not None: new_attrs['__classcell__'] = classcell attr_meta = attrs.pop('Meta', None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in list(attrs.items()): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, 'abstract', False) meta = attr_meta or getattr(new_class, 'Meta', None) base_meta = getattr(new_class, '_meta', None) app_label = None # Look for an application configuration to attach the model to. 
app_config = apps.get_containing_app_config(module) if getattr(meta, 'app_label', None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." % (module, name) ) else: app_label = app_config.label new_class.add_to_class('_meta', Options(meta, app_label)) if not abstract: new_class.add_to_class( 'DoesNotExist', subclass_exception( 'DoesNotExist', tuple( x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class)) new_class.add_to_class( 'MultipleObjectsReturned', subclass_exception( 'MultipleObjectsReturned', tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class)) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, 'ordering'): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, 'get_latest_by'): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, '_meta')]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) if base is None: raise TypeError("Proxy model '%s' has no non-abstract model base class." % name) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, '_meta'): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField): related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, '_meta'): # Things without _meta aren't functional models, so they're # uninteresting parents. 
inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. for field in parent_fields: if field.name in field_names: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = '%s_ptr' % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if (field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True) cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False) # Defer creating accessors on the foreign class until it has been # created and registered. 
If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields)) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower) if get_absolute_url_override: setattr(cls, 'get_absolute_url', get_absolute_url_override) if not opts.managers: if any(f.name == 'objects' for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class('objects', manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) kwargs.pop(field.name, None) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. 
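        # E.g. for Person(first_name='Ada') -- Person being a hypothetical
        # model -- the loop below pops first_name from kwargs and fills the
        # remaining fields, such as last_name, from field.get_default().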
for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names for prop in tuple(kwargs): try: # Any remaining kwargs must correspond to properties or # virtual fields. if prop in property_names or opts.get_field(prop): if kwargs[prop] is not _DEFERRED: _setattr(self, prop, kwargs[prop]) del kwargs[prop] except (AttributeError, FieldDoesNotExist): pass for kwarg in kwargs: raise TypeError("%s() got an unexpected keyword argument '%s'" % (cls.__name__, kwarg)) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def __str__(self): return '%s object (%s)' % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return NotImplemented if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = get_version() class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" return self.__dict__ def __setstate__(self, state): msg = None pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: current_version = get_version() if current_version != pickled_version: msg = ( "Pickled model instance's Django version %s does not match " "the current version %s." % (pickled_version, current_version) ) else: msg = "Pickled model instance's Django version is not specified." 
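        # A mismatch is reported as a warning rather than an error, so the
        # state update below still proceeds at the caller's risk.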
if msg: warnings.warn(msg, RuntimeWarning, stacklevel=2) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): for parent_link in self._meta.parents.values(): if parent_link and parent_link != self._meta.pk: setattr(self, parent_link.target_field.attname, value) return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, '_prefetched_objects_cache', ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' 'are not allowed in fields.' % LOOKUP_SEP) hints = {'instance': self} db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Save the current instance. Override this in a subclass if you want to control the saving process. 
The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey or OneToOneField on this model. If the field is # nullable, allowing the save() would result in silent data loss. for field in self._meta.concrete_fields: # If the related field isn't cached, then an instance hasn't # been assigned and there's no need to worry about this check. if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "save() prohibited to prevent data loss due to " "unsaved related object '%s'." % field.name ) elif getattr(self, field.attname) is None: # Use pk from related object if it has been saved after # an assignment. setattr(self, field.attname, obj.pk) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr(self, field.attname): field.delete_cached_value(self) using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = set() for field in self._meta.fields: if not field.primary_key: field_names.add(field.name) if field.name != field.attname: field_names.add(field.attname) non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError("The following fields do not exist in this " "model or are m2m fields: %s" % ', '.join(non_model_fields)) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, 'through'): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base(using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields) save.alters_data = True def save_base(self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. 
""" using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if (field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents(cls=parent, using=using, update_fields=update_fields) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table(self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # Skip an UPDATE when adding an instance and primary key has a default. if ( not raw and not force_insert and self._state.adding and self._meta.pk.default and self._meta.pk.default is not NOT_PROVIDED ): force_insert = True # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False))) for f in non_pks] forced_update = update_fields or force_update updated = self._do_update(base_qs, using, pk_val, values, update_fields, forced_update) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) self._order = cls._base_manager.using(using).filter(**filter_args).aggregate( _order__max=Coalesce( ExpressionWrapper(Max('_order') + Value(1), output_field=IntegerField()), Value(0), ), )['_order__max'] fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] returning_fields = meta.db_returning_fields results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw) for result, field in zip(results, returning_fields): setattr(self, field.attname, result) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, returning_fields, raw): """ Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model. """ return manager._insert( [self], fields=fields, returning_fields=returning_fields, using=using, raw=raw, ) def delete(self, using=None, keep_parents=False): using = using or router.db_for_write(self.__class__, instance=self) assert self.pk is not None, ( "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname) ) collector = Collector(using=using) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) choices_dict = dict(make_hashable(field.flatchoices)) # force_str() to coerce lazy strings. 
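        # E.g. with choices=[(1, 'First')], get_FOO_display() returns 'First'
        # for a stored value of 1; values missing from choices fall back to
        # the raw value itself.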
return force_str(choices_dict.get(make_hashable(value), value), strings_only=True) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = 'gt' if is_next else 'lt' order = '' if is_next else '-' param = getattr(self, field.attname) q = Q(**{'%s__%s' % (field.name, op): param}) q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk}) qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by( '%s%s' % (order, field.name), '%spk' % order ) try: return qs[0] except IndexError: raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = 'gt' if is_next else 'lt' order = '_order' if is_next else '-_order' order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = self.__class__._default_manager.filter(**filter_args).filter(**{ '_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{ self._meta.pk.name: self.pk }) }).order_by(order)[:1].get() setattr(self, cachename, obj) return getattr(self, cachename) def prepare_database_save(self, field): if self.pk is None: raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = [] unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [(self.__class__, self._meta.constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append((parent_class, parent_class._meta.unique_together)) if parent_class._meta.constraints: constraints.append((parent_class, parent_class._meta.constraints)) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. unique_checks.append((model_class, tuple(check))) for model_class, model_constraints in constraints: for constraint in model_constraints: if (isinstance(constraint, UniqueConstraint) and # Partial unique constraints can't be validated. 
constraint.condition is None and not any(name in exclude for name in constraint.fields)): unique_checks.append((model_class, constraint.fields)) # These are checks for the unique_for_<date/year/month>. date_checks = [] # Gather a list of checks for fields declared as unique and add them to # the list of checks. fields_with_class = [(self.__class__, self._meta.local_fields)] for parent_class in self._meta.get_parent_list(): fields_with_class.append((parent_class, parent_class._meta.local_fields)) for model_class, fields in fields_with_class: for f in fields: name = f.name if name in exclude: continue if f.unique: unique_checks.append((model_class, (name,))) if f.unique_for_date and f.unique_for_date not in exclude: date_checks.append((model_class, 'date', name, f.unique_for_date)) if f.unique_for_year and f.unique_for_year not in exclude: date_checks.append((model_class, 'year', name, f.unique_for_year)) if f.unique_for_month and f.unique_for_month not in exclude: date_checks.append((model_class, 'month', name, f.unique_for_month)) return unique_checks, date_checks def _perform_unique_checks(self, unique_checks): errors = {} for model_class, unique_check in unique_checks: # Try to look up an existing object with the same values as this # object's values for all the unique field. lookup_kwargs = {} for field_name in unique_check: f = self._meta.get_field(field_name) lookup_value = getattr(self, f.attname) # TODO: Handle multiple backends with different feature flags. if (lookup_value is None or (lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)): # no value, skip the lookup continue if f.primary_key and not self._state.adding: # no need to check for unique primary key when editing continue lookup_kwargs[str(field_name)] = lookup_value # some fields were skipped, no reason to do the check if len(unique_check) != len(lookup_kwargs): continue qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) # Note that we need to use the pk as defined by model_class, not # self.pk. These can be different fields because model inheritance # allows single model to have effectively multiple primary keys. # Refs #17615. 
model_class_pk = self._get_pk_val(model_class._meta) if not self._state.adding and model_class_pk is not None: qs = qs.exclude(pk=model_class_pk) if qs.exists(): if len(unique_check) == 1: key = unique_check[0] else: key = NON_FIELD_ERRORS errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check)) return errors def _perform_date_checks(self, date_checks): errors = {} for model_class, lookup_type, field, unique_for in date_checks: lookup_kwargs = {} # there's a ticket to add a date lookup, we can remove this special # case if that makes it's way in date = getattr(self, unique_for) if date is None: continue if lookup_type == 'date': lookup_kwargs['%s__day' % unique_for] = date.day lookup_kwargs['%s__month' % unique_for] = date.month lookup_kwargs['%s__year' % unique_for] = date.year else: lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append( self.date_error_message(lookup_type, field, unique_for) ) return errors def date_error_message(self, lookup_type, field_name, unique_for): opts = self._meta field = opts.get_field(field_name) return ValidationError( message=field.error_messages['unique_for_date'], code='unique_for_date', params={ 'model': self, 'model_name': capfirst(opts.verbose_name), 'lookup_type': lookup_type, 'field': field_name, 'field_label': capfirst(field.verbose_name), 'date_field': unique_for, 'date_field_label': capfirst(opts.get_field(unique_for).verbose_name), } ) def unique_error_message(self, model_class, unique_check): opts = model_class._meta params = { 'model': self, 'model_class': model_class, 'model_name': capfirst(opts.verbose_name), 'unique_check': unique_check, } # A unique field if len(unique_check) == 1: field = opts.get_field(unique_check[0]) params['field_label'] = capfirst(field.verbose_name) return ValidationError( message=field.error_messages['unique'], code='unique', params=params, ) # unique_together else: field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check] params['field_labels'] = get_text_list(field_labels, _('and')) return ValidationError( message=_("%(model_name)s with this %(field_labels)s already exists."), code='unique_together', params=params, ) def full_clean(self, exclude=None, validate_unique=True): """ Call clean_fields(), clean(), and validate_unique() on the model. Raise a ValidationError for any errors that occur. """ errors = {} if exclude is None: exclude = [] else: exclude = list(exclude) try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. 
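        # E.g. if clean_fields() already rejected 'email', that name is
        # excluded here so validate_unique() doesn't query on a value that is
        # known to be invalid. ('email' is a hypothetical field name.)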
if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.append(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = [] errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [*cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs)] if not cls._meta.swapped: errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(), *cls._check_ordering(), *cls._check_constraints(), ] return errors @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id='models.E001', ) ) except LookupError: app_label, model_name = cls._meta.swapped.split('.') errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % ( cls._meta.swappable, app_label, model_name ), id='models.E002', ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id='models.E017', ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """ Check if no relationship model is used by more than one m2m field. """ errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. 
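        # E.g. declaring ManyToManyField('Person', through='Membership') twice
        # on one model yields models.E003 below (hypothetical model names).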
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = (f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id='models.E003', ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == 'id': return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id='models.E004', ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % ( clash.name, clash.model._meta, f.name, f.model._meta ), obj=cls, id='models.E005', ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % ( f.name, clash.name, clash.model._meta ), obj=f, id='models.E006', ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id='models.E007' ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith('_') or model_name.endswith('_'): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." 
% model_name, obj=cls, id='models.E023' ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id='models.E024' ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id='models.E025', ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id='models.E026', ) ) return errors @classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id='models.E008', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id='models.E009', ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id='models.E010', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id='models.E011', ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls): """Check the fields and names of indexes.""" errors = [] for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == '_' or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id='models.E033', ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." 
% (index.name, index.max_name_length), obj=cls, id='models.E034', ), ) fields = [field for index in cls._meta.indexes for field, _ in index.fields_orders] errors.extend(cls._check_local_fields(fields, 'indexes')) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, 'attname'): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id='models.E012', ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id='models.E013', ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model '%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id='models.E016', ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id='models.E021', ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by only one field).", obj=cls, id='models.E014', ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != '?') # Convert "-field" to "field". fields = ((f[1:] if f.startswith('-') else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: # pk is an alias that won't be found by opts.get_field. if part == 'pk': fld = _cls._meta.pk else: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.get_path_info()[-1].to_opts.model else: _cls = None except (FieldDoesNotExist, AttributeError): if fld is None or fld.get_transform(part) is None: errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id='models.E015', ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != 'pk'} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
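        # E.g. ordering = ['-created'] validates against the names collected
        # below, while ordering = ['nonexistent'] is reported as models.E015.
        # ('created' is a hypothetical field name.)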
opts = cls._meta valid_fields = set(chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) )) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id='models.E015', ) ) return errors @classmethod def _check_long_column_names(cls): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in settings.DATABASES: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if f.db_column is None and column_name is not None and len(column_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id='models.E018', ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for M2M field ' '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id='models.E019', ) ) return errors @classmethod def _check_constraints(cls): errors = [] for db in settings.DATABASES: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if ( connection.features.supports_table_check_constraints or 'supports_table_check_constraints' in cls._meta.required_db_features ): continue if any(isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints): errors.append( checks.Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id='models.W027', ) ) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): if using is None: using = DEFAULT_DB_ALIAS order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update([ ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list) ], ['_order']) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, 'get_%s_order' % model.__name__.lower(), partialmethod(method_get_order, model) ) setattr( related_model, 'set_%s_order' % model.__name__.lower(), partialmethod(method_set_order, model) ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
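
# -----------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of Django). The machinery above is
# driven by an ordinary model definition; Book and the 'library' app label are
# hypothetical:
#
#     from django.db import models
#
#     class Book(models.Model):
#         title = models.CharField(max_length=100)
#
#         class Meta:
#             app_label = 'library'  # must name an installed app
#
# ModelBase builds Book._meta, the auto-created 'objects' manager, and the
# DoesNotExist/MultipleObjectsReturned exceptions. Instances round-trip
# through pickle via __reduce__() and model_unpickle():
#
#     import pickle
#     book = Book(pk=1, title='Django')
#     assert pickle.loads(pickle.dumps(book)) == book  # __eq__ compares pks
# -----------------------------------------------------------------------------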
import collections.abc import copy import datetime import decimal import operator import uuid import warnings from base64 import b64decode, b64encode from functools import partialmethod, total_ordering from django import forms from django.apps import apps from django.conf import settings from django.core import checks, exceptions, validators from django.db import connection, connections, router from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin from django.utils import timezone from django.utils.datastructures import DictWrapper from django.utils.dateparse import ( parse_date, parse_datetime, parse_duration, parse_time, ) from django.utils.duration import duration_microseconds, duration_string from django.utils.functional import Promise, cached_property from django.utils.ipv6 import clean_ipv6_address from django.utils.itercompat import is_iterable from django.utils.text import capfirst from django.utils.translation import gettext_lazy as _ __all__ = [ 'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField', 'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField', 'DateTimeField', 'DecimalField', 'DurationField', 'EmailField', 'Empty', 'Field', 'FilePathField', 'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED', 'NullBooleanField', 'PositiveBigIntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SlugField', 'SmallAutoField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField', 'UUIDField', ] class Empty: pass class NOT_PROVIDED: pass # The values to use for "blank" in SelectFields. Will be appended to the start # of most "choices" lists. BLANK_CHOICE_DASH = [("", "---------")] def _load_field(app_label, model_name, field_name): return apps.get_model(app_label, model_name)._meta.get_field(field_name) # A guide to Field parameters: # # * name: The name of the field specified in the model. # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * db_column: The db_column specified in the model (or None). # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. # # Code that introspects values, or does other dynamic things, should use # attname. For example, this gets the primary key value of object "obj": # # getattr(obj, opts.pk.attname) def _empty(of_cls): new = Empty() new.__class__ = of_cls return new def return_None(): return None @total_ordering class Field(RegisterLookupMixin): """Base class for all field types""" # Designates whether empty strings fundamentally are allowed at the # database level. empty_strings_allowed = True empty_values = list(validators.EMPTY_VALUES) # These track each time a Field instance is created. Used to retain order. # The auto_creation_counter is used for fields that Django implicitly # creates, creation_counter is used for all user-specified fields. creation_counter = 0 auto_creation_counter = -1 default_validators = [] # Default set of validators default_error_messages = { 'invalid_choice': _('Value %(value)r is not a valid choice.'), 'null': _('This field cannot be null.'), 'blank': _('This field cannot be blank.'), 'unique': _('%(model_name)s with this %(field_label)s ' 'already exists.'), # Translators: The 'lookup_type' is one of 'date', 'year' or 'month'. 
# Eg: "Title must be unique for pub_date year" 'unique_for_date': _("%(field_label)s must be unique for " "%(date_field_label)s %(lookup_type)s."), } system_check_deprecated_details = None system_check_removed_details = None # Field flags hidden = False many_to_many = None many_to_one = None one_to_many = None one_to_one = None related_model = None descriptor_class = DeferredAttribute # Generic field type description, usually overridden by subclasses def _description(self): return _('Field of type: %(field_type)s') % { 'field_type': self.__class__.__name__ } description = property(_description) def __init__(self, verbose_name=None, name=None, primary_key=False, max_length=None, unique=False, blank=False, null=False, db_index=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True, unique_for_date=None, unique_for_month=None, unique_for_year=None, choices=None, help_text='', db_column=None, db_tablespace=None, auto_created=False, validators=(), error_messages=None): self.name = name self.verbose_name = verbose_name # May be set by set_attributes_from_name self._verbose_name = verbose_name # Store original for deconstruction self.primary_key = primary_key self.max_length, self._unique = max_length, unique self.blank, self.null = blank, null self.remote_field = rel self.is_relation = self.remote_field is not None self.default = default self.editable = editable self.serialize = serialize self.unique_for_date = unique_for_date self.unique_for_month = unique_for_month self.unique_for_year = unique_for_year if isinstance(choices, collections.abc.Iterator): choices = list(choices) self.choices = choices self.help_text = help_text self.db_index = db_index self.db_column = db_column self._db_tablespace = db_tablespace self.auto_created = auto_created # Adjust the appropriate creation counter, and save our local copy. if auto_created: self.creation_counter = Field.auto_creation_counter Field.auto_creation_counter -= 1 else: self.creation_counter = Field.creation_counter Field.creation_counter += 1 self._validators = list(validators) # Store for deconstruction later messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, 'default_error_messages', {})) messages.update(error_messages or {}) self._error_messages = error_messages # Store for deconstruction later self.error_messages = messages def __str__(self): """ Return "app_label.model_label.field_name" for fields attached to models. """ if not hasattr(self, 'model'): return super().__str__() model = self.model app = model._meta.app_label return '%s.%s.%s' % (app, model._meta.object_name, self.name) def __repr__(self): """Display the module, class, and name of the field.""" path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__) name = getattr(self, 'name', None) if name is not None: return '<%s: %s>' % (path, name) return '<%s>' % path def check(self, **kwargs): return [ *self._check_field_name(), *self._check_choices(), *self._check_db_index(), *self._check_null_allowed_for_primary_keys(), *self._check_backend_specific_checks(**kwargs), *self._check_validators(), *self._check_deprecation_details(), ] def _check_field_name(self): """ Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain "__" and 3) is not "pk". """ if self.name.endswith('_'): return [ checks.Error( 'Field names must not end with an underscore.', obj=self, id='fields.E001', ) ] elif LOOKUP_SEP in self.name: return [ checks.Error( 'Field names must not contain "%s".' 
% (LOOKUP_SEP,), obj=self, id='fields.E002', ) ] elif self.name == 'pk': return [ checks.Error( "'pk' is a reserved word that cannot be used as a field name.", obj=self, id='fields.E003', ) ] else: return [] @classmethod def _choices_is_value(cls, value): return isinstance(value, (str, Promise)) or not is_iterable(value) def _check_choices(self): if not self.choices: return [] if not is_iterable(self.choices) or isinstance(self.choices, str): return [ checks.Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=self, id='fields.E004', ) ] choice_max_length = 0 # Expect [group_name, [value, display]] for choices_group in self.choices: try: group_name, group_choices = choices_group except (TypeError, ValueError): # Containing non-pairs break try: if not all( self._choices_is_value(value) and self._choices_is_value(human_name) for value, human_name in group_choices ): break if self.max_length is not None and group_choices: choice_max_length = max([ choice_max_length, *(len(value) for value, _ in group_choices if isinstance(value, str)), ]) except (TypeError, ValueError): # No groups, choices in the form [value, display] value, human_name = group_name, group_choices if not self._choices_is_value(value) or not self._choices_is_value(human_name): break if self.max_length is not None and isinstance(value, str): choice_max_length = max(choice_max_length, len(value)) # Special case: choices=['ab'] if isinstance(choices_group, str): break else: if self.max_length is not None and choice_max_length > self.max_length: return [ checks.Error( "'max_length' is too small to fit the longest value " "in 'choices' (%d characters)." % choice_max_length, obj=self, id='fields.E009', ), ] return [] return [ checks.Error( "'choices' must be an iterable containing " "(actual value, human readable name) tuples.", obj=self, id='fields.E005', ) ] def _check_db_index(self): if self.db_index not in (None, True, False): return [ checks.Error( "'db_index' must be None, True or False.", obj=self, id='fields.E006', ) ] else: return [] def _check_null_allowed_for_primary_keys(self): if (self.primary_key and self.null and not connection.features.interprets_empty_strings_as_nulls): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). return [ checks.Error( 'Primary keys must not have null=True.', hint=('Set null=False on the field, or ' 'remove primary_key=True argument.'), obj=self, id='fields.E007', ) ] else: return [] def _check_backend_specific_checks(self, **kwargs): app_label = self.model._meta.app_label for db in connections: if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name): return connections[db].validation.check_field(self, **kwargs) return [] def _check_validators(self): errors = [] for i, validator in enumerate(self.validators): if not callable(validator): errors.append( checks.Error( "All 'validators' must be callable.", hint=( "validators[{i}] ({repr}) isn't a function or " "instance of a validator class.".format( i=i, repr=repr(validator), ) ), obj=self, id='fields.E008', ) ) return errors def _check_deprecation_details(self): if self.system_check_removed_details is not None: return [ checks.Error( self.system_check_removed_details.get( 'msg', '%s has been removed except for support in historical ' 'migrations.' 
% self.__class__.__name__ ), hint=self.system_check_removed_details.get('hint'), obj=self, id=self.system_check_removed_details.get('id', 'fields.EXXX'), ) ] elif self.system_check_deprecated_details is not None: return [ checks.Warning( self.system_check_deprecated_details.get( 'msg', '%s has been deprecated.' % self.__class__.__name__ ), hint=self.system_check_deprecated_details.get('hint'), obj=self, id=self.system_check_deprecated_details.get('id', 'fields.WXXX'), ) ] return [] def get_col(self, alias, output_field=None): if output_field is None: output_field = self if alias != self.model._meta.db_table or output_field != self: from django.db.models.expressions import Col return Col(alias, self, output_field) else: return self.cached_col @cached_property def cached_col(self): from django.db.models.expressions import Col return Col(self.model._meta.db_table, self) def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django. """ return sql, params def deconstruct(self): """ Return enough information to recreate the field as a 4-tuple: * The name of the field on the model, if contribute_to_class() has been run. * The import path of the field, including the class:e.g. django.db.models.IntegerField This should be the most portable version, so less specific may be better. * A list of positional arguments. * A dict of keyword arguments. Note that the positional or keyword arguments must contain values of the following types (including inner values of collection types): * None, bool, str, int, float, complex, set, frozenset, list, tuple, dict * UUID * datetime.datetime (naive), datetime.date * top-level classes, top-level functions - will be referenced by their full import path * Storage instances - these have their own deconstruct() method This is because the values here must be serialized into a text format (possibly new Python code, possibly JSON) and these are the only types with encoding handlers defined. There's no need to return the exact way the field was instantiated this time, just ensure that the resulting field is the same - prefer keyword arguments over positional ones, and omit parameters with their default values. 
""" # Short-form way of fetching all the default parameters keywords = {} possibles = { "verbose_name": None, "primary_key": False, "max_length": None, "unique": False, "blank": False, "null": False, "db_index": False, "default": NOT_PROVIDED, "editable": True, "serialize": True, "unique_for_date": None, "unique_for_month": None, "unique_for_year": None, "choices": None, "help_text": '', "db_column": None, "db_tablespace": None, "auto_created": False, "validators": [], "error_messages": None, } attr_overrides = { "unique": "_unique", "error_messages": "_error_messages", "validators": "_validators", "verbose_name": "_verbose_name", "db_tablespace": "_db_tablespace", } equals_comparison = {"choices", "validators"} for name, default in possibles.items(): value = getattr(self, attr_overrides.get(name, name)) # Unroll anything iterable for choices into a concrete list if name == "choices" and isinstance(value, collections.abc.Iterable): value = list(value) # Do correct kind of comparison if name in equals_comparison: if value != default: keywords[name] = value else: if value is not default: keywords[name] = value # Work out path - we shorten it for known Django core fields path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) if path.startswith("django.db.models.fields.related"): path = path.replace("django.db.models.fields.related", "django.db.models") elif path.startswith("django.db.models.fields.files"): path = path.replace("django.db.models.fields.files", "django.db.models") elif path.startswith("django.db.models.fields.proxy"): path = path.replace("django.db.models.fields.proxy", "django.db.models") elif path.startswith("django.db.models.fields"): path = path.replace("django.db.models.fields", "django.db.models") # Return basic info - other fields should override this. return (self.name, path, [], keywords) def clone(self): """ Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names. """ name, path, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def __eq__(self, other): # Needed for @total_ordering if isinstance(other, Field): return self.creation_counter == other.creation_counter return NotImplemented def __lt__(self, other): # This is needed because bisect does not take a comparison function. if isinstance(other, Field): return self.creation_counter < other.creation_counter return NotImplemented def __hash__(self): return hash(self.creation_counter) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. obj = copy.copy(self) if self.remote_field: obj.remote_field = copy.copy(self.remote_field) if hasattr(self.remote_field, 'field') and self.remote_field.field is self: obj.remote_field.field = obj memodict[id(self)] = obj return obj def __copy__(self): # We need to avoid hitting __reduce__, so define this # slightly weird copy construct. obj = Empty() obj.__class__ = self.__class__ obj.__dict__ = self.__dict__.copy() return obj def __reduce__(self): """ Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, use the app registry to load the model and then the field back. """ if not hasattr(self, 'model'): # Fields are sometimes used without attaching them to models (for # example in aggregation). In this case give back a plain field # instance. 
The code below will create a new empty instance of # class self.__class__, then update its dict with self.__dict__ # values - so, this is very close to normal pickle. state = self.__dict__.copy() # The _get_default cached_property can't be pickled due to lambda # usage. state.pop('_get_default', None) return _empty, (self.__class__,), state return _load_field, (self.model._meta.app_label, self.model._meta.object_name, self.name) def get_pk_value_on_save(self, instance): """ Hook to generate new PK values on save. This method is called when saving instances with no primary key value set. If this method returns something else than None, then the returned value is used when saving the new instance. """ if self.default: return self.get_default() return None def to_python(self, value): """ Convert the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Return the converted value. Subclasses should override this. """ return value @cached_property def validators(self): """ Some validators can't be created at field initialization time. This method provides a way to delay their creation until required. """ return [*self.default_validators, *self._validators] def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except exceptions.ValidationError as e: if hasattr(e, 'code') and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise exceptions.ValidationError(errors) def validate(self, value, model_instance): """ Validate value and raise ValidationError if necessary. Subclasses should override this to provide validation logic. """ if not self.editable: # Skip validation for non-editable fields. return if self.choices is not None and value not in self.empty_values: for option_key, option_value in self.choices: if isinstance(option_value, (list, tuple)): # This is an optgroup, so look inside the group for # options. for optgroup_key, optgroup_value in option_value: if value == optgroup_key: return elif value == option_key: return raise exceptions.ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value}, ) if value is None and not self.null: raise exceptions.ValidationError(self.error_messages['null'], code='null') if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages['blank'], code='blank') def clean(self, value, model_instance): """ Convert the value's type and run validation. Validation errors from to_python() and validate() are propagated. Return the correct value if no error is raised. """ value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value def db_type_parameters(self, connection): return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_') def db_check(self, connection): """ Return the database column check constraint for this field, for the provided connection. Works the same way as db_type() for the case that get_internal_type() does not map to a preexisting model field. """ data = self.db_type_parameters(connection) try: return connection.data_type_check_constraints[self.get_internal_type()] % data except KeyError: return None def db_type(self, connection): """ Return the database column data type for this field, for the provided connection. 
""" # The default implementation of this method looks at the # backend-specific data_types dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. data = self.db_type_parameters(connection) try: return connection.data_types[self.get_internal_type()] % data except KeyError: return None def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. For example, this method is called by ForeignKey and OneToOneField to determine its data type. """ return self.db_type(connection) def cast_db_type(self, connection): """Return the data type to use in the Cast() function.""" db_type = connection.ops.cast_data_types.get(self.get_internal_type()) if db_type: return db_type % self.db_type_parameters(connection) return self.db_type(connection) def db_parameters(self, connection): """ Extension of db_type(), providing a range of different return values (type, checks). This will look at db_type(), allowing custom model fields to override it. """ type_string = self.db_type(connection) check_string = self.db_check(connection) return { "type": type_string, "check": check_string, } def db_type_suffix(self, connection): return connection.data_types_suffix.get(self.get_internal_type()) def get_db_converters(self, connection): if hasattr(self, 'from_db_value'): return [self.from_db_value] return [] @property def unique(self): return self._unique or self.primary_key @property def db_tablespace(self): return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE @property def db_returning(self): """ Private API intended only to be used by Django itself. Currently only the PostgreSQL backend supports returning multiple fields on a model. """ return False def set_attributes_from_name(self, name): self.name = self.name or name self.attname, self.column = self.get_attname_column() self.concrete = self.column is not None if self.verbose_name is None and self.name: self.verbose_name = self.name.replace('_', ' ') def contribute_to_class(self, cls, name, private_only=False): """ Register the field with the model class it belongs to. If private_only is True, create a separate instance of this field for every subclass of cls, even if cls is not an abstract model. """ self.set_attributes_from_name(name) self.model = cls cls._meta.add_field(self, private=private_only) if self.column: # Don't override classmethods with the descriptor. This means that # if you have a classmethod and a field with the same name, then # such fields can't be deferred (we don't have a check for this). if not getattr(cls, self.attname, None): setattr(cls, self.attname, self.descriptor_class(self)) if self.choices is not None: # Don't override a get_FOO_display() method defined explicitly on # this class, but don't check methods derived from inheritance, to # allow overriding inherited choices. 
For more complex inheritance # structures users should override contribute_to_class(). if 'get_%s_display' % self.name not in cls.__dict__: setattr( cls, 'get_%s_display' % self.name, partialmethod(cls._get_FIELD_display, field=self), ) def get_filter_kwargs_for_object(self, obj): """ Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has. """ return {self.name: getattr(obj, self.attname)} def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """Return field's value just before saving.""" return getattr(model_instance, self.attname) def get_prep_value(self, value): """Perform preliminary non-db specific value checks and conversions.""" if isinstance(value, Promise): value = value._proxy____cast() return value def get_db_prep_value(self, value, connection, prepared=False): """ Return field's value prepared for interacting with the database backend. Used by the default implementations of get_db_prep_save(). """ if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """Return field's value prepared for saving into a database.""" return self.get_db_prep_value(value, connection=connection, prepared=False) def has_default(self): """Return a boolean of whether this field has a default value.""" return self.default is not NOT_PROVIDED def get_default(self): """Return the default value for this field.""" return self._get_default() @cached_property def _get_default(self): if self.has_default(): if callable(self.default): return self.default return lambda: self.default if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls: return return_None return str # return empty string def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()): """ Return choices with a default blank choices included, for use as <select> choices for this field. """ if self.choices is not None: choices = list(self.choices) if include_blank: blank_defined = any(choice in ('', None) for choice, _ in self.flatchoices) if not blank_defined: choices = blank_choice + choices return choices rel_model = self.remote_field.model limit_choices_to = limit_choices_to or self.get_limit_choices_to() choice_func = operator.attrgetter( self.remote_field.get_related_field().attname if hasattr(self.remote_field, 'get_related_field') else 'pk' ) qs = rel_model._default_manager.complex_filter(limit_choices_to) if ordering: qs = qs.order_by(*ordering) return (blank_choice if include_blank else []) + [ (choice_func(x), str(x)) for x in qs ] def value_to_string(self, obj): """ Return a string value of this field from the passed obj. This is used by the serialization framework. 
""" return str(self.value_from_object(obj)) def _get_flatchoices(self): """Flattened version of choices tuple.""" if self.choices is None: return [] flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=None, choices_form_class=None, **kwargs): """Return a django.forms.Field instance for this field.""" defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, } if self.has_default(): if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() if self.choices is not None: # Fields with choices get special treatment. include_blank = (self.blank or not (self.has_default() or 'initial' in kwargs)) defaults['choices'] = self.get_choices(include_blank=include_blank) defaults['coerce'] = self.to_python if self.null: defaults['empty_value'] = None if choices_form_class is not None: form_class = choices_form_class else: form_class = forms.TypedChoiceField # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. for k in list(kwargs): if k not in ('coerce', 'empty_value', 'choices', 'required', 'widget', 'label', 'initial', 'help_text', 'error_messages', 'show_hidden_initial', 'disabled'): del kwargs[k] defaults.update(kwargs) if form_class is None: form_class = forms.CharField return form_class(**defaults) def value_from_object(self, obj): """Return the value of this field in the given model instance.""" return getattr(obj, self.attname) class BooleanField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be either True or False.'), 'invalid_nullable': _('“%(value)s” value must be either True, False, or None.'), } description = _("Boolean (Either True or False)") def get_internal_type(self): return "BooleanField" def to_python(self, value): if self.null and value in self.empty_values: return None if value in (True, False): # 1/0 are equal to True/False. bool() converts former to latter. return bool(value) if value in ('t', 'True', '1'): return True if value in ('f', 'False', '0'): return False raise exceptions.ValidationError( self.error_messages['invalid_nullable' if self.null else 'invalid'], code='invalid', params={'value': value}, ) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return self.to_python(value) def formfield(self, **kwargs): if self.choices is not None: include_blank = not (self.has_default() or 'initial' in kwargs) defaults = {'choices': self.get_choices(include_blank=include_blank)} else: form_class = forms.NullBooleanField if self.null else forms.BooleanField # In HTML checkboxes, 'required' means "must be checked" which is # different from the choices case ("must select some value"). # required=False allows unchecked checkboxes. 
defaults = {'form_class': form_class, 'required': False} return super().formfield(**{**defaults, **kwargs}) class CharField(Field): description = _("String (up to %(max_length)s)") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_attribute(**kwargs), ] def _check_max_length_attribute(self, **kwargs): if self.max_length is None: return [ checks.Error( "CharFields must define a 'max_length' attribute.", obj=self, id='fields.E120', ) ] elif (not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or self.max_length <= 0): return [ checks.Error( "'max_length' must be a positive integer.", obj=self, id='fields.E121', ) ] else: return [] def cast_db_type(self, connection): if self.max_length is None: return connection.ops.cast_char_field_without_max_length return super().cast_db_type(connection) def get_internal_type(self): return "CharField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). defaults = {'max_length': self.max_length} # TODO: Handle multiple backends with different feature flags. if self.null and not connection.features.interprets_empty_strings_as_nulls: defaults['empty_value'] = None defaults.update(kwargs) return super().formfield(**defaults) class CommaSeparatedIntegerField(CharField): default_validators = [validators.validate_comma_separated_integer_list] description = _("Comma-separated integers") system_check_removed_details = { 'msg': ( 'CommaSeparatedIntegerField is removed except for support in ' 'historical migrations.' ), 'hint': ( 'Use CharField(validators=[validate_comma_separated_integer_list]) ' 'instead.' ), 'id': 'fields.E901', } class DateTimeCheckMixin: def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_mutually_exclusive_options(), *self._check_fix_default_value(), ] def _check_mutually_exclusive_options(self): # auto_now, auto_now_add, and default are mutually exclusive # options. The use of more than one of these options together # will trigger an Error mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()] enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True) if enabled_options > 1: return [ checks.Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=self, id='fields.E160', ) ] else: return [] def _check_fix_default_value(self): return [] class DateField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid date format. 
It must be ' 'in YYYY-MM-DD format.'), 'invalid_date': _('“%(value)s” value has the correct format (YYYY-MM-DD) ' 'but it is an invalid date.'), } description = _("Date (without time)") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): if not timezone.is_naive(value): value = timezone.make_naive(value, timezone.utc) value = value.date() elif isinstance(value, datetime.date): # Nothing to do, as dates don't have tz information pass else: # No explicit date / datetime value -- no checks necessary return [] offset = datetime.timedelta(days=1) lower = (now - offset).date() upper = (now + offset).date() if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now: kwargs['auto_now'] = True if self.auto_now_add: kwargs['auto_now_add'] = True if self.auto_now or self.auto_now_add: del kwargs['editable'] del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "DateField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): if settings.USE_TZ and timezone.is_aware(value): # Convert aware datetimes to the default time zone # before casting them to dates (#17742). 
default_timezone = timezone.get_default_timezone() value = timezone.make_naive(value, default_timezone) return value.date() if isinstance(value, datetime.date): return value try: parsed = parse_date(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_date'], code='invalid_date', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.date.today() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) if not self.null: setattr( cls, 'get_next_by_%s' % self.name, partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True) ) setattr( cls, 'get_previous_by_%s' % self.name, partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False) ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts dates into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DateField, **kwargs, }) class DateTimeField(DateField): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' 'YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format.'), 'invalid_date': _("“%(value)s” value has the correct format " "(YYYY-MM-DD) but it is an invalid date."), 'invalid_datetime': _('“%(value)s” value has the correct format ' '(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) ' 'but it is an invalid date/time.'), } description = _("Date (with time)") # __init__ is inherited from DateField def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc) elif isinstance(value, datetime.date): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset lower = datetime.datetime(lower.year, lower.month, lower.day) upper = now + second_offset upper = datetime.datetime(upper.year, upper.month, upper.day) value = datetime.datetime(value.year, value.month, value.day) else: # No explicit date / datetime value -- no checks necessary return [] if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. 
If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def get_internal_type(self): return "DateTimeField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value if isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) if settings.USE_TZ: # For backwards compatibility, interpret naive datetimes in # local time. This won't work during DST change, but we can't # do much about it, so we let the exceptions percolate up the # call stack. warnings.warn("DateTimeField %s.%s received a naive datetime " "(%s) while time zone support is active." % (self.model.__name__, self.name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value try: parsed = parse_datetime(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_datetime'], code='invalid_datetime', params={'value': value}, ) try: parsed = parse_date(value) if parsed is not None: return datetime.datetime(parsed.year, parsed.month, parsed.day) except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_date'], code='invalid_date', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = timezone.now() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) # contribute_to_class is inherited from DateField, it registers # get_next_by_FOO and get_prev_by_FOO def get_prep_value(self, value): value = super().get_prep_value(value) value = self.to_python(value) if value is not None and settings.USE_TZ and timezone.is_naive(value): # For backwards compatibility, interpret naive datetimes in local # time. This won't work during DST change, but we can't do much # about it, so we let the exceptions percolate up the call stack. try: name = '%s.%s' % (self.model.__name__, self.name) except AttributeError: name = '(unbound)' warnings.warn("DateTimeField %s received a naive datetime (%s)" " while time zone support is active." 
% (name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value def get_db_prep_value(self, value, connection, prepared=False): # Casts datetimes into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datetimefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DateTimeField, **kwargs, }) class DecimalField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a decimal number.'), } description = _("Decimal number") def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) digits_errors = [ *self._check_decimal_places(), *self._check_max_digits(), ] if not digits_errors: errors.extend(self._check_decimal_places_and_max_digits(**kwargs)) else: errors.extend(digits_errors) return errors def _check_decimal_places(self): try: decimal_places = int(self.decimal_places) if decimal_places < 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'decimal_places' attribute.", obj=self, id='fields.E130', ) ] except ValueError: return [ checks.Error( "'decimal_places' must be a non-negative integer.", obj=self, id='fields.E131', ) ] else: return [] def _check_max_digits(self): try: max_digits = int(self.max_digits) if max_digits <= 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'max_digits' attribute.", obj=self, id='fields.E132', ) ] except ValueError: return [ checks.Error( "'max_digits' must be a positive integer.", obj=self, id='fields.E133', ) ] else: return [] def _check_decimal_places_and_max_digits(self, **kwargs): if int(self.decimal_places) > int(self.max_digits): return [ checks.Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=self, id='fields.E134', ) ] return [] @cached_property def validators(self): return super().validators + [ validators.DecimalValidator(self.max_digits, self.decimal_places) ] @cached_property def context(self): return decimal.Context(prec=self.max_digits) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.max_digits is not None: kwargs['max_digits'] = self.max_digits if self.decimal_places is not None: kwargs['decimal_places'] = self.decimal_places return name, path, args, kwargs def get_internal_type(self): return "DecimalField" def to_python(self, value): if value is None: return value if isinstance(value, float): return self.context.create_decimal_from_float(value) try: return decimal.Decimal(value) except decimal.InvalidOperation: raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_save(self, value, connection): return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): return super().formfield(**{ 'max_digits': self.max_digits, 'decimal_places': self.decimal_places, 'form_class': forms.DecimalField, **kwargs, }) class 
DurationField(Field): """ Store timedelta objects. Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint of microseconds on other databases. """ empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' '[DD] [[HH:]MM:]ss[.uuuuuu] format.') } description = _("Duration") def get_internal_type(self): return "DurationField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.timedelta): return value try: parsed = parse_duration(value) except ValueError: pass else: if parsed is not None: return parsed raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_value(self, value, connection, prepared=False): if connection.features.has_native_duration_field: return value if value is None: return None return duration_microseconds(value) def get_db_converters(self, connection): converters = [] if not connection.features.has_native_duration_field: converters.append(connection.ops.convert_durationfield_value) return converters + super().get_db_converters(connection) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else duration_string(val) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DurationField, **kwargs, }) class EmailField(CharField): default_validators = [validators.validate_email] description = _("Email address") def __init__(self, *args, **kwargs): # max_length=254 to be compliant with RFCs 3696 and 5321 kwargs.setdefault('max_length', 254) super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() # We do not exclude max_length if it matches default as we want to change # the default in future. return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause email validation to be performed # twice. 
return super().formfield(**{ 'form_class': forms.EmailField, **kwargs, }) class FilePathField(Field): description = _("File path") def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders kwargs.setdefault('max_length', 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_allowing_files_or_folders(**kwargs), ] def _check_allowing_files_or_folders(self, **kwargs): if not self.allow_files and not self.allow_folders: return [ checks.Error( "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.", obj=self, id='fields.E140', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.path != '': kwargs['path'] = self.path if self.match is not None: kwargs['match'] = self.match if self.recursive is not False: kwargs['recursive'] = self.recursive if self.allow_files is not True: kwargs['allow_files'] = self.allow_files if self.allow_folders is not False: kwargs['allow_folders'] = self.allow_folders if kwargs.get("max_length") == 100: del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'path': self.path() if callable(self.path) else self.path, 'match': self.match, 'recursive': self.recursive, 'form_class': forms.FilePathField, 'allow_files': self.allow_files, 'allow_folders': self.allow_folders, **kwargs, }) def get_internal_type(self): return "FilePathField" class FloatField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a float.'), } description = _("Floating point number") def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return float(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "FloatField" def to_python(self, value): if value is None: return value try: return float(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.FloatField, **kwargs, }) class IntegerField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be an integer.'), } description = _("Integer") def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_warning(), ] def _check_max_length_warning(self): if self.max_length is not None: return [ checks.Warning( "'max_length' is ignored when used with %s." % self.__class__.__name__, hint="Remove 'max_length' from field", obj=self, id='fields.W122', ) ] return [] @cached_property def validators(self): # These validators can't be added at field initialization time since # they're based on values retrieved from `connection`. 
validators_ = super().validators internal_type = self.get_internal_type() min_value, max_value = connection.ops.integer_field_range(internal_type) if min_value is not None and not any( ( isinstance(validator, validators.MinValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) >= min_value ) for validator in validators_ ): validators_.append(validators.MinValueValidator(min_value)) if max_value is not None and not any( ( isinstance(validator, validators.MaxValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) <= max_value ) for validator in validators_ ): validators_.append(validators.MaxValueValidator(max_value)) return validators_ def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return int(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "IntegerField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.IntegerField, **kwargs, }) class BigIntegerField(IntegerField): description = _("Big (8 byte) integer") MAX_BIGINT = 9223372036854775807 def get_internal_type(self): return "BigIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': -BigIntegerField.MAX_BIGINT - 1, 'max_value': BigIntegerField.MAX_BIGINT, **kwargs, }) class IPAddressField(Field): empty_strings_allowed = False description = _("IPv4 address") system_check_removed_details = { 'msg': ( 'IPAddressField has been removed except for support in ' 'historical migrations.' 
), 'hint': 'Use GenericIPAddressField instead.', 'id': 'fields.E900', } def __init__(self, *args, **kwargs): kwargs['max_length'] = 15 super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def get_internal_type(self): return "IPAddressField" class GenericIPAddressField(Field): empty_strings_allowed = False description = _("IP address") default_error_messages = {} def __init__(self, verbose_name=None, name=None, protocol='both', unpack_ipv4=False, *args, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.protocol = protocol self.default_validators, invalid_error_message = \ validators.ip_address_validators(protocol, unpack_ipv4) self.default_error_messages['invalid'] = invalid_error_message kwargs['max_length'] = 39 super().__init__(verbose_name, name, *args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_blank_and_null_values(**kwargs), ] def _check_blank_and_null_values(self, **kwargs): if not getattr(self, 'null', False) and getattr(self, 'blank', False): return [ checks.Error( 'GenericIPAddressFields cannot have blank=True if null=False, ' 'as blank values are stored as nulls.', obj=self, id='fields.E150', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.unpack_ipv4 is not False: kwargs['unpack_ipv4'] = self.unpack_ipv4 if self.protocol != "both": kwargs['protocol'] = self.protocol if kwargs.get("max_length") == 39: del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "GenericIPAddressField" def to_python(self, value): if value is None: return None if not isinstance(value, str): value = str(value) value = value.strip() if ':' in value: return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid']) return value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_ipaddressfield_value(value) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None if value and ':' in value: try: return clean_ipv6_address(value, self.unpack_ipv4) except exceptions.ValidationError: pass return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'protocol': self.protocol, 'form_class': forms.GenericIPAddressField, **kwargs, }) class NullBooleanField(BooleanField): default_error_messages = { 'invalid': _('“%(value)s” value must be either None, True or False.'), 'invalid_nullable': _('“%(value)s” value must be either None, True or False.'), } description = _("Boolean (Either True, False or None)") def __init__(self, *args, **kwargs): kwargs['null'] = True kwargs['blank'] = True super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['null'] del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "NullBooleanField" class PositiveIntegerRelDbTypeMixin: def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. In most cases, a foreign key pointing to a positive integer primary key will have an integer column data type but some databases (e.g. MySQL) have an unsigned integer type. 
In that case (related_fields_match_type=True), the primary key should return its db_type. """ if connection.features.related_fields_match_type: return self.db_type(connection) else: return IntegerField().db_type(connection=connection) class PositiveBigIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _('Positive big integer') def get_internal_type(self): return 'PositiveBigIntegerField' def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive integer") def get_internal_type(self): return "PositiveIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive small integer") def get_internal_type(self): return "PositiveSmallIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class SlugField(CharField): default_validators = [validators.validate_slug] description = _("Slug (up to %(max_length)s)") def __init__(self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 50: del kwargs['max_length'] if self.db_index is False: kwargs['db_index'] = False else: del kwargs['db_index'] if self.allow_unicode is not False: kwargs['allow_unicode'] = self.allow_unicode return name, path, args, kwargs def get_internal_type(self): return "SlugField" def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode, **kwargs, }) class SmallIntegerField(IntegerField): description = _("Small integer") def get_internal_type(self): return "SmallIntegerField" class TextField(Field): description = _("Text") def get_internal_type(self): return "TextField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). return super().formfield(**{ 'max_length': self.max_length, **({} if self.choices is not None else {'widget': forms.Textarea}), **kwargs, }) class TimeField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' 'HH:MM[:ss[.uuuuuu]] format.'), 'invalid_time': _('“%(value)s” value has the correct format ' '(HH:MM[:ss[.uuuuuu]]) but it is an invalid time.'), } description = _("Time") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. 
""" if not self.has_default(): return [] now = timezone.now() if not timezone.is_naive(now): now = timezone.make_naive(now, timezone.utc) value = self.default if isinstance(value, datetime.datetime): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc) elif isinstance(value, datetime.time): second_offset = datetime.timedelta(seconds=10) lower = now - second_offset upper = now + second_offset value = datetime.datetime.combine(now.date(), value) if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc).time() else: # No explicit time / datetime value -- no checks necessary return [] if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint='It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`', obj=self, id='fields.W161', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now is not False: kwargs["auto_now"] = self.auto_now if self.auto_now_add is not False: kwargs["auto_now_add"] = self.auto_now_add if self.auto_now or self.auto_now_add: del kwargs['blank'] del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "TimeField" def to_python(self, value): if value is None: return None if isinstance(value, datetime.time): return value if isinstance(value, datetime.datetime): # Not usually a good idea to pass in a datetime here (it loses # information), but this can be a side-effect of interacting with a # database backend (e.g. Oracle), so we'll be accommodating. return value.time() try: parsed = parse_time(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_time'], code='invalid_time', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.datetime.now().time() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts times into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_timefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.TimeField, **kwargs, }) class URLField(CharField): default_validators = [validators.URLValidator()] description = _("URL") def __init__(self, verbose_name=None, name=None, **kwargs): kwargs.setdefault('max_length', 200) super().__init__(verbose_name, name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 200: del kwargs['max_length'] return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause URL validation to be performed # twice. 
return super().formfield(**{ 'form_class': forms.URLField, **kwargs, }) class BinaryField(Field): description = _("Raw binary data") empty_values = [None, b''] def __init__(self, *args, **kwargs): kwargs.setdefault('editable', False) super().__init__(*args, **kwargs) if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [*super().check(**kwargs), *self._check_str_default_value()] def _check_str_default_value(self): if self.has_default() and isinstance(self.default, str): return [ checks.Error( "BinaryField's default cannot be a string. Use bytes " "content instead.", obj=self, id='fields.E170', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.editable: kwargs['editable'] = True else: del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "BinaryField" def get_placeholder(self, value, compiler, connection): return connection.ops.binary_placeholder_sql(value) def get_default(self): if self.has_default() and not callable(self.default): return self.default default = super().get_default() if default == '': return b'' return default def get_db_prep_value(self, value, connection, prepared=False): value = super().get_db_prep_value(value, connection, prepared) if value is not None: return connection.Database.Binary(value) return value def value_to_string(self, obj): """Binary data is serialized as base64""" return b64encode(self.value_from_object(obj)).decode('ascii') def to_python(self, value): # If it's a string, it should be base64-encoded data if isinstance(value, str): return memoryview(b64decode(value.encode('ascii'))) return value class UUIDField(Field): default_error_messages = { 'invalid': _('“%(value)s” is not a valid UUID.'), } description = _('Universally unique identifier') empty_strings_allowed = False def __init__(self, verbose_name=None, **kwargs): kwargs['max_length'] = 32 super().__init__(verbose_name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "UUIDField" def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): if value is None: return None if not isinstance(value, uuid.UUID): value = self.to_python(value) if connection.features.has_native_uuid_field: return value return value.hex def to_python(self, value): if value is not None and not isinstance(value, uuid.UUID): input_form = 'int' if isinstance(value, int) else 'hex' try: return uuid.UUID(**{input_form: value}) except (AttributeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) return value def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.UUIDField, **kwargs, }) class AutoFieldMixin: db_returning = True def __init__(self, *args, **kwargs): kwargs['blank'] = True super().__init__(*args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), ] def _check_primary_key(self): if not self.primary_key: return [ checks.Error( 'AutoFields must set primary_key=True.', obj=self, id='fields.E100', ), ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['blank'] kwargs['primary_key'] = True return name, path, args, kwargs def validate(self, 
                 value, model_instance):
        pass

    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
            value = connection.ops.validate_autopk_value(value)
        return value

    def contribute_to_class(self, cls, name, **kwargs):
        assert not cls._meta.auto_field, (
            "Model %s can't have more than one auto-generated field."
            % cls._meta.label
        )
        super().contribute_to_class(cls, name, **kwargs)
        cls._meta.auto_field = self

    def formfield(self, **kwargs):
        return None


class AutoFieldMeta(type):
    """
    Metaclass to maintain backward inheritance compatibility for AutoField.

    It is intended that AutoFieldMixin become public API when it is possible
    to create a non-integer automatically-generated field using column
    defaults stored in the database.

    In many areas Django also relies on using isinstance() to check for an
    automatically-generated field as a subclass of AutoField. A new flag needs
    to be implemented on Field to be used instead.

    When these issues have been addressed, this metaclass could be used to
    deprecate inheritance from AutoField and use of isinstance() with
    AutoField for detecting automatically-generated fields.
    """

    @property
    def _subclasses(self):
        return (BigAutoField, SmallAutoField)

    def __instancecheck__(self, instance):
        return isinstance(instance, self._subclasses) or super().__instancecheck__(instance)

    def __subclasscheck__(self, subclass):
        return subclass in self._subclasses or super().__subclasscheck__(subclass)


class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta):

    def get_internal_type(self):
        return 'AutoField'

    def rel_db_type(self, connection):
        return IntegerField().db_type(connection=connection)


class BigAutoField(AutoFieldMixin, BigIntegerField):

    def get_internal_type(self):
        return 'BigAutoField'

    def rel_db_type(self, connection):
        return BigIntegerField().db_type(connection=connection)


class SmallAutoField(AutoFieldMixin, SmallIntegerField):

    def get_internal_type(self):
        return 'SmallAutoField'

    def rel_db_type(self, connection):
        return SmallIntegerField().db_type(connection=connection)
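# Illustrative usage sketch (not part of this module; the model and its field
# names are hypothetical). It shows how the field types defined above combine
# in practice, and how AutoFieldMeta makes the auto-field subclasses pass
# isinstance() checks against AutoField:
#
#     import uuid
#     from django.db import models
#
#     class Reading(models.Model):
#         id = models.BigAutoField(primary_key=True)      # 64-bit surrogate key
#         token = models.UUIDField(default=uuid.uuid4)    # stored as 32-char hex
#                                                         # without a native type
#         payload = models.BinaryField(null=True)         # default must be bytes,
#                                                         # never str (fields.E170)
#         taken_at = models.TimeField(auto_now_add=True)  # set once, on first save
#
#     isinstance(Reading._meta.pk, models.AutoField)      # True, via AutoFieldMeta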
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import copy import difflib import functools import inspect import sys import warnings from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import ( EmptyResultSet, FieldDoesNotExist, FieldError, ) from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import BaseExpression, Col, F, OuterRef, Ref from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import ( INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE, ) from django.db.models.sql.datastructures import ( BaseTable, Empty, Join, MultiJoin, ) from django.db.models.sql.where import ( AND, OR, ExtraWhere, NothingNode, WhereNode, ) from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import cached_property from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] def get_field_names_from_opts(opts): return set(chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() )) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( 'JoinInfo', ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function') ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=None): self.params = params or () self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): return dict if isinstance(self.params, Mapping) else tuple def __str__(self): return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. 
See #17755. params_type = self.params_type adapter = connection.ops.adapt_unknown_value if params_type is tuple: params = tuple(adapter(val) for val in self.params) elif params_type is dict: params = {key: adapter(val) for key, val in self.params.items()} else: raise RuntimeError("Unexpected params type: %s" % params_type) self.cursor = connection.cursor() self.cursor.execute(self.sql, params) class Query(BaseExpression): """A single SQL query.""" alias_prefix = 'T' subq_aliases = frozenset([alias_prefix]) compiler = 'SQLCompiler' def __init__(self, model, where=WhereNode, alias_cols=True): self.model = model self.alias_refcount = {} # alias_map is the most important data structure regarding joins. # It's used for recording which joins exist in the query and what # types they are. The key is the alias of the joined table (possibly # the table name) and the value is a Join-like object (see # sql.datastructures.Join for more information). self.alias_map = {} # Whether to provide alias to columns during reference resolving. self.alias_cols = alias_cols # Sometimes the query contains references to aliases in outer queries (as # a result of split_exclude). Correct alias quoting needs to know these # aliases too. # Map external tables to whether they are aliased. self.external_aliases = {} self.table_map = {} # Maps table names to list of aliases. self.default_cols = True self.default_ordering = True self.standard_ordering = True self.used_aliases = set() self.filter_is_sticky = False self.subquery = False # SQL-related attributes # Select and related select clauses are expressions to use in the # SELECT clause of the query. # The select is used for cases where we want to set up the select # clause to contain other than default fields (values(), subqueries...) # Note that annotations go to annotations dictionary. self.select = () self.where = where() self.where_class = where # The group_by attribute can have one of the following forms: # - None: no group by at all in the query # - A tuple of expressions: group by (at least) those expressions. # String refs are also allowed for now. # - True: group by all select fields of the model # See compiler.get_group_by() for details. self.group_by = None self.order_by = () self.low_mark, self.high_mark = 0, None # Used for offset/limit self.distinct = False self.distinct_fields = () self.select_for_update = False self.select_for_update_nowait = False self.select_for_update_skip_locked = False self.select_for_update_of = () self.select_related = False # Arbitrary limit for select_related to prevents infinite recursion. self.max_depth = 5 # Holds the selects defined by a call to values() or values_list() # excluding annotation_select and extra_select. self.values_select = () # SQL annotation-related attributes self.annotations = {} # Maps alias -> Annotation Expression self.annotation_select_mask = None self._annotation_select_cache = None # Set combination attributes self.combinator = None self.combinator_all = False self.combined_queries = () # These are for extensions. The contents are more or less appended # verbatim to the appropriate clause. self.extra = {} # Maps col_alias -> (col_sql, params). self.extra_select_mask = None self._extra_select_cache = None self.extra_tables = () self.extra_order_by = () # A tuple that is a set of model field names and either True, if these # are the fields to defer, or False if these are the only fields to # load. 
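        # For example (illustrative, assuming a model with fields name/body):
        #   qs.defer('body') -> deferred_loading == (frozenset({'body'}), True)
        #   qs.only('name')  -> deferred_loading == (frozenset({'name'}), False)
        # i.e. True means "defer these fields", False means "load only these".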
self.deferred_loading = (frozenset(), True) self._filtered_relations = {} self.explain_query = False self.explain_format = None self.explain_options = {} @property def output_field(self): if len(self.select) == 1: return self.select[0].field elif len(self.annotation_select) == 1: return next(iter(self.annotation_select.values())).output_field @property def has_select_fields(self): return bool(self.select or self.annotation_select_mask or self.extra_select_mask) @cached_property def base_table(self): for alias in self.alias_map: return alias def __str__(self): """ Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ sql, params = self.sql_with_params() return sql % params def sql_with_params(self): """ Return the query as an SQL string and the parameters that will be substituted into the query. """ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() def __deepcopy__(self, memo): """Limit the amount of work when a Query is deepcopied.""" result = self.clone() memo[id(self)] = result return result def get_compiler(self, using=None, connection=None): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] return connection.ops.compiler(self.compiler)(self, connection, using) def get_meta(self): """ Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses. """ return self.model._meta def clone(self): """ Return a copy of the current Query. A lightweight alternative to to deepcopy(). """ obj = Empty() obj.__class__ = self.__class__ # Copy references to everything. obj.__dict__ = self.__dict__.copy() # Clone attributes that can't use shallow copy. obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.external_aliases = self.external_aliases.copy() obj.table_map = self.table_map.copy() obj.where = self.where.clone() obj.annotations = self.annotations.copy() if self.annotation_select_mask is None: obj.annotation_select_mask = None else: obj.annotation_select_mask = self.annotation_select_mask.copy() # _annotation_select_cache cannot be copied, as doing so breaks the # (necessary) state in which both annotations and # _annotation_select_cache point to the same underlying objects. # It will get re-populated in the cloned queryset the next time it's # used. obj._annotation_select_cache = None obj.extra = self.extra.copy() if self.extra_select_mask is None: obj.extra_select_mask = None else: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is None: obj._extra_select_cache = None else: obj._extra_select_cache = self._extra_select_cache.copy() if self.select_related is not False: # Use deepcopy because select_related stores fields in nested # dicts. obj.select_related = copy.deepcopy(obj.select_related) if 'subq_aliases' in self.__dict__: obj.subq_aliases = self.subq_aliases.copy() obj.used_aliases = self.used_aliases.copy() obj._filtered_relations = self._filtered_relations.copy() # Clear the cached_property try: del obj.base_table except AttributeError: pass return obj def chain(self, klass=None): """ Return a copy of the current Query that's ready for another operation. The klass argument changes the type of the Query, e.g. UpdateQuery. 
""" obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, '_setup_query'): obj._setup_query() return obj def relabeled_clone(self, change_map): clone = self.clone() clone.change_aliases(change_map) return clone def _get_col(self, target, field, alias): if not self.alias_cols: alias = None return target.get_col(alias, field) def rewrite_cols(self, annotation, col_cnt): # We must make sure the inner query has the referred columns in it. # If we are aggregating over an annotation, then Django uses Ref() # instances to note this. However, if we are annotating over a column # of a related model, then it might be that column isn't part of the # SELECT clause of the inner query, and we must manually make sure # the column is selected. An example case is: # .aggregate(Sum('author__awards')) # Resolving this expression results in a join to author, but there # is no guarantee the awards column of author is in the select clause # of the query. Thus we must manually add the column to the inner # query. orig_exprs = annotation.get_source_expressions() new_exprs = [] for expr in orig_exprs: # FIXME: These conditions are fairly arbitrary. Identify a better # method of having expressions decide which code path they should # take. if isinstance(expr, Ref): # Its already a Ref to subquery (see resolve_ref() for # details) new_exprs.append(expr) elif isinstance(expr, (WhereNode, Lookup)): # Decompose the subexpressions further. The code here is # copied from the else clause, but this condition must appear # before the contains_aggregate/is_summary condition below. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) else: # Reuse aliases of expressions already selected in subquery. for col_alias, selected_annotation in self.annotation_select.items(): if selected_annotation == expr: new_expr = Ref(col_alias, expr) break else: # An expression that is not selected the subquery. if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary): # Reference column or another aggregate. Select it # under a non-conflicting alias. col_cnt += 1 col_alias = '__col%d' % col_cnt self.annotations[col_alias] = expr self.append_annotation_mask([col_alias]) new_expr = Ref(col_alias, expr) else: # Some other expression not referencing database values # directly. Its subexpression might contain Cols. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) annotation.set_source_expressions(new_exprs) return annotation, col_cnt def get_aggregation(self, using, added_aggregate_names): """ Return the dictionary with the values of the existing aggregations. """ if not self.annotation_select: return {} existing_annotations = [ annotation for alias, annotation in self.annotations.items() if alias not in added_aggregate_names ] # Decide if we need to use a subquery. # # Existing annotations would cause incorrect results as get_aggregation() # must produce just one result and thus must not use GROUP BY. But we # aren't smart enough to remove the existing annotations from the # query, so those would force us to use GROUP BY. # # If the query has limit or distinct, or uses set operations, then # those operations must be done in a subquery so that the query # aggregates on the limit and/or distinct results instead of applying # the distinct and limit after the aggregation. 
        if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
                self.distinct or self.combinator):
            from django.db.models.sql.subqueries import AggregateQuery
            outer_query = AggregateQuery(self.model)
            inner_query = self.clone()
            inner_query.select_for_update = False
            inner_query.select_related = False
            inner_query.set_annotation_mask(self.annotation_select)
            if not self.is_sliced and not self.distinct_fields:
                # Queries with distinct_fields need ordering and when a limit
                # is applied we must take the slice from the ordered query.
                # Otherwise no need for ordering.
                inner_query.clear_ordering(True)
            if not inner_query.distinct:
                # If the inner query uses default select and it has some
                # aggregate annotations, then we must make sure the inner
                # query is grouped by the main model's primary key. However,
                # clearing the select clause can alter results if distinct is
                # used.
                has_existing_aggregate_annotations = any(
                    annotation for annotation in existing_annotations
                    if getattr(annotation, 'contains_aggregate', True)
                )
                if inner_query.default_cols and has_existing_aggregate_annotations:
                    inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
                inner_query.default_cols = False

            relabels = {t: 'subquery' for t in inner_query.alias_map}
            relabels[None] = 'subquery'
            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            col_cnt = 0
            for alias, expression in list(inner_query.annotation_select.items()):
                annotation_select_mask = inner_query.annotation_select_mask
                if expression.is_summary:
                    expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
                    outer_query.annotations[alias] = expression.relabeled_clone(relabels)
                    del inner_query.annotations[alias]
                    annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
                inner_query.set_annotation_mask(inner_query.annotation_select_mask)
            if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
                # In case of Model.objects[0:3].count(), there would be no
                # field selected in the inner query, yet we must use a subquery.
                # So, make sure at least one field is selected.
                inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
            try:
                outer_query.add_subquery(inner_query, using)
            except EmptyResultSet:
                return {
                    alias: None
                    for alias in outer_query.annotation_select
                }
        else:
            outer_query = self
            self.select = ()
            self.default_cols = False
            self.extra = {}

        outer_query.clear_ordering(True)
        outer_query.clear_limits()
        outer_query.select_for_update = False
        outer_query.select_related = False
        compiler = outer_query.get_compiler(using)
        result = compiler.execute_sql(SINGLE)
        if result is None:
            result = [None] * len(outer_query.annotation_select)

        converters = compiler.get_converters(outer_query.annotation_select.values())
        result = next(compiler.apply_converters((result,), converters))

        return dict(zip(outer_query.annotation_select, result))

    def get_count(self, using):
        """
        Perform a COUNT() query using the current filter constraints.
""" obj = self.clone() obj.add_annotation(Count('*'), alias='__count', is_summary=True) number = obj.get_aggregation(using, ['__count'])['__count'] if number is None: number = 0 return number def has_filters(self): return self.where def has_results(self, using): q = self.clone() if not q.distinct: if q.group_by is True: q.add_fields((f.attname for f in self.model._meta.concrete_fields), False) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. q.set_group_by(allow_aliases=False) q.clear_select_clause() q.clear_ordering(True) q.set_limits(high=1) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() q.explain_query = True q.explain_format = format q.explain_options = options compiler = q.get_compiler(using=using) return '\n'.join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ assert self.model == rhs.model, \ "Cannot combine queries on two different base models." assert not self.is_sliced, \ "Cannot combine queries once a slice has been taken." assert self.distinct == rhs.distinct, \ "Cannot combine a unique query with a non-unique query." assert self.distinct_fields == rhs.distinct_fields, \ "Cannot combine queries with different distinct fields." # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = (connector == AND) # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) # Base table must be present in the query - this is the same # table on both sides. self.get_initial_alias() joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. 
self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Now relabel a copy of the rhs where-clause and add it to the current # one. w = rhs.where.clone() w.relabel_aliases(change_map) self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. if rhs.select: self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) else: self.select = () if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self.extra and rhs.extra: raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.") self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. self.order_by = rhs.order_by or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def deferred_to_data(self, target, callback): """ Convert the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place. The "target" parameter is the instance that is populated (in place). The "callback" is a function that is called whenever a (model, field) pair need to be added to "target". It accepts three parameters: "target", and the model and list of fields being added for that model. """ field_names, defer = self.deferred_loading if not field_names: return orig_opts = self.get_meta() seen = {} must_include = {orig_opts.concrete_model: {orig_opts.pk}} for field_name in field_names: parts = field_name.split(LOOKUP_SEP) cur_model = self.model._meta.concrete_model opts = orig_opts for name in parts[:-1]: old_model = cur_model if name in self._filtered_relations: name = self._filtered_relations[name].relation_name source = opts.get_field(name) if is_reverse_o2o(source): cur_model = source.related_model else: cur_model = source.remote_field.model opts = cur_model._meta # Even if we're "just passing through" this model, we must add # both the current model's pk and the related reference field # (if it's not a reverse relation) to the things we select. if not is_reverse_o2o(source): must_include[old_model].add(source) add_to_dict(must_include, cur_model, opts.pk) field = opts.get_field(parts[-1]) is_reverse_object = field.auto_created and not field.concrete model = field.related_model if is_reverse_object else field.model model = model._meta.concrete_model if model == opts.model: model = cur_model if not is_reverse_o2o(field): add_to_dict(seen, model, field) if defer: # We need to load all fields for each model, except those that # appear in "seen" (for all models that appear in "seen"). The only # slight complexity here is handling fields that exist on parent # models. 
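            # Illustrative: with Child(Parent) and Child.objects.defer('name')
            # where 'name' is inherited, the field's concrete model is Parent,
            # so the loop below must record it under Parent, not Child.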
            workset = {}
            for model, values in seen.items():
                for field in model._meta.local_fields:
                    if field not in values:
                        m = field.model._meta.concrete_model
                        add_to_dict(workset, m, field)
            for model, values in must_include.items():
                # If we haven't included a model in workset, we don't add the
                # corresponding must_include fields for that model, since an
                # empty set means "include all fields". That's why there's no
                # "else" branch here.
                if model in workset:
                    workset[model].update(values)
            for model, values in workset.items():
                callback(target, model, values)
        else:
            for model, values in must_include.items():
                if model in seen:
                    seen[model].update(values)
                else:
                    # As we've passed through this model, but not explicitly
                    # included any fields, we have to make sure it's mentioned
                    # so that only the "must include" fields are pulled in.
                    seen[model] = values
            # Now ensure that every model in the inheritance chain is mentioned
            # in the parent list. Again, it must be mentioned to ensure that
            # only "must include" fields are pulled in.
            for model in orig_opts.get_parent_list():
                seen.setdefault(model, set())
            for model, values in seen.items():
                callback(target, model, values)

    def table_alias(self, table_name, create=False, filtered_relation=None):
        """
        Return a table alias for the given table_name and whether this is a
        new alias or not.

        If 'create' is true, a new alias is always created. Otherwise, the
        most recently created alias for the table (if one exists) is reused.
        """
        alias_list = self.table_map.get(table_name)
        if not create and alias_list:
            alias = alias_list[0]
            self.alias_refcount[alias] += 1
            return alias, False

        # Create a new alias for this table.
        if alias_list:
            alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
            alias_list.append(alias)
        else:
            # The first occurrence of a table uses the table name directly.
            alias = filtered_relation.alias if filtered_relation is not None else table_name
            self.table_map[table_name] = [alias]
        self.alias_refcount[alias] = 1
        return alias, True

    def ref_alias(self, alias):
        """Increase the reference count for this alias."""
        self.alias_refcount[alias] += 1

    def unref_alias(self, alias, amount=1):
        """Decrease the reference count for this alias."""
        self.alias_refcount[alias] -= amount

    def promote_joins(self, aliases):
        """
        Promote recursively the join type of the given aliases and their
        children to an outer join. A join is only promoted if it is nullable
        or the parent join is an outer join.

        The children promotion is done to avoid join chains that contain a
        LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b
        is promoted, then we must also promote b->c automatically, or
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].parent_alias
            parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
            already_louter = self.alias_map[alias].join_type == LOUTER
            if ((self.alias_map[alias].nullable or parent_louter) and
                    not already_louter):
                self.alias_map[alias] = self.alias_map[alias].promote()
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
aliases.extend( join for join in self.alias_map if self.alias_map[join].parent_alias == alias and join not in aliases ) def demote_joins(self, aliases): """ Change join type from LOUTER to INNER for all joins in aliases. Similarly to promote_joins(), this method must ensure no join chains containing first an outer, then an inner join are generated. If we are demoting b->c join in chain a LOUTER b LOUTER c then we must demote a->b automatically, or otherwise the demotion of b->c doesn't actually change anything in the query results. . """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type == LOUTER: self.alias_map[alias] = self.alias_map[alias].demote() parent_alias = self.alias_map[alias].parent_alias if self.alias_map[parent_alias].join_type == INNER: aliases.append(parent_alias) def reset_refcounts(self, to_counts): """ Reset reference counts for aliases so that they match the value passed in `to_counts`. """ for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount) def change_aliases(self, change_map): """ Change the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause. """ assert set(change_map).isdisjoint(change_map.values()) # 1. Update references in "select" (normal columns plus aliases), # "group by" and "where". self.where.relabel_aliases(change_map) if isinstance(self.group_by, tuple): self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by]) self.select = tuple([col.relabeled_clone(change_map) for col in self.select]) self.annotations = self.annotations and { key: col.relabeled_clone(change_map) for key, col in self.annotations.items() } # 2. Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = { # Table is aliased or it's being changed and thus is aliased. change_map.get(alias, alias): (aliased or alias in change_map) for alias, aliased in self.external_aliases.items() } def bump_prefix(self, outer_query): """ Change the alias prefix to the next letter in the alphabet in a way that the outer query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. """ def prefix_gen(): """ Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ... """ alphabet = ascii_uppercase prefix = chr(ord(self.alias_prefix) + 1) yield prefix for n in count(1): seq = alphabet[alphabet.index(prefix):] if prefix else alphabet for s in product(seq, repeat=n): yield ''.join(s) prefix = None if self.alias_prefix != outer_query.alias_prefix: # No clashes between self and outer query should be possible. return # Explicitly avoid infinite loop. The constant divider is based on how # much depth recursive subquery references add to the stack. 
This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( 'Maximum recursion depth exceeded: too many subqueries.' ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases) self.change_aliases({ alias: '%s%d' % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) }) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) else: alias = self.join(BaseTable(self.get_meta().db_table, None)) return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. """ return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a sql.datastructures.BaseTable or Join. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join, with_filtered_relation=False) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. 
""" if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def add_annotation(self, annotation, alias, is_summary=False): """Add a single annotation expression to the Query.""" annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None, summarize=is_summary) self.append_annotation_mask([alias]) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. clone.bump_prefix(query) clone.subquery = True # It's safe to drop ordering if the queryset isn't using slicing, # distinct(*fields) or select_for_update(). if (self.low_mark == 0 and self.high_mark is None and not self.distinct_fields and not self.select_for_update): clone.clear_ordering(True) clone.where.resolve_expression(query, *args, **kwargs) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, 'external_aliases'): resolved.external_aliases.update(clone.external_aliases) clone.annotations[key] = resolved # Outer query's aliases are considered external. for alias, table in query.alias_map.items(): clone.external_aliases[alias] = ( (isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias) or (isinstance(table, BaseTable) and table.table_name != table.table_alias) ) return clone def get_external_cols(self): exprs = chain(self.annotations.values(), self.where.children) return [ col for col in self._gen_cols(exprs) if col.alias in self.external_aliases ] def as_sql(self, compiler, connection): sql, params = self.get_compiler(connection=connection).as_sql() if self.subquery: sql = '(%s)' % sql return sql, params def resolve_lookup_value(self, value, can_reuse, allow_joins): if hasattr(value, 'resolve_expression'): value = value.resolve_expression( self, reuse=can_reuse, allow_joins=allow_joins, ) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. return type(value)( self.resolve_lookup_value(sub_value, can_reuse, allow_joins) for sub_value in value ) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self.annotations: expression, expression_lookups = refs_expression(lookup_splitted, self.annotations) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model %s".' 
% (lookup, self.get_meta().model.__name__) ) return lookup_parts, field_parts, False def check_query_object_type(self, value, opts, field): """ Check whether the object passed while querying is of the correct type. If not, raise a ValueError specifying the wrong object. """ if hasattr(value, '_meta'): if not check_rel_lookup_compatibility(value._meta.model, opts, field): raise ValueError( 'Cannot query "%s": Must be "%s" instance.' % (value, opts.object_name)) def check_related_objects(self, field, value, opts): """Check the type of object passed to query relations.""" if field.is_relation: # Check that the field and the queryset use the same model in a # query like .filter(author=Author.objects.all()). For example, the # opts would be Author's (from the author field) and value.model # would be Author.objects.all() queryset's .model (Author also). # The field is the related field on the lhs side. if (isinstance(value, Query) and not value.has_select_fields and not check_rel_lookup_compatibility(value.model, opts, field)): raise ValueError( 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' % (value.model._meta.object_name, opts.object_name) ) elif hasattr(value, '_meta'): self.check_query_object_type(value, opts, field) elif hasattr(value, '__iter__'): for v in value: self.check_query_object_type(v, opts, field) def check_filterable(self, expression): """Raise an error if expression cannot be used in a WHERE clause.""" if not getattr(expression, 'filterable', True): raise NotSupportedError( expression.__class__.__name__ + ' is disallowed in the filter ' 'clause.' ) if hasattr(expression, 'get_source_expressions'): for expr in expression.get_source_expressions(): self.check_filterable(expr) def build_lookup(self, lookups, lhs, rhs): """ Try to extract transforms and lookup from given lhs. The lhs value is something that works like SQLExpression. The rhs value is what the lookup is going to compare against. The lookups is a list of names to extract using get_lookup() and get_transform(). """ # __exact is the default lookup if one isn't given. *transforms, lookup_name = lookups or ['exact'] for name in transforms: lhs = self.try_transform(lhs, name) # First try get_lookup() so that the lookup takes precedence if the lhs # supports both transform and lookup for the name. lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: if lhs.field.is_relation: raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name)) # A lookup wasn't found. Try to interpret the name as a transform # and do an Exact lookup against it. lhs = self.try_transform(lhs, lookup_name) lookup_name = 'exact' lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value unless the lookup supports it. if lookup.rhs is None and not lookup.can_use_none_as_rhs: if lookup_name not in ('exact', 'iexact'): raise ValueError("Cannot use None as a query value") return lhs.get_lookup('isnull')(lhs, True) # For Oracle '' is equivalent to null. The check must be done at this # stage because join promotion can't be done in the compiler. Using # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. # A similar thing is done in is_nullable(), too. 
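        # Illustrative consequence: on Oracle (the backend with
        # interprets_empty_strings_as_nulls), .filter(name__exact='') is
        # rewritten below to the equivalent of .filter(name__isnull=True),
        # since that backend stores '' as NULL.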
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and lookup_name == 'exact' and lookup.rhs == ''): return lhs.get_lookup('isnull')(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. """ transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups()) if suggested_lookups: suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups) else: suggestion = '.' raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter(self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False, check_filterable=True): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. 
""" if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") if isinstance(filter_expr, Q): return self._add_q( filter_expr, branch_negated=branch_negated, current_negated=current_negated, used_aliases=can_reuse, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) if hasattr(filter_expr, 'resolve_expression'): if not getattr(filter_expr, 'conditional', False): raise TypeError('Cannot filter against a non-conditional expression.') condition = self.build_lookup( ['exact'], filter_expr.resolve_expression(self, allow_joins=allow_joins), True ) clause = self.where_class() clause.add(condition, AND) return clause, [] arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if check_filterable: self.check_filterable(reffed_expression) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins) used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)} if check_filterable: self.check_filterable(value) clause = self.where_class() if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) clause.add(condition, AND) return clause, [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: # No support for transforms for relational fields num_lookups = len(lookups) if num_lookups > 1: raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0])) if len(targets) == 1: col = self._get_col(targets[0], join_info.final_field, alias) else: col = MultiColSource(alias, targets, join_info.targets, join_info.final_field) else: col = self._get_col(targets[0], join_info.final_field, alias) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause.add(condition, AND) require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None: require_outer = True if (lookup_type != 'isnull' and ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER)): # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. 
The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). lookup_class = targets[0].get_lookup('isnull') col = self._get_col(targets[0], join_info.targets[0], alias) clause.add(lookup_class(col, False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_clause): self.add_q(Q(**{filter_clause[0]: filter_clause[1]})) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. """ # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK. existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER} clause, _ = self._add_q(q_object, self.used_aliases) if clause: self.where.add(clause, AND) self.demote_joins(existing_inner) def build_where(self, filter_expr): return self.build_filter(filter_expr, allow_joins=False)[0] def _add_q(self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True, check_filterable=True): """Add a Q-object to the current filter.""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated) for child in q_object.children: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) joinpromoter.add_votes(needed_inner) if child_clause: target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False): """Add a FilteredRelation object to the current filter.""" connector = q_object.connector current_negated ^= q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) for child in q_object.children: if isinstance(child, Node): child_clause = self.build_filtered_relation_q( child, reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, ) else: child_clause, _ = self.build_filter( child, can_reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, allow_joins=True, split_subq=False, reuse_with_filtered_relation=True, ) target_clause.add(child_clause, connector) return target_clause def add_filtered_relation(self, filtered_relation, alias): filtered_relation.alias = alias lookups = dict(get_children_from_q(filtered_relation.condition)) for lookup in chain((filtered_relation.relation_name,), lookups): lookup_parts, field_parts, _ = self.solve_lookup_type(lookup) shift = 2 if not lookup_parts else 1 if len(field_parts) > (shift + len(lookup_parts)): raise ValueError( "FilteredRelation's condition doesn't support nested " "relations (got %r)." 
% lookup ) self._filtered_relations[filtered_relation.alias] = filtered_relation def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): """ Walk the list of names and turns them into PathInfo tuples. A single name in 'names' can generate multiple PathInfos (m2m, for example). 'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). If fail_on_missing is set to True, then a name that can't be resolved will generate a FieldError. Return a list of PathInfo tuples. In addition return the final field (the last used join field) and target (which is a field guaranteed to contain the same value as the final field). Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == 'pk': name = opts.pk.name field = None filtered_relation = None try: field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted([ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ]) raise FieldError("Cannot resolve keyword '%s' into field. " "Choices are: %s" % (name, ", ".join(available))) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, 'get_path_info'): pathinfos = field.get_path_info(filtered_relation) if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." 
% (names[pos + 1], name)) break return path, final_field, targets, names[pos + 1:] def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial(transform, name=name, previous=final_transformer) # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. 
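        # Illustrative walk (hypothetical models): for
        # Book.objects.filter(author__name__iexact='x'), the pivot loop above
        # resolves ['author', 'name'] to a Book -> Author PathInfo with 'name'
        # as the final field, leaving ['iexact'] to the lookup machinery; the
        # loop below then turns each PathInfo into a Join and records its
        # alias in 'joins'.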
for join in path: if join.filtered_relation: filtered_relation = join.filtered_relation.clone() table_alias = filtered_relation.alias else: filtered_relation = None table_alias = None opts = join.to_opts if join.direct: nullable = self.is_nullable(join.join_field) else: nullable = True connection = Join( opts.db_table, alias, table_alias, INNER, join.join_field, nullable, filtered_relation=filtered_relation, ) reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None alias = self.join( connection, reuse=reuse, reuse_with_filtered_relation=reuse_with_filtered_relation, ) joins.append(alias) if filtered_relation: filtered_relation.path = joins[:] return JoinInfo(final_field, targets, opts, joins, path, final_transformer) def trim_joins(self, targets, joins, path): """ The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join. """ joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets} targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins @classmethod def _gen_cols(cls, exprs): for expr in exprs: if isinstance(expr, Col): yield expr else: yield from cls._gen_cols(expr.get_source_expressions()) @classmethod def _gen_col_aliases(cls, exprs): yield from (expr.alias for expr in cls._gen_cols(exprs)) def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False): if not allow_joins and LOOKUP_SEP in name: raise FieldError("Joined field references are not permitted in this query") annotation = self.annotations.get(name) if annotation is not None: if not allow_joins: for alias in self._gen_col_aliases([annotation]): if isinstance(self.alias_map[alias], Join): raise FieldError( 'Joined field references are not permitted in ' 'this query' ) if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse) targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if not allow_joins and len(join_list) > 1: raise FieldError('Joined field references are not permitted in this query') if len(targets) > 1: raise FieldError("Referencing multicolumn fields with F() objects " "isn't supported") # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. 
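            # Sketch of the two outcomes here (hypothetical lookups):
            # resolving 'author__name' joins to the related table and yields
            # its 'name' column, while 'author__name__unknown' reaches this
            # point with 'unknown' treated as a transform, so the
            # transform_function() call below raises FieldError.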
join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) return self._get_col(targets[0], join_info.targets[0], join_list[-1]) def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT (pk IN (SELECT parent_id FROM thetable WHERE name = 'foo' AND parent_id IS NOT NULL)) It might be worth it to consider using WHERE NOT EXISTS as that has saner null handling, and is easier for the backend's optimizer to handle. """ filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, OuterRef): filter_expr = (filter_lhs, OuterRef(filter_rhs)) elif isinstance(filter_rhs, F): filter_expr = (filter_lhs, OuterRef(filter_rhs.name)) # Generate the inner query. query = Query(self.model) query._filtered_relations = self._filtered_relations query.add_filter(filter_expr) query.clear_ordering(True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) # Add extra check to make sure the selected field will not be null # since we are adding an IN <subquery> clause. This prevents the # database from tripping over IN (...,NULL,...) selects and returning # nothing col = query.select[0] select_field = col.target alias = col.alias if self.is_nullable(select_field): lookup_class = select_field.get_lookup('isnull') lookup = lookup_class(select_field.get_col(alias), False) query.where.add(lookup, AND) if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases[alias] = True condition, needed_inner = self.build_filter( ('%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. 
Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None @property def is_sliced(self): return self.low_mark != 0 or self.high_mark is not None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.is_sliced def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. """ self.select = () self.values_select = () def add_select_col(self, col): self.select += col, self.values_select += col.output_field.name, def set_select(self, cols): self.default_cols = False self.select = tuple(cols) def add_distinct_fields(self, *field_names): """ Add and resolve the given fields to the query's "distinct on" clause. """ self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Add the given (model) fields to the select set. Add the field names in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: cols = [] for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins( join_info.targets, join_info.joins, join_info.path, ) for target in targets: cols.append(join_info.transform_function(target, final_alias)) if cols: self.set_select(cols) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise else: names = sorted([ *get_field_names_from_opts(opts), *self.extra, *self.annotation_select, *self._filtered_relations ]) raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (name, ", ".join(names))) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. 
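
        For example (illustrative only, assuming 'pubdate' and 'name' fields
        on the model): add_ordering('-pubdate', 'name') appends both items
        to order_by, while add_ordering() with no arguments disables the
        model's default ordering.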
""" errors = [] for item in ordering: if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item): errors.append(item) if getattr(item, 'contains_aggregate', False): raise FieldError( 'Using an aggregate in order_by() without also including ' 'it in annotate() is not allowed: %s' % item ) if errors: raise FieldError('Invalid order_by arguments: %s' % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force_empty): """ Remove any ordering settings. If 'force_empty' is True, there will be no ordering in the resulting query (not even the model's default). """ self.order_by = () self.extra_order_by = () if force_empty: self.default_ordering = False def set_group_by(self, allow_aliases=True): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ group_by = list(self.select) if self.annotation_select: for alias, annotation in self.annotation_select.items(): signature = inspect.signature(annotation.get_group_by_cols) if 'alias' not in signature.parameters: annotation_class = annotation.__class__ msg = ( '`alias=None` must be added to the signature of ' '%s.%s.get_group_by_cols().' ) % (annotation_class.__module__, annotation_class.__qualname__) warnings.warn(msg, category=RemovedInDjango40Warning) group_by_cols = annotation.get_group_by_cols() else: if not allow_aliases: alias = None group_by_cols = annotation.get_group_by_cols(alias=alias) group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). """ if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != '%': entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. 
Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.deferred_loading = existing.difference(field_names), False def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.deferred_loading = frozenset(field_names), False def get_loaded_field_names(self): """ If any fields are marked to be deferred, return a dictionary mapping models to a set of names in those fields that will be loaded. If a model is not in the returned dictionary, none of its fields are deferred. If no fields are marked for deferral, return an empty dictionary. """ # We cache this because we call this function multiple times # (compiler.fill_related_selections, query.iterator) try: return self._loaded_field_names_cache except AttributeError: collection = {} self.deferred_to_data(collection, self.get_loaded_field_names_cb) self._loaded_field_names_cache = collection return collection def get_loaded_field_names_cb(self, target, model, fields): """Callback used by get_deferred_field_names().""" target[model] = {f.attname for f in fields} def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() if fields: field_names = [] extra_names = [] annotation_names = [] if not self.extra and not self.annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. 
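                # For example (a sketch with hypothetical field names):
                # .values('name', 'rating') on a query with no extra or
                # annotations stores field_names == ['name', 'rating']
                # directly; the else branch below instead partitions the
                # names between extra_select, annotation_select, and model
                # fields, narrowing the corresponding masks.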
                field_names = list(fields)
            else:
                self.default_cols = False
                for f in fields:
                    if f in self.extra_select:
                        extra_names.append(f)
                    elif f in self.annotation_select:
                        annotation_names.append(f)
                    else:
                        field_names.append(f)
                self.set_extra_mask(extra_names)
                self.set_annotation_mask(annotation_names)
        else:
            field_names = [f.attname for f in self.model._meta.concrete_fields]
        # Selected annotations must be known before setting the GROUP BY
        # clause.
        if self.group_by is True:
            self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
            # Disable GROUP BY aliases to avoid orphaning references to the
            # SELECT clause which is about to be cleared.
            self.set_group_by(allow_aliases=False)
            self.clear_select_fields()

        self.values_select = tuple(field_names)
        self.add_fields(field_names, True)

    @property
    def annotation_select(self):
        """
        Return the dictionary of aggregate columns that are not masked and
        should be used in the SELECT clause. Cache this result for
        performance.
        """
        if self._annotation_select_cache is not None:
            return self._annotation_select_cache
        elif not self.annotations:
            return {}
        elif self.annotation_select_mask is not None:
            self._annotation_select_cache = {
                k: v for k, v in self.annotations.items()
                if k in self.annotation_select_mask
            }
            return self._annotation_select_cache
        else:
            return self.annotations

    @property
    def extra_select(self):
        if self._extra_select_cache is not None:
            return self._extra_select_cache
        if not self.extra:
            return {}
        elif self.extra_select_mask is not None:
            self._extra_select_cache = {
                k: v for k, v in self.extra.items()
                if k in self.extra_select_mask
            }
            return self._extra_select_cache
        else:
            return self.extra

    def trim_start(self, names_with_path):
        """
        Trim joins from the start of the join path. The candidates for trim
        are the PathInfos in names_with_path structure that are m2m joins.

        Also set the select column so the start matches the join.

        This method is meant to be used for generating the subquery joins &
        cols in split_exclude().

        Return a lookup usable for doing outerq.filter(lookup=self) and a
        boolean indicating if the joins in the prefix contain a LEFT OUTER
        join.
        """
        all_paths = []
        for _, paths in names_with_path:
            all_paths.extend(paths)
        contains_louter = False
        # Trim and operate only on tables that were generated for
        # the lookup part of the query. That is, avoid trimming
        # joins generated for F() expressions.
        lookup_tables = [
            t for t in self.alias_map
            if t in self._lookup_joins or t == self.base_table
        ]
        for trimmed_paths, path in enumerate(all_paths):
            if path.m2m:
                break
            if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
                contains_louter = True
            alias = lookup_tables[trimmed_paths]
            self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field.
        join_field = path.join_field.field
        # Build the filter prefix.
        paths_in_prefix = trimmed_paths
        trimmed_prefix = []
        for name, path in names_with_path:
            if paths_in_prefix - len(path) < 0:
                break
            trimmed_prefix.append(name)
            paths_in_prefix -= len(path)
        trimmed_prefix.append(
            join_field.foreign_related_fields[0].name)
        trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
        # (that is, self). We can't do this for:
        # - LEFT JOINs because we would miss those rows that have nothing on
        #   the outer side,
        # - INNER JOINs from filtered relations because we would miss their
        #   filters.
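        # Hedged illustration (hypothetical tables): for split_exclude() on
        # ~Q(child__name='foo'), the inner query starts at the parent table
        # joined to the child table. When that first join is an INNER join
        # without a filtered relation, the branch below drops the parent
        # reference and selects child.parent_id directly; otherwise the join
        # is kept and the parent side's target column (parent.id) is
        # selected instead.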
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]] if first_join.join_type != LOUTER and not first_join.filtered_relation: select_fields = [r[0] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths + 1] self.unref_alias(lookup_tables[trimmed_paths]) extra_restriction = join_field.get_extra_restriction( self.where_class, None, lookup_tables[trimmed_paths + 1]) if extra_restriction: self.where.add(extra_restriction, AND) else: # TODO: It might be possible to trim more joins from the start of the # inner query if it happens to have a longer join chain containing the # values in select_fields. Lets punt this one for now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a Join instead of a BaseTable reference. # But the first entry in the query's FROM clause must not be a JOIN. for table in self.alias_map: if self.alias_refcount[table] > 0: self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table) break self.set_select([f.get_col(select_alias) for f in select_fields]) return trimmed_prefix, contains_louter def is_nullable(self, field): """ Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable. """ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have # (nor should it have) knowledge of which connection is going to be # used. The proper fix would be to defer all decisions where # is_nullable() is needed to the compiler stage, but that is not easy # to do currently. return ( connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed ) or field.null def get_order_dir(field, default='ASC'): """ Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort. The '-' prefix always sorts the opposite way. """ dirn = ORDER_DIR[default] if field[0] == '-': return field[1:], dirn[1] return field, dirn[0] def add_to_dict(data, key, value): """ Add "value" to the set of values for "key", whether or not "key" already exists. """ if key in data: data[key].add(value) else: data[key] = {value} def is_reverse_o2o(field): """ Check if the given field is reverse-o2o. The field is expected to be some sort of relation field or related object. """ return field.is_relation and field.one_to_one and not field.concrete class JoinPromoter: """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.votes = Counter() def add_votes(self, votes): """ Add single vote per item to self.votes. Parameter can be any iterable. """ self.votes.update(votes) def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. 
        So, change as many joins as possible to INNER, but don't make OUTER
        joins INNER if that could remove results from the query.
        """
        to_promote = set()
        to_demote = set()
        # The effective_connector is used so that NOT (a AND b) is treated
        # similarly to (a OR b) for join promotion.
        for table, votes in self.votes.items():
            # We must use outer joins in OR case when the join isn't contained
            # in all of the joins. Otherwise the INNER JOIN itself could remove
            # valid results. Consider the case where a model with rel_a and
            # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (is null -- for
            # example a reverse foreign key or a null value in a direct
            # foreign key), and there is a matching row in rel_b with col=2,
            # then an INNER join to rel_a would remove a valid match from the
            # query. So, we need to promote any existing INNER to LOUTER (it
            # is possible this promotion in turn will be demoted later on).
            if self.effective_connector == 'OR' and votes < self.num_children:
                to_promote.add(table)
            # If connector is AND and there is a filter that can match only
            # when there is a joinable row, then use INNER. For example, in
            # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
            # as join output, then the col=1 or col=2 can't match (as
            # NULL=anything is always false).
            # For the OR case, if all children voted for a join to be inner,
            # then we can use INNER for the join. For example:
            # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
            # then if rel_a doesn't produce any rows, the whole condition
            # can't match. Hence we can safely use INNER join.
            if self.effective_connector == 'AND' or (
                    self.effective_connector == 'OR' and votes == self.num_children):
                to_demote.add(table)
            # Finally, what happens in cases where we have:
            #    (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
            # Now, we first generate the OR clause, and promote joins for it
            # in the first if branch above. Both rel_a and rel_b are promoted
            # to LOUTER joins. After that we do the AND case. The OR case
            # voted for no inner joins but the rel_a__col__gte=0 votes inner
            # join for rel_a. We demote it back to INNER join (in AND case a
            # single vote is enough). The demotion is OK: if rel_a doesn't
            # produce rows, then the rel_a__col__gte=0 clause can't be true,
            # and thus the whole clause must be false. So, it is safe to use
            # INNER join.
            # Note that in this example we could just as well have the __gte
            # clause and the OR clause swapped. Or we could replace the __gte
            # clause with an OR clause containing rel_a__col=1|rel_a__col=2,
            # and again we could safely demote to INNER.
        query.promote_joins(to_promote)
        query.demote_joins(to_demote)
        return to_demote
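
# A minimal standalone sketch (simplified; not part of Django's API) of the
# voting rules implemented in update_join_types() above. Each filter child
# votes for the joins it uses; under OR, a join missing from some child is
# promoted to LOUTER, while any join under AND -- or a join present in every
# child under OR -- may be demoted to INNER:
#
#     from collections import Counter
#     votes = Counter(['rel_a', 'rel_b'])  # rel_a__col=1 | rel_b__col=2
#     num_children, connector = 2, 'OR'
#     promote = {t for t, n in votes.items()
#                if connector == 'OR' and n < num_children}
#     demote = {t for t, n in votes.items()
#               if connector == 'AND' or n == num_children}
#     # promote == {'rel_a', 'rel_b'}; demote == set()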
fc12a0f430e732180bc48fb1cdc935c9e954d417652ec6c51eb8937482010b43
import datetime import re from decimal import Decimal from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( Avg, Count, DecimalField, DurationField, F, FloatField, Func, IntegerField, Max, Min, Sum, Value, ) from django.db.models.expressions import Case, Exists, OuterRef, Subquery, When from django.test import TestCase from django.test.testcases import skipUnlessDBFeature from django.test.utils import Approximate, CaptureQueriesContext from django.utils import timezone from .models import Author, Book, Publisher, Store class AggregateTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1)) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2)) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = Book.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15) ) cls.b6 = Book.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15) ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 
59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def test_empty_aggregate(self): self.assertEqual(Author.objects.all().aggregate(), {}) def test_aggregate_in_order_by(self): msg = ( 'Using an aggregate in order_by() without also including it in ' 'annotate() is not allowed: Avg(F(book__rating)' ) with self.assertRaisesMessage(FieldError, msg): Author.objects.values('age').order_by(Avg('book__rating')) def test_single_aggregate(self): vals = Author.objects.aggregate(Avg("age")) self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)}) def test_multiple_aggregates(self): vals = Author.objects.aggregate(Sum("age"), Avg("age")) self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}) def test_filter_aggregate(self): vals = Author.objects.filter(age__gt=29).aggregate(Sum("age")) self.assertEqual(vals, {'age__sum': 254}) def test_related_aggregate(self): vals = Author.objects.aggregate(Avg("friends__age")) self.assertEqual(vals, {'friends__age__avg': Approximate(34.07, places=2)}) vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age")) self.assertEqual(vals, {'authors__age__avg': Approximate(38.2857, places=2)}) vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating")) self.assertEqual(vals, {'book__rating__avg': 4.0}) vals = Book.objects.aggregate(Sum("publisher__num_awards")) self.assertEqual(vals, {'publisher__num_awards__sum': 30}) vals = Publisher.objects.aggregate(Sum("book__price")) self.assertEqual(vals, {'book__price__sum': Decimal('270.27')}) def test_aggregate_multi_join(self): vals = Store.objects.aggregate(Max("books__authors__age")) self.assertEqual(vals, {'books__authors__age__max': 57}) vals = Author.objects.aggregate(Min("book__publisher__num_awards")) self.assertEqual(vals, {'book__publisher__num_awards__min': 1}) def test_aggregate_alias(self): vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating")) self.assertEqual(vals, {'amazon_mean': Approximate(4.08, places=2)}) def test_annotate_basic(self): self.assertQuerysetEqual( Book.objects.annotate().order_by('pk'), [ "The Definitive Guide to Django: Web Development Done Right", "Sams Teach Yourself Django in 24 Hours", "Practical Django Projects", "Python Web Development with Django", "Artificial Intelligence: A Modern Approach", "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp" ], lambda b: b.name ) books = Book.objects.annotate(mean_age=Avg("authors__age")) b = books.get(pk=self.b1.pk) self.assertEqual( b.name, 'The Definitive Guide to Django: Web Development Done Right' ) self.assertEqual(b.mean_age, 34.5) def test_annotate_defer(self): qs = Book.objects.annotate( page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (self.b1.id, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name) ) def test_annotate_defer_select_related(self): qs = Book.objects.select_related('contact').annotate( 
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (self.b1.id, "159059725", 447, "Adrian Holovaty", "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name) ) def test_annotate_m2m(self): books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 51.5), ('Practical Django Projects', 29.0), ('Python Web Development with Django', Approximate(30.3, places=1)), ('Sams Teach Yourself Django in 24 Hours', 45.0) ], lambda b: (b.name, b.authors__age__avg), ) books = Book.objects.annotate(num_authors=Count("authors")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2) ], lambda b: (b.name, b.num_authors) ) def test_backwards_m2m_annotate(self): authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 4.5), ('Brad Dayley', 3.0), ('Jacob Kaplan-Moss', 4.5), ('James Bennett', 4.0), ('Paul Bissex', 4.0), ('Stuart Russell', 4.0) ], lambda a: (a.name, a.book__rating__avg) ) authors = Author.objects.annotate(num_books=Count("book")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 1), ('Brad Dayley', 1), ('Jacob Kaplan-Moss', 1), ('James Bennett', 1), ('Jeffrey Forcier', 1), ('Paul Bissex', 1), ('Peter Norvig', 2), ('Stuart Russell', 1), ('Wesley J. 
Chun', 1) ], lambda a: (a.name, a.num_books) ) def test_reverse_fkey_annotate(self): books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 7), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9), ('Practical Django Projects', 3), ('Python Web Development with Django', 7), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 3) ], lambda b: (b.name, b.publisher__num_awards__sum) ) publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name") self.assertQuerysetEqual( publishers, [ ('Apress', Decimal("59.69")), ("Jonno's House of Books", None), ('Morgan Kaufmann', Decimal("75.00")), ('Prentice Hall', Decimal("112.49")), ('Sams', Decimal("23.09")) ], lambda p: (p.name, p.book__price__sum) ) def test_annotate_values(self): books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values()) self.assertEqual( books, [ { "contact_id": self.a1.id, "id": self.b1.id, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": self.p1.id, "rating": 4.5, } ] ) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg('authors__age')) .values('pk', 'isbn', 'mean_age') ) self.assertEqual( list(books), [ { "pk": self.b1.pk, "isbn": "159059725", "mean_age": 34.5, } ] ) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name") self.assertEqual( list(books), [{'name': 'The Definitive Guide to Django: Web Development Done Right'}], ) books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age')) self.assertEqual( list(books), [ { "contact_id": self.a1.id, "id": self.b1.id, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": self.p1.id, "rating": 4.5, } ] ) books = ( Book.objects .values("rating") .annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")) .order_by("rating") ) self.assertEqual( list(books), [ { "rating": 3.0, "n_authors": 1, "mean_age": 45.0, }, { "rating": 4.0, "n_authors": 6, "mean_age": Approximate(37.16, places=1) }, { "rating": 4.5, "n_authors": 2, "mean_age": 34.5, }, { "rating": 5.0, "n_authors": 1, "mean_age": 57.0, } ] ) authors = Author.objects.annotate(Avg("friends__age")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 32.0), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 29.5), ('James Bennett', 34.0), ('Jeffrey Forcier', 27.0), ('Paul Bissex', 31.0), ('Peter Norvig', 46.0), ('Stuart Russell', 57.0), ('Wesley J. 
Chun', Approximate(33.66, places=1)) ], lambda a: (a.name, a.friends__age__avg) ) def test_count(self): vals = Book.objects.aggregate(Count("rating")) self.assertEqual(vals, {"rating__count": 6}) def test_count_star(self): with self.assertNumQueries(1) as ctx: Book.objects.aggregate(n=Count("*")) sql = ctx.captured_queries[0]['sql'] self.assertIn('SELECT COUNT(*) ', sql) def test_count_distinct_expression(self): aggs = Book.objects.aggregate( distinct_ratings=Count(Case(When(pages__gt=300, then='rating')), distinct=True), ) self.assertEqual(aggs['distinct_ratings'], 4) def test_distinct_on_aggregate(self): for aggregate, expected_result in ( (Avg, 4.125), (Count, 4), (Sum, 16.5), ): with self.subTest(aggregate=aggregate.__name__): books = Book.objects.aggregate(ratings=aggregate('rating', distinct=True)) self.assertEqual(books['ratings'], expected_result) def test_non_grouped_annotation_not_in_group_by(self): """ An annotation not included in values() before an aggregate should be excluded from the group by clause. """ qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual(list(qs), [{'rating': 4.0, 'count': 2}]) def test_grouped_annotation_in_group_by(self): """ An annotation included in values() before an aggregate should be included in the group by clause. """ qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual( list(qs), [ {'rating': 4.0, 'count': 1}, {'rating': 4.0, 'count': 2}, ] ) def test_fkey_aggregate(self): explicit = list(Author.objects.annotate(Count('book__id'))) implicit = list(Author.objects.annotate(Count('book'))) self.assertCountEqual(explicit, implicit) def test_annotate_ordering(self): books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating') self.assertEqual( list(books), [ {'rating': 4.5, 'oldest': 35}, {'rating': 3.0, 'oldest': 45}, {'rating': 4.0, 'oldest': 57}, {'rating': 5.0, 'oldest': 57}, ] ) books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating") self.assertEqual( list(books), [ {'rating': 5.0, 'oldest': 57}, {'rating': 4.0, 'oldest': 57}, {'rating': 3.0, 'oldest': 45}, {'rating': 4.5, 'oldest': 35}, ] ) def test_aggregate_annotation(self): vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors")) self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)}) def test_avg_duration_field(self): # Explicit `output_field`. self.assertEqual( Publisher.objects.aggregate(Avg('duration', output_field=DurationField())), {'duration__avg': datetime.timedelta(days=1, hours=12)} ) # Implicit `output_field`. self.assertEqual( Publisher.objects.aggregate(Avg('duration')), {'duration__avg': datetime.timedelta(days=1, hours=12)} ) def test_sum_duration_field(self): self.assertEqual( Publisher.objects.aggregate(Sum('duration', output_field=DurationField())), {'duration__sum': datetime.timedelta(days=3)} ) def test_sum_distinct_aggregate(self): """ Sum on a distinct() QuerySet should aggregate only the distinct items. 
""" authors = Author.objects.filter(book__in=[self.b5, self.b6]) self.assertEqual(authors.count(), 3) distinct_authors = authors.distinct() self.assertEqual(distinct_authors.count(), 2) # Selected author ages are 57 and 46 age_sum = distinct_authors.aggregate(Sum('age')) self.assertEqual(age_sum['age__sum'], 103) def test_filtering(self): p = Publisher.objects.create(name='Expensive Publisher', num_awards=0) Book.objects.create( name='ExpensiveBook1', pages=1, isbn='111', rating=3.5, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 1) ) Book.objects.create( name='ExpensiveBook2', pages=1, isbn='222', rating=4.0, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 2) ) Book.objects.create( name='ExpensiveBook3', pages=1, isbn='333', rating=4.5, price=Decimal("35"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 3) ) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual( publishers, ['Apress', 'Prentice Hall', 'Expensive Publisher'], lambda p: p.name, ) publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Apress", "Sams", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name ) publishers = ( Publisher.objects .annotate(num_books=Count("book__id")) .filter(num_books__gt=1, book__price__lt=Decimal("40.0")) .order_by("pk") ) self.assertQuerysetEqual( publishers, ['Apress', 'Prentice Hall', 'Expensive Publisher'], lambda p: p.name, ) publishers = ( Publisher.objects .filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) .order_by("pk") ) self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Sams", "Prentice Hall", "Morgan Kaufmann", "Expensive Publisher", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk") self.assertQuerysetEqual( publishers, ['Apress', 'Sams', 'Prentice Hall', 'Morgan Kaufmann'], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, ['Sams', 'Morgan Kaufmann', 'Expensive Publisher'], lambda p: p.name, ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True) self.assertEqual(len(publishers), 0) def test_annotation(self): vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id")) self.assertEqual(vals, {"friends__id__count": 2}) books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk") self.assertQuerysetEqual( books, [ "The Definitive Guide to Django: Web Development Done Right", "Artificial Intelligence: A Modern Approach", ], lambda b: b.name ) authors = ( Author.objects .annotate(num_friends=Count("friends__id", distinct=True)) .filter(num_friends=0) .order_by("pk") ) self.assertQuerysetEqual(authors, ['Brad Dayley'], lambda a: a.name) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual(publishers, ['Apress', 'Prentice Hall'], lambda p: p.name) publishers = ( Publisher.objects 
.filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) ) self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name) books = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) ) self.assertQuerysetEqual( books, ['Artificial Intelligence: A Modern Approach'], lambda b: b.name ) def test_more_aggregation(self): a = Author.objects.get(name__contains='Norvig') b = Book.objects.get(name__contains='Done Right') b.authors.add(a) b.save() vals = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) .aggregate(Avg("rating")) ) self.assertEqual(vals, {"rating__avg": 4.25}) def test_even_more_aggregate(self): publishers = Publisher.objects.annotate( earliest_book=Min("book__pubdate"), ).exclude(earliest_book=None).order_by("earliest_book").values( 'earliest_book', 'num_awards', 'id', 'name', ) self.assertEqual( list(publishers), [ { 'earliest_book': datetime.date(1991, 10, 15), 'num_awards': 9, 'id': self.p4.id, 'name': 'Morgan Kaufmann' }, { 'earliest_book': datetime.date(1995, 1, 15), 'num_awards': 7, 'id': self.p3.id, 'name': 'Prentice Hall' }, { 'earliest_book': datetime.date(2007, 12, 6), 'num_awards': 3, 'id': self.p1.id, 'name': 'Apress' }, { 'earliest_book': datetime.date(2008, 3, 3), 'num_awards': 1, 'id': self.p2.id, 'name': 'Sams' } ] ) vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening")) self.assertEqual( vals, { "friday_night_closing__max": datetime.time(23, 59, 59), "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14), } ) def test_annotate_values_list(self): books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("pk", "isbn", "mean_age") ) self.assertEqual(list(books), [(self.b1.id, '159059725', 34.5)]) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn") self.assertEqual(list(books), [('159059725',)]) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age") self.assertEqual(list(books), [(34.5,)]) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("mean_age", flat=True) ) self.assertEqual(list(books), [34.5]) books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price") self.assertEqual( list(books), [ (Decimal("29.69"), 2), (Decimal('23.09'), 1), (Decimal('30'), 1), (Decimal('75'), 1), (Decimal('82.8'), 1), ] ) def test_dates_with_aggregation(self): """ .dates() returns a distinct set of dates when applied to a QuerySet with aggregation. Refs #18056. Previously, .dates() would return distinct (date_kind, aggregation) sets, in this case (year, num_authors), so 2008 would be returned twice because there are books from 2008 with a different number of authors. 
""" dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year') self.assertQuerysetEqual( dates, [ "datetime.date(1991, 1, 1)", "datetime.date(1995, 1, 1)", "datetime.date(2007, 1, 1)", "datetime.date(2008, 1, 1)" ] ) def test_values_aggregation(self): # Refs #20782 max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating')) self.assertEqual(max_rating['max_rating'], 5) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3}) def test_ticket17424(self): """ Doing exclude() on a foreign model after annotate() doesn't crash. """ all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk')) annotated_books = Book.objects.order_by('pk').annotate(one=Count("id")) # The value doesn't matter, we just need any negative # constraint on a related model that's a noop. excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__") # Try to generate query tree str(excluded_books.query) self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk) # Check internal state self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type) self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type) def test_ticket12886(self): """ Aggregation over sliced queryset works correctly. """ qs = Book.objects.all().order_by('-rating')[0:3] vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating'] self.assertAlmostEqual(vals, 4.5, places=2) def test_ticket11881(self): """ Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or select_related() stuff. """ qs = Book.objects.all().select_for_update().order_by( 'pk').select_related('publisher').annotate(max_pk=Max('pk')) with CaptureQueriesContext(connection) as captured_queries: qs.aggregate(avg_pk=Avg('max_pk')) self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]['sql'].lower() self.assertNotIn('for update', qstr) forced_ordering = connection.ops.force_no_ordering() if forced_ordering: # If the backend needs to force an ordering we make sure it's # the only "ORDER BY" clause present in the query. 
self.assertEqual( re.findall(r'order by (\w+)', qstr), [', '.join(f[1][0] for f in forced_ordering).lower()] ) else: self.assertNotIn('order by', qstr) self.assertEqual(qstr.count(' join '), 0) def test_decimal_max_digits_has_no_effect(self): Book.objects.all().delete() a1 = Author.objects.first() p1 = Publisher.objects.first() thedate = timezone.now() for i in range(10): Book.objects.create( isbn="abcde{}".format(i), name="none", pages=10, rating=4.0, price=9999.98, contact=a1, publisher=p1, pubdate=thedate) book = Book.objects.aggregate(price_sum=Sum('price')) self.assertEqual(book['price_sum'], Decimal("99999.80")) def test_nonaggregate_aggregation_throws(self): with self.assertRaisesMessage(TypeError, 'fail is not an aggregate expression'): Book.objects.aggregate(fail=F('price')) def test_nonfield_annotation(self): book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first() self.assertEqual(book.val, 2) def test_missing_output_field_raises_error(self): with self.assertRaisesMessage(FieldError, 'Cannot resolve expression type, unknown output_field'): Book.objects.annotate(val=Max(2)).first() def test_annotation_expressions(self): authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name') authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name') for qs in (authors, authors2): self.assertQuerysetEqual( qs, [ ('Adrian Holovaty', 132), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 129), ('James Bennett', 63), ('Jeffrey Forcier', 128), ('Paul Bissex', 120), ('Peter Norvig', 103), ('Stuart Russell', 103), ('Wesley J. Chun', 176) ], lambda a: (a.name, a.combined_ages) ) def test_aggregation_expressions(self): a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*')) a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age')) a3 = Author.objects.aggregate(av_age=Avg('age')) self.assertEqual(a1, {'av_age': 37}) self.assertEqual(a2, {'av_age': 37}) self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)}) def test_avg_decimal_field(self): v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price'] self.assertIsInstance(v, Decimal) self.assertEqual(v, Approximate(Decimal('47.39'), places=2)) def test_order_of_precedence(self): p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3) self.assertEqual(p1, {'avg_price': Approximate(Decimal('148.18'), places=2)}) p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3) self.assertEqual(p2, {'avg_price': Approximate(Decimal('53.39'), places=2)}) def test_combine_different_types(self): msg = ( 'Expression contains mixed types: FloatField, IntegerField. ' 'You must set output_field.' 
) qs = Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')) with self.assertRaisesMessage(FieldError, msg): qs.first() with self.assertRaisesMessage(FieldError, msg): qs.first() b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=IntegerField())).get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=FloatField())).get(pk=self.b4.pk) self.assertEqual(b2.sums, 383.69) b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=DecimalField())).get(pk=self.b4.pk) self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2)) def test_complex_aggregations_require_kwarg(self): with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'): Author.objects.annotate(Sum(F('age') + F('friends__age'))) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum('age') / Count('age')) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum(1)) def test_aggregate_over_complex_annotation(self): qs = Author.objects.annotate( combined_ages=Sum(F('age') + F('friends__age'))) age = qs.aggregate(max_combined_age=Max('combined_ages')) self.assertEqual(age['max_combined_age'], 176) age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age=Sum('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age'], 954) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age_doubled'], 954 * 2) def test_values_annotation_with_expression(self): # ensure the F() is promoted to the group by clause qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['another_age'], 68) qs = qs.annotate(friend_count=Count('friends')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['friend_count'], 2) qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter( name="Adrian Holovaty").order_by('-combined_age') self.assertEqual( list(qs), [ { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 69 }, { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 63 } ] ) vals = qs.values('name', 'combined_age') self.assertEqual( list(vals), [ {'name': 'Adrian Holovaty', 'combined_age': 69}, {'name': 'Adrian Holovaty', 'combined_age': 63}, ] ) def test_annotate_values_aggregate(self): alias_age = Author.objects.annotate( age_alias=F('age') ).values( 'age_alias', ).aggregate(sum_age=Sum('age_alias')) age = Author.objects.values('age').aggregate(sum_age=Sum('age')) self.assertEqual(alias_age['sum_age'], age['sum_age']) def test_annotate_over_annotate(self): author = Author.objects.annotate( age_alias=F('age') ).annotate( sum_age=Sum('age_alias') ).get(name="Adrian Holovaty") other_author = Author.objects.annotate( sum_age=Sum('age') 
).get(name="Adrian Holovaty") self.assertEqual(author.sum_age, other_author.sum_age) def test_annotated_aggregate_over_annotated_aggregate(self): with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(Sum('id__max')) class MyMax(Max): def as_sql(self, compiler, connection): self.set_source_expressions(self.get_source_expressions()[0:1]) return super().as_sql(compiler, connection) with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price')) def test_multi_arg_aggregate(self): class MyMax(Max): output_field = DecimalField() def as_sql(self, compiler, connection): copy = self.copy() copy.set_source_expressions(copy.get_source_expressions()[0:1]) return super(MyMax, copy).as_sql(compiler, connection) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Book.objects.aggregate(MyMax('pages', 'price')) with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'): Book.objects.annotate(MyMax('pages', 'price')) Book.objects.aggregate(max_field=MyMax('pages', 'price')) def test_add_implementation(self): class MySum(Sum): pass # test completely changing how the output is rendered def lower_case_function_override(self, compiler, connection): sql, params = compiler.compile(self.source_expressions[0]) substitutions = {'function': self.function.lower(), 'expressions': sql, 'distinct': ''} substitutions.update(self.extra) return self.template % substitutions, params setattr(MySum, 'as_' + connection.vendor, lower_case_function_override) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test changing the dict and delegating def lower_case_function_super(self, compiler, connection): self.extra['function'] = self.function.lower() return super(MySum, self).as_sql(compiler, connection) setattr(MySum, 'as_' + connection.vendor, lower_case_function_super) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test overriding all parts of the template def be_evil(self, compiler, connection): substitutions = {'function': 'MAX', 'expressions': '2', 'distinct': ''} substitutions.update(self.extra) return self.template % substitutions, () setattr(MySum, 'as_' + connection.vendor, be_evil) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('MAX('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 2) def test_complex_values_aggregation(self): max_rating = Book.objects.values('rating').aggregate( double_max_rating=Max('rating') + Max('rating')) self.assertEqual(max_rating['double_max_rating'], 5 * 2) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') + 5 ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3 + 5}) def test_expression_on_aggregation(self): # Create a plain expression class Greatest(Func): function = 'GREATEST' def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='MAX', 
**extra_context) qs = Publisher.objects.annotate( price_or_median=Greatest(Avg('book__rating', output_field=DecimalField()), Avg('book__price')) ).filter(price_or_median__gte=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs, [1, 3, 7, 9], lambda v: v.num_awards) qs2 = Publisher.objects.annotate( rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'), output_field=FloatField()) ).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs2, [1, 3], lambda v: v.num_awards) def test_arguments_must_be_expressions(self): msg = 'QuerySet.aggregate() received non-expression(s): %s.' with self.assertRaisesMessage(TypeError, msg % FloatField()): Book.objects.aggregate(FloatField()) with self.assertRaisesMessage(TypeError, msg % True): Book.objects.aggregate(is_book=True) with self.assertRaisesMessage(TypeError, msg % ', '.join([str(FloatField()), 'True'])): Book.objects.aggregate(FloatField(), Avg('price'), is_book=True) def test_aggregation_subquery_annotation(self): """Subquery annotations are excluded from the GROUP BY if they are not explicitly grouped against.""" latest_book_pubdate_qs = Book.objects.filter( publisher=OuterRef('pk') ).order_by('-pubdate').values('pubdate')[:1] publisher_qs = Publisher.objects.annotate( latest_book_pubdate=Subquery(latest_book_pubdate_qs), ).annotate(count=Count('book')) with self.assertNumQueries(1) as ctx: list(publisher_qs) self.assertEqual(ctx[0]['sql'].count('SELECT'), 2) # The GROUP BY should not be by alias either. self.assertEqual(ctx[0]['sql'].lower().count('latest_book_pubdate'), 1) def test_aggregation_subquery_annotation_exists(self): latest_book_pubdate_qs = Book.objects.filter( publisher=OuterRef('pk') ).order_by('-pubdate').values('pubdate')[:1] publisher_qs = Publisher.objects.annotate( latest_book_pubdate=Subquery(latest_book_pubdate_qs), count=Count('book'), ) self.assertTrue(publisher_qs.exists()) def test_aggregation_exists_annotation(self): published_books = Book.objects.filter(publisher=OuterRef('pk')) publisher_qs = Publisher.objects.annotate( published_book=Exists(published_books), count=Count('book'), ).values_list('name', flat=True) self.assertCountEqual(list(publisher_qs), [ 'Apress', 'Morgan Kaufmann', "Jonno's House of Books", 'Prentice Hall', 'Sams', ]) def test_aggregation_subquery_annotation_values(self): """ Subquery annotations and external aliases are excluded from the GROUP BY if they are not selected. """ books_qs = Book.objects.annotate( first_author_the_same_age=Subquery( Author.objects.filter( age=OuterRef('contact__friends__age'), ).order_by('age').values('id')[:1], ) ).filter( publisher=self.p1, first_author_the_same_age__isnull=False, ).annotate( min_age=Min('contact__friends__age'), ).values('name', 'min_age').order_by('name') self.assertEqual(list(books_qs), [ {'name': 'Practical Django Projects', 'min_age': 34}, { 'name': 'The Definitive Guide to Django: Web Development Done Right', 'min_age': 29, }, ]) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_group_by_subquery_annotation(self): """ Subquery annotations are included in the GROUP BY if they are grouped against. 
""" long_books_count_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=400, ).values( 'publisher' ).annotate(count=Count('pk')).values('count') long_books_count_breakdown = Publisher.objects.values_list( Subquery(long_books_count_qs, IntegerField()), ).annotate(total=Count('*')) self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4}) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_group_by_exists_annotation(self): """ Exists annotations are included in the GROUP BY if they are grouped against. """ long_books_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=800, ) has_long_books_breakdown = Publisher.objects.values_list( Exists(long_books_qs), ).annotate(total=Count('*')) self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3}) def test_aggregation_subquery_annotation_related_field(self): publisher = Publisher.objects.create(name=self.a9.name, num_awards=2) book = Book.objects.create( isbn='159059999', name='Test book.', pages=819, rating=2.5, price=Decimal('14.44'), contact=self.a9, publisher=publisher, pubdate=datetime.date(2019, 12, 6), ) book.authors.add(self.a5, self.a6, self.a7) books_qs = Book.objects.annotate( contact_publisher=Subquery( Publisher.objects.filter( pk=OuterRef('publisher'), name=OuterRef('contact__name'), ).values('name')[:1], ) ).filter( contact_publisher__isnull=False, ).annotate(count=Count('authors')) self.assertSequenceEqual(books_qs, [book])
import hashlib import unittest from django.utils.crypto import constant_time_compare, pbkdf2, salted_hmac class TestUtilsCryptoMisc(unittest.TestCase): def test_constant_time_compare(self): # It's hard to test for constant time, just test the result. self.assertTrue(constant_time_compare(b'spam', b'spam')) self.assertFalse(constant_time_compare(b'spam', b'eggs')) self.assertTrue(constant_time_compare('spam', 'spam')) self.assertFalse(constant_time_compare('spam', 'eggs')) def test_salted_hmac(self): tests = [ ((b'salt', b'value'), {}, 'b51a2e619c43b1ca4f91d15c57455521d71d61eb'), (('salt', 'value'), {}, 'b51a2e619c43b1ca4f91d15c57455521d71d61eb'), ( ('salt', 'value'), {'secret': 'abcdefg'}, '8bbee04ccddfa24772d1423a0ba43bd0c0e24b76', ), ( ('salt', 'value'), {'secret': 'x' * hashlib.sha1().block_size}, 'bd3749347b412b1b0a9ea65220e55767ac8e96b0', ), ] for args, kwargs, digest in tests: with self.subTest(args=args, kwargs=kwargs): self.assertEqual(salted_hmac(*args, **kwargs).hexdigest(), digest) class TestUtilsCryptoPBKDF2(unittest.TestCase): # http://tools.ietf.org/html/draft-josefsson-pbkdf2-test-vectors-06 rfc_vectors = [ { "args": { "password": "password", "salt": "salt", "iterations": 1, "dklen": 20, "digest": hashlib.sha1, }, "result": "0c60c80f961f0e71f3a9b524af6012062fe037a6", }, { "args": { "password": "password", "salt": "salt", "iterations": 2, "dklen": 20, "digest": hashlib.sha1, }, "result": "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957", }, { "args": { "password": "password", "salt": "salt", "iterations": 4096, "dklen": 20, "digest": hashlib.sha1, }, "result": "4b007901b765489abead49d926f721d065a429c1", }, # # this takes way too long :( # { # "args": { # "password": "password", # "salt": "salt", # "iterations": 16777216, # "dklen": 20, # "digest": hashlib.sha1, # }, # "result": "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984", # }, { "args": { "password": "passwordPASSWORDpassword", "salt": "saltSALTsaltSALTsaltSALTsaltSALTsalt", "iterations": 4096, "dklen": 25, "digest": hashlib.sha1, }, "result": "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038", }, { "args": { "password": "pass\0word", "salt": "sa\0lt", "iterations": 4096, "dklen": 16, "digest": hashlib.sha1, }, "result": "56fa6aa75548099dcc37d7f03425e0c3", }, ] regression_vectors = [ { "args": { "password": "password", "salt": "salt", "iterations": 1, "dklen": 20, "digest": hashlib.sha256, }, "result": "120fb6cffcf8b32c43e7225256c4f837a86548c9", }, { "args": { "password": "password", "salt": "salt", "iterations": 1, "dklen": 20, "digest": hashlib.sha512, }, "result": "867f70cf1ade02cff3752599a3a53dc4af34c7a6", }, { "args": { "password": "password", "salt": "salt", "iterations": 1000, "dklen": 0, "digest": hashlib.sha512, }, "result": ("afe6c5530785b6cc6b1c6453384731bd5ee432ee" "549fd42fb6695779ad8a1c5bf59de69c48f774ef" "c4007d5298f9033c0241d5ab69305e7b64eceeb8d" "834cfec"), }, # Check leading zeros are not stripped (#17481) { "args": { "password": b'\xba', "salt": "salt", "iterations": 1, "dklen": 20, "digest": hashlib.sha1, }, "result": '0053d3b91a7f1e54effebd6d68771e8a6e0b2c5b', }, ] def test_public_vectors(self): for vector in self.rfc_vectors: result = pbkdf2(**vector['args']) self.assertEqual(result.hex(), vector['result']) def test_regression_vectors(self): for vector in self.regression_vectors: result = pbkdf2(**vector['args']) self.assertEqual(result.hex(), vector['result']) def test_default_hmac_alg(self): kwargs = {'password': b'password', 'salt': b'salt', 'iterations': 1, 'dklen': 20} self.assertEqual(pbkdf2(**kwargs), 
hashlib.pbkdf2_hmac(hash_name=hashlib.sha256().name, **kwargs))
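

# Illustrative sketch, not part of the test suite above: pbkdf2() returns raw
# bytes, which is why the vectors compare against result.hex(). Reproducing
# the first RFC vector by hand:
def _example_pbkdf2_vector():
    derived = pbkdf2('password', 'salt', iterations=1, dklen=20, digest=hashlib.sha1)
    assert derived.hex() == '0c60c80f961f0e71f3a9b524af6012062fe037a6'
    return derived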
# Django documentation build configuration file, created by # sphinx-quickstart on Thu Mar 27 09:06:53 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't picklable (module imports are okay, they're removed automatically). # # All configuration values have a default; values that are commented out # serve to show the default. import sys from os.path import abspath, dirname, join # Workaround for sphinx-build recursion limit overflow: # pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL) # RuntimeError: maximum recursion depth exceeded while pickling an object # # Python's default allowed recursion depth is 1000 but this isn't enough for # building docs/ref/settings.txt sometimes. # https://groups.google.com/d/topic/sphinx-dev/MtRf64eGtv4/discussion sys.setrecursionlimit(2000) # Make sure we get the version of this copy of Django sys.path.insert(1, dirname(dirname(abspath(__file__)))) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(abspath(join(dirname(__file__), "_ext"))) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.6.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "djangodocs", 'sphinx.ext.extlinks', "sphinx.ext.intersphinx", "sphinx.ext.viewcode", "sphinx.ext.autosectionlabel", ] # AutosectionLabel settings. # Uses a <page>:<label> schema which doesn't work for duplicate sub-section # labels, so set max depth. autosectionlabel_prefix_document = True autosectionlabel_maxdepth = 2 # Spelling check needs an additional module that is not installed by default. # Add it only if spelling check is requested so docs can be generated without it. if 'spelling' in sys.argv: extensions.append("sphinxcontrib.spelling") # Spelling language. spelling_lang = 'en_US' # Location of word list. spelling_word_list_filename = 'spelling_wordlist' # Add any paths that contain templates here, relative to this directory. # templates_path = [] # The suffix of source filenames. source_suffix = '.txt' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General substitutions. project = 'Django' copyright = 'Django Software Foundation and contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '3.1' # The full version, including alpha/beta/rc tags. try: from django import VERSION, get_version except ImportError: release = version else: def django_release(): pep440ver = get_version() if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver: return pep440ver + '.dev' return pep440ver release = django_release() # The "development version" of Django django_next_version = '3.1' extlinks = { 'commit': ('https://github.com/django/django/commit/%s', ''), 'cve': ('https://nvd.nist.gov/view/vuln/detail?vulnId=%s', 'CVE-'), # A file or directory. GitHub redirects from blob to tree if needed. 
'source': ('https://github.com/django/django/blob/master/%s', ''), 'ticket': ('https://code.djangoproject.com/ticket/%s', '#'), } # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # Location for .po/.mo translation files used when language is set locale_dirs = ['locale/'] # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', '_theme'] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'trac' # Links to Python's docs should reference the most recent version of the 3.x # branch, which is located at this URL. intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None), 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), 'psycopg2': ('https://www.psycopg.org/docs/', None), } # Python's docs don't change every week. intersphinx_cache_limit = 90 # days # The 'versionadded' and 'versionchanged' directives are overridden. suppress_warnings = ['app.add_directive'] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "djangodocs" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ["_theme"] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # Content template for the index page. # html_index = '' # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. html_additional_pages = {} # If false, no module index is generated. 
# html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Djangodoc' modindex_common_prefix = ["django."] # Appended to every page rst_epilog = """ .. |django-users| replace:: :ref:`django-users <django-users-mailing-list>` .. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>` .. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>` .. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>` .. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>` """ # -- Options for LaTeX output -------------------------------------------------- latex_elements = { 'preamble': ( '\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}' '\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}' '\\DeclareUnicodeCharacter{2665}{[unicode-heart]}' '\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}' ), } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). # latex_documents = [] latex_documents = [ ('contents', 'django.tex', 'Django Documentation', 'Django Software Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [( 'ref/django-admin', 'django-admin', 'Utility script for the Django Web framework', ['Django Software Foundation'], 1 )] # -- Options for Texinfo output ------------------------------------------------ # List of tuples (startdocname, targetname, title, author, dir_entry, # description, category, toctree_only) texinfo_documents = [( master_doc, "django", "", "", "Django", "Documentation of the Django framework", "Web development", False )] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = 'Django Software Foundation' epub_publisher = 'Django Software Foundation' epub_copyright = copyright # The basename for the epub file. It defaults to the project name. 
# epub_basename = 'Django'

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'

# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''

# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''

# A unique identification for the text.
# epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []

# A list of files that should not be packed into the epub file.
# epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3

# Allow duplicate toc entries.
# epub_tocdup = True

# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'

# Fix unsupported image types using the PIL.
# epub_fix_images = False

# Scale large images.
# epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'

# If false, no index is generated.
# epub_use_index = True
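

# Illustrative note on django_release() defined earlier in this file (version
# values here are hypothetical, not build output): it suffixes '.dev' so the
# rendered docs never claim an unreleased final version.
#
#   VERSION == (3, 1, 0, 'alpha', 0), get_version() == '3.1'
#       -> release == '3.1.dev'
#   VERSION == (3, 1, 0, 'alpha', 0), get_version() == '3.1.dev20191120101010'
#       -> release unchanged (already a dev snapshot)
#   VERSION == (3, 1, 1, 'final', 0), get_version() == '3.1.1'
#       -> release unchanged (tagged release)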
import itertools import json import os import re from urllib.parse import unquote from django.apps import apps from django.conf import settings from django.http import HttpResponse, HttpResponseRedirect, JsonResponse from django.template import Context, Engine from django.urls import translate_url from django.utils.formats import get_format from django.utils.http import url_has_allowed_host_and_scheme from django.utils.translation import ( LANGUAGE_SESSION_KEY, check_for_language, get_language, ) from django.utils.translation.trans_real import DjangoTranslation from django.views.generic import View LANGUAGE_QUERY_PARAMETER = 'language' def set_language(request): """ Redirect to a given URL while setting the chosen language in the session (if enabled) and in a cookie. The URL and the language code need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. """ next_url = request.POST.get('next', request.GET.get('next')) if ( (next_url or request.accepts('text/html')) and not url_has_allowed_host_and_scheme( url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure(), ) ): next_url = request.META.get('HTTP_REFERER') # HTTP_REFERER may be encoded. next_url = next_url and unquote(next_url) if not url_has_allowed_host_and_scheme( url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure(), ): next_url = '/' response = HttpResponseRedirect(next_url) if next_url else HttpResponse(status=204) if request.method == 'POST': lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER) if lang_code and check_for_language(lang_code): if next_url: next_trans = translate_url(next_url, lang_code) if next_trans != next_url: response = HttpResponseRedirect(next_trans) if hasattr(request, 'session'): # Storing the language in the session is deprecated. # (RemovedInDjango40Warning) request.session[LANGUAGE_SESSION_KEY] = lang_code response.set_cookie( settings.LANGUAGE_COOKIE_NAME, lang_code, max_age=settings.LANGUAGE_COOKIE_AGE, path=settings.LANGUAGE_COOKIE_PATH, domain=settings.LANGUAGE_COOKIE_DOMAIN, secure=settings.LANGUAGE_COOKIE_SECURE, httponly=settings.LANGUAGE_COOKIE_HTTPONLY, samesite=settings.LANGUAGE_COOKIE_SAMESITE, ) return response def get_formats(): """Return all formats strings required for i18n to work.""" FORMAT_SETTINGS = ( 'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT', 'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT', 'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR', 'THOUSAND_SEPARATOR', 'NUMBER_GROUPING', 'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS' ) return {attr: get_format(attr) for attr in FORMAT_SETTINGS} js_catalog_template = r""" {% autoescape off %} (function(globals) { var django = globals.django || (globals.django = {}); {% if plural %} django.pluralidx = function(n) { var v={{ plural }}; if (typeof(v) == 'boolean') { return v ? 1 : 0; } else { return v; } }; {% else %} django.pluralidx = function(count) { return (count == 1) ? 
0 : 1; }; {% endif %} /* gettext library */ django.catalog = django.catalog || {}; {% if catalog_str %} var newcatalog = {{ catalog_str }}; for (var key in newcatalog) { django.catalog[key] = newcatalog[key]; } {% endif %} if (!django.jsi18n_initialized) { django.gettext = function(msgid) { var value = django.catalog[msgid]; if (typeof(value) == 'undefined') { return msgid; } else { return (typeof(value) == 'string') ? value : value[0]; } }; django.ngettext = function(singular, plural, count) { var value = django.catalog[singular]; if (typeof(value) == 'undefined') { return (count == 1) ? singular : plural; } else { return value.constructor === Array ? value[django.pluralidx(count)] : value; } }; django.gettext_noop = function(msgid) { return msgid; }; django.pgettext = function(context, msgid) { var value = django.gettext(context + '\x04' + msgid); if (value.indexOf('\x04') != -1) { value = msgid; } return value; }; django.npgettext = function(context, singular, plural, count) { var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count); if (value.indexOf('\x04') != -1) { value = django.ngettext(singular, plural, count); } return value; }; django.interpolate = function(fmt, obj, named) { if (named) { return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])}); } else { return fmt.replace(/%s/g, function(match){return String(obj.shift())}); } }; /* formatting library */ django.formats = {{ formats_str }}; django.get_format = function(format_type) { var value = django.formats[format_type]; if (typeof(value) == 'undefined') { return format_type; } else { return value; } }; /* add to global namespace */ globals.pluralidx = django.pluralidx; globals.gettext = django.gettext; globals.ngettext = django.ngettext; globals.gettext_noop = django.gettext_noop; globals.pgettext = django.pgettext; globals.npgettext = django.npgettext; globals.interpolate = django.interpolate; globals.get_format = django.get_format; django.jsi18n_initialized = true; } }(this)); {% endautoescape %} """ class JavaScriptCatalog(View): """ Return the selected language catalog as a JavaScript library. Receive the list of packages to check for translations in the `packages` kwarg either from the extra dictionary passed to the url() function or as a plus-sign delimited string from the request. Default is 'django.conf'. You can override the gettext domain for this view, but usually you don't want to do that as JavaScript messages go to the djangojs domain. This might be needed if you deliver your JavaScript source from Django templates. """ domain = 'djangojs' packages = None def get(self, request, *args, **kwargs): locale = get_language() domain = kwargs.get('domain', self.domain) # If packages are not provided, default to all installed packages, as # DjangoTranslation without localedirs harvests them all. 
packages = kwargs.get('packages', '') packages = packages.split('+') if packages else self.packages paths = self.get_paths(packages) if packages else None self.translation = DjangoTranslation(locale, domain=domain, localedirs=paths) context = self.get_context_data(**kwargs) return self.render_to_response(context) def get_paths(self, packages): allowable_packages = {app_config.name: app_config for app_config in apps.get_app_configs()} app_configs = [allowable_packages[p] for p in packages if p in allowable_packages] if len(app_configs) < len(packages): excluded = [p for p in packages if p not in allowable_packages] raise ValueError( 'Invalid package(s) provided to JavaScriptCatalog: %s' % ','.join(excluded) ) # paths of requested packages return [os.path.join(app.path, 'locale') for app in app_configs] @property def _num_plurals(self): """ Return the number of plurals for this catalog language, or 2 if no plural string is available. """ match = re.search(r'nplurals=\s*(\d+)', self._plural_string or '') if match: return int(match.groups()[0]) return 2 @property def _plural_string(self): """ Return the plural string (including nplurals) for this catalog language, or None if no plural string is available. """ if '' in self.translation._catalog: for line in self.translation._catalog[''].split('\n'): if line.startswith('Plural-Forms:'): return line.split(':', 1)[1].strip() return None def get_plural(self): plural = self._plural_string if plural is not None: # This should be a compiled function of a typical plural-form: # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : # n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2; plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1] return plural def get_catalog(self): pdict = {} num_plurals = self._num_plurals catalog = {} trans_cat = self.translation._catalog trans_fallback_cat = self.translation._fallback._catalog if self.translation._fallback else {} seen_keys = set() for key, value in itertools.chain(trans_cat.items(), trans_fallback_cat.items()): if key == '' or key in seen_keys: continue if isinstance(key, str): catalog[key] = value elif isinstance(key, tuple): msgid, cnt = key pdict.setdefault(msgid, {})[cnt] = value else: raise TypeError(key) seen_keys.add(key) for k, v in pdict.items(): catalog[k] = [v.get(i, '') for i in range(num_plurals)] return catalog def get_context_data(self, **kwargs): return { 'catalog': self.get_catalog(), 'formats': get_formats(), 'plural': self.get_plural(), } def render_to_response(self, context, **response_kwargs): def indent(s): return s.replace('\n', '\n ') template = Engine().from_string(js_catalog_template) context['catalog_str'] = indent( json.dumps(context['catalog'], sort_keys=True, indent=2) ) if context['catalog'] else None context['formats_str'] = indent(json.dumps(context['formats'], sort_keys=True, indent=2)) return HttpResponse(template.render(Context(context)), 'text/javascript; charset="utf-8"') class JSONCatalog(JavaScriptCatalog): """ Return the selected language catalog as a JSON object. Receive the same parameters as JavaScriptCatalog and return a response with a JSON object of the following format: { "catalog": { # Translations catalog }, "formats": { # Language formats for date, time, etc. }, "plural": '...' # Expression for plural forms, or null. } """ def render_to_response(self, context, **response_kwargs): return JsonResponse(context)
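

# Illustrative sketch, not part of this module: how these catalog views are
# typically wired into a URLconf; the 'myapp' package name is hypothetical.
def _example_catalog_urlpatterns():
    from django.urls import path
    return [
        path('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
        # Limit the catalog to a specific application's translations.
        path('jsi18n/myapp/', JavaScriptCatalog.as_view(packages=['myapp']),
             name='javascript-catalog-myapp'),
        # Same catalog data, served as a JSON object instead of a JS library.
        path('jsoni18n/', JSONCatalog.as_view(), name='json-catalog'),
    ]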
import functools
import re
import sys
import types
from pathlib import Path

from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.regex_helper import _lazy_re_compile
from django.utils.version import get_docs_version

# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
    debug=True,
    libraries={'i18n': 'django.templatetags.i18n'},
)

CURRENT_DIR = Path(__file__).parent


class CallableSettingWrapper:
    """
    Object to wrap callables appearing in settings.

    * Not to call in the debug page (#21345).
    * Not to break the debug page if the callable forbids setting attributes
      (#23070).
    """
    def __init__(self, callable_setting):
        self._wrapped = callable_setting

    def __repr__(self):
        return repr(self._wrapped)


def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb)
    if request.accepts('text/html'):
        html = reporter.get_traceback_html()
        return HttpResponse(html, status=status_code, content_type='text/html')
    else:
        text = reporter.get_traceback_text()
        return HttpResponse(text, status=status_code, content_type='text/plain; charset=utf-8')


@functools.lru_cache()
def get_default_exception_reporter_filter():
    # Instantiate the default filter for the first time and cache it.
    return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()


def get_exception_reporter_filter(request):
    default_filter = get_default_exception_reporter_filter()
    return getattr(request, 'exception_reporter_filter', default_filter)


def get_exception_reporter_class(request):
    default_exception_reporter_class = import_string(settings.DEFAULT_EXCEPTION_REPORTER)
    return getattr(request, 'exception_reporter_class', default_exception_reporter_class)


class SafeExceptionReporterFilter:
    """
    Use annotations made by the sensitive_post_parameters and
    sensitive_variables decorators to filter out sensitive information.
    """
    cleansed_substitute = '********************'
    hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.I)

    def cleanse_setting(self, key, value):
        """
        Cleanse an individual setting key/value of sensitive content. If the
        value is a dictionary, recursively cleanse the keys in that dictionary.
        """
        try:
            if self.hidden_settings.search(key):
                cleansed = self.cleansed_substitute
            elif isinstance(value, dict):
                cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}
            else:
                cleansed = value
        except TypeError:
            # If the key isn't regex-able, just return as-is.
            cleansed = value

        if callable(cleansed):
            cleansed = CallableSettingWrapper(cleansed)

        return cleansed

    def get_safe_settings(self):
        """
        Return a dictionary of the settings module with values of sensitive
        settings replaced with stars (*********).
""" settings_dict = {} for k in dir(settings): if k.isupper(): settings_dict[k] = self.cleanse_setting(k, getattr(settings, k)) return settings_dict def get_safe_request_meta(self, request): """ Return a dictionary of request.META with sensitive values redacted. """ if not hasattr(request, 'META'): return {} return {k: self.cleanse_setting(k, v) for k, v in request.META.items()} def is_active(self, request): """ This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis. """ return settings.DEBUG is False def get_cleansed_multivaluedict(self, request, multivaluedict): """ Replace the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098). """ sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: multivaluedict = multivaluedict.copy() for param in sensitive_post_parameters: if param in multivaluedict: multivaluedict[param] = self.cleansed_substitute return multivaluedict def get_post_parameters(self, request): """ Replace the values of POST parameters marked as sensitive with stars (*********). """ if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters == '__ALL__': # Cleanse all parameters. for k in cleansed: cleansed[k] = self.cleansed_substitute return cleansed else: # Cleanse only the specified parameters. for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = self.cleansed_substitute return cleansed else: return request.POST def cleanse_special_types(self, request, value): try: # If value is lazy or a complex object of another kind, this check # might raise an exception. isinstance checks that lazy # MultiValueDicts will have a return value. is_multivalue_dict = isinstance(value, MultiValueDict) except Exception as e: return '{!r} while evaluating {!r}'.format(e, value) if is_multivalue_dict: # Cleanse MultiValueDicts (request.POST is the one we usually care about) value = self.get_cleansed_multivaluedict(request, value) return value def get_traceback_frame_variables(self, request, tb_frame): """ Replace the values of variables marked as sensitive with stars (*********). """ # Loop through the frame's callers to see if the sensitive_variables # decorator was used. current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. 
wrapper = current_frame.f_locals['sensitive_variables_wrapper'] sensitive_variables = getattr(wrapper, 'sensitive_variables', None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == '__ALL__': # Cleanse all variables for name in tb_frame.f_locals: cleansed[name] = self.cleansed_substitute else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = self.cleansed_substitute else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. cleansed['func_args'] = self.cleansed_substitute cleansed['func_kwargs'] = self.cleansed_substitute return cleansed.items() class ExceptionReporter: """Organize and coordinate reporting on exceptions.""" def __init__(self, request, exc_type, exc_value, tb, is_email=False): self.request = request self.filter = get_exception_reporter_filter(self.request) self.exc_type = exc_type self.exc_value = exc_value self.tb = tb self.is_email = is_email self.template_info = getattr(self.exc_value, 'template_debug', None) self.template_does_not_exist = False self.postmortem = None def get_traceback_data(self): """Return a dictionary containing traceback information.""" if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist): self.template_does_not_exist = True self.postmortem = self.exc_value.chain or [self.exc_value] frames = self.get_traceback_frames() for i, frame in enumerate(frames): if 'vars' in frame: frame_vars = [] for k, v in frame['vars']: v = pprint(v) # Trim large blobs of data if len(v) > 4096: v = '%s… <trimmed %d bytes string>' % (v[0:4096], len(v)) frame_vars.append((k, v)) frame['vars'] = frame_vars frames[i] = frame unicode_hint = '' if self.exc_type and issubclass(self.exc_type, UnicodeError): start = getattr(self.exc_value, 'start', None) end = getattr(self.exc_value, 'end', None) if start is not None and end is not None: unicode_str = self.exc_value.args[1] unicode_hint = force_str( unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace' ) from django import get_version if self.request is None: user_str = None else: try: user_str = str(self.request.user) except Exception: # request.user may raise OperationalError if the database is # unavailable, for example. 
user_str = '[unable to retrieve the current user]' c = { 'is_email': self.is_email, 'unicode_hint': unicode_hint, 'frames': frames, 'request': self.request, 'request_meta': self.filter.get_safe_request_meta(self.request), 'user_str': user_str, 'filtered_POST_items': list(self.filter.get_post_parameters(self.request).items()), 'settings': self.filter.get_safe_settings(), 'sys_executable': sys.executable, 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3], 'server_time': timezone.now(), 'django_version_info': get_version(), 'sys_path': sys.path, 'template_info': self.template_info, 'template_does_not_exist': self.template_does_not_exist, 'postmortem': self.postmortem, } if self.request is not None: c['request_GET_items'] = self.request.GET.items() c['request_FILES_items'] = self.request.FILES.items() c['request_COOKIES_items'] = self.request.COOKIES.items() # Check whether exception info is available if self.exc_type: c['exception_type'] = self.exc_type.__name__ if self.exc_value: c['exception_value'] = str(self.exc_value) if frames: c['lastframe'] = frames[-1] return c def get_traceback_html(self): """Return HTML version of debug 500 HTTP error page.""" with Path(CURRENT_DIR, 'templates', 'technical_500.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context(self.get_traceback_data(), use_l10n=False) return t.render(c) def get_traceback_text(self): """Return plain text version of debug 500 HTTP error page.""" with Path(CURRENT_DIR, 'templates', 'technical_500.txt').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False) return t.render(c) def _get_source(self, filename, loader, module_name): source = None if hasattr(loader, 'get_source'): try: source = loader.get_source(module_name) except ImportError: pass if source is not None: source = source.splitlines() if source is None: try: with open(filename, 'rb') as fp: source = fp.read().splitlines() except OSError: pass return source def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None): """ Return context_lines before and after lineno from file. Return (pre_context_lineno, pre_context, context_line, post_context). """ source = self._get_source(filename, loader, module_name) if source is None: return None, [], None, [] # If we just read the source from a file, or if the loader did not # apply tokenize.detect_encoding to decode the source into a # string, then we should do that ourselves. if isinstance(source[0], bytes): encoding = 'ascii' for line in source[:2]: # File coding may be specified. 
Match pattern from PEP-263 # (https://www.python.org/dev/peps/pep-0263/) match = re.search(br'coding[:=]\s*([-\w.]+)', line) if match: encoding = match.group(1).decode('ascii') break source = [str(sline, encoding, 'replace') for sline in source] lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines try: pre_context = source[lower_bound:lineno] context_line = source[lineno] post_context = source[lineno + 1:upper_bound] except IndexError: return None, [], None, [] return lower_bound, pre_context, context_line, post_context def get_traceback_frames(self): def explicit_or_implicit_cause(exc_value): explicit = getattr(exc_value, '__cause__', None) implicit = getattr(exc_value, '__context__', None) return explicit or implicit # Get the exception and all its causes exceptions = [] exc_value = self.exc_value while exc_value: exceptions.append(exc_value) exc_value = explicit_or_implicit_cause(exc_value) if exc_value in exceptions: # Avoid infinite loop if there's a cyclic reference (#29393). break frames = [] # No exceptions were supplied to ExceptionReporter if not exceptions: return frames # In case there's just one exception, take the traceback from self.tb exc_value = exceptions.pop() tb = self.tb if not exceptions else exc_value.__traceback__ while tb is not None: # Support for __traceback_hide__ which is used by a few libraries # to hide internal frames. if tb.tb_frame.f_locals.get('__traceback_hide__'): tb = tb.tb_next continue filename = tb.tb_frame.f_code.co_filename function = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno - 1 loader = tb.tb_frame.f_globals.get('__loader__') module_name = tb.tb_frame.f_globals.get('__name__') or '' pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file( filename, lineno, 7, loader, module_name, ) if pre_context_lineno is None: pre_context_lineno = lineno pre_context = [] context_line = '<source code not available>' post_context = [] frames.append({ 'exc_cause': explicit_or_implicit_cause(exc_value), 'exc_cause_explicit': getattr(exc_value, '__cause__', True), 'tb': tb, 'type': 'django' if module_name.startswith('django.') else 'user', 'filename': filename, 'function': function, 'lineno': lineno + 1, 'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame), 'id': id(tb), 'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context, 'pre_context_lineno': pre_context_lineno + 1, }) # If the traceback for current exception is consumed, try the # other exception. if not tb.tb_next and exceptions: exc_value = exceptions.pop() tb = exc_value.__traceback__ else: tb = tb.tb_next return frames def technical_404_response(request, exception): """Create a technical 404 error response. 
`exception` is the Http404.""" try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): tried = [] else: if (not tried or ( # empty URLconf request.path == '/' and len(tried) == 1 and # default URLconf len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin' )): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Http404: pass else: obj = resolver_match.func if hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) with Path(CURRENT_DIR, 'templates', 'technical_404.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) reporter_filter = get_default_exception_reporter_filter() c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': str(exception), 'request': request, 'settings': reporter_filter.get_safe_settings(), 'raising_view_name': caller, }) return HttpResponseNotFound(t.render(c), content_type='text/html') def default_urlconf(request): """Create an empty URLconf 404 error response.""" with Path(CURRENT_DIR, 'templates', 'default_urlconf.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context({ 'version': get_docs_version(), }) return HttpResponse(t.render(c), content_type='text/html')
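

# Illustrative sketch, not part of the public API: cleanse_setting() redacts
# any key matching hidden_settings and recurses into dicts, which is what
# get_safe_settings() and get_safe_request_meta() rely on above.
def _example_cleanse_setting():
    reporter_filter = SafeExceptionReporterFilter()
    cleansed = reporter_filter.cleanse_setting('DATABASES', {
        'default': {'NAME': 'app_db', 'PASSWORD': 'hunter2'},
    })
    # 'PASSWORD' matches the PASS pattern and is starred out; 'NAME' is kept.
    assert cleansed['default']['PASSWORD'] == reporter_filter.cleansed_substitute
    assert cleansed['default']['NAME'] == 'app_db'
    return cleansed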
""" Default Django settings. Override these with settings in the module pointed to by the DJANGO_SETTINGS_MODULE environment variable. """ # This is defined here as a do-nothing function because we can't import # django.utils.translation -- that module depends on the settings. def gettext_noop(s): return s #################### # CORE # #################### DEBUG = False # Whether the framework should propagate raw exceptions rather than catching # them. This is useful under some testing situations and should never be used # on a live site. DEBUG_PROPAGATE_EXCEPTIONS = False # People who get code error notifications. # In the format [('Full Name', '[email protected]'), ('Full Name', '[email protected]')] ADMINS = [] # List of IP addresses, as strings, that: # * See debug comments, when DEBUG is true # * Receive x-headers INTERNAL_IPS = [] # Hosts/domain names that are valid for this site. # "*" matches anything, ".example.com" matches example.com and all subdomains ALLOWED_HOSTS = [] # Local time zone for this installation. All choices can be found here: # https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all # systems may support all possibilities). When USE_TZ is True, this is # interpreted as the default user time zone. TIME_ZONE = 'America/Chicago' # If you set this to True, Django will use timezone-aware datetimes. USE_TZ = False # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # Languages we provide translations for, out of the box. LANGUAGES = [ ('af', gettext_noop('Afrikaans')), ('ar', gettext_noop('Arabic')), ('ar-dz', gettext_noop('Algerian Arabic')), ('ast', gettext_noop('Asturian')), ('az', gettext_noop('Azerbaijani')), ('bg', gettext_noop('Bulgarian')), ('be', gettext_noop('Belarusian')), ('bn', gettext_noop('Bengali')), ('br', gettext_noop('Breton')), ('bs', gettext_noop('Bosnian')), ('ca', gettext_noop('Catalan')), ('cs', gettext_noop('Czech')), ('cy', gettext_noop('Welsh')), ('da', gettext_noop('Danish')), ('de', gettext_noop('German')), ('dsb', gettext_noop('Lower Sorbian')), ('el', gettext_noop('Greek')), ('en', gettext_noop('English')), ('en-au', gettext_noop('Australian English')), ('en-gb', gettext_noop('British English')), ('eo', gettext_noop('Esperanto')), ('es', gettext_noop('Spanish')), ('es-ar', gettext_noop('Argentinian Spanish')), ('es-co', gettext_noop('Colombian Spanish')), ('es-mx', gettext_noop('Mexican Spanish')), ('es-ni', gettext_noop('Nicaraguan Spanish')), ('es-ve', gettext_noop('Venezuelan Spanish')), ('et', gettext_noop('Estonian')), ('eu', gettext_noop('Basque')), ('fa', gettext_noop('Persian')), ('fi', gettext_noop('Finnish')), ('fr', gettext_noop('French')), ('fy', gettext_noop('Frisian')), ('ga', gettext_noop('Irish')), ('gd', gettext_noop('Scottish Gaelic')), ('gl', gettext_noop('Galician')), ('he', gettext_noop('Hebrew')), ('hi', gettext_noop('Hindi')), ('hr', gettext_noop('Croatian')), ('hsb', gettext_noop('Upper Sorbian')), ('hu', gettext_noop('Hungarian')), ('hy', gettext_noop('Armenian')), ('ia', gettext_noop('Interlingua')), ('id', gettext_noop('Indonesian')), ('io', gettext_noop('Ido')), ('is', gettext_noop('Icelandic')), ('it', gettext_noop('Italian')), ('ja', gettext_noop('Japanese')), ('ka', gettext_noop('Georgian')), ('kab', gettext_noop('Kabyle')), ('kk', gettext_noop('Kazakh')), ('km', gettext_noop('Khmer')), ('kn', gettext_noop('Kannada')), ('ko', gettext_noop('Korean')), ('lb', gettext_noop('Luxembourgish')), 
('lt', gettext_noop('Lithuanian')), ('lv', gettext_noop('Latvian')), ('mk', gettext_noop('Macedonian')), ('ml', gettext_noop('Malayalam')), ('mn', gettext_noop('Mongolian')), ('mr', gettext_noop('Marathi')), ('my', gettext_noop('Burmese')), ('nb', gettext_noop('Norwegian Bokmål')), ('ne', gettext_noop('Nepali')), ('nl', gettext_noop('Dutch')), ('nn', gettext_noop('Norwegian Nynorsk')), ('os', gettext_noop('Ossetic')), ('pa', gettext_noop('Punjabi')), ('pl', gettext_noop('Polish')), ('pt', gettext_noop('Portuguese')), ('pt-br', gettext_noop('Brazilian Portuguese')), ('ro', gettext_noop('Romanian')), ('ru', gettext_noop('Russian')), ('sk', gettext_noop('Slovak')), ('sl', gettext_noop('Slovenian')), ('sq', gettext_noop('Albanian')), ('sr', gettext_noop('Serbian')), ('sr-latn', gettext_noop('Serbian Latin')), ('sv', gettext_noop('Swedish')), ('sw', gettext_noop('Swahili')), ('ta', gettext_noop('Tamil')), ('te', gettext_noop('Telugu')), ('th', gettext_noop('Thai')), ('tr', gettext_noop('Turkish')), ('tt', gettext_noop('Tatar')), ('udm', gettext_noop('Udmurt')), ('uk', gettext_noop('Ukrainian')), ('ur', gettext_noop('Urdu')), ('uz', gettext_noop('Uzbek')), ('vi', gettext_noop('Vietnamese')), ('zh-hans', gettext_noop('Simplified Chinese')), ('zh-hant', gettext_noop('Traditional Chinese')), ] # Languages using BiDi (right-to-left) layout LANGUAGES_BIDI = ["he", "ar", "ar-dz", "fa", "ur"] # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True LOCALE_PATHS = [] # Settings for language cookie LANGUAGE_COOKIE_NAME = 'django_language' LANGUAGE_COOKIE_AGE = None LANGUAGE_COOKIE_DOMAIN = None LANGUAGE_COOKIE_PATH = '/' LANGUAGE_COOKIE_SECURE = False LANGUAGE_COOKIE_HTTPONLY = False LANGUAGE_COOKIE_SAMESITE = None # If you set this to True, Django will format dates, numbers and calendars # according to user current locale. USE_L10N = False # Not-necessarily-technical managers of the site. They get broken link # notifications and other various emails. MANAGERS = ADMINS # Default charset to use for all HttpResponse objects, if a MIME type isn't # manually specified. It's used to construct the Content-Type header. DEFAULT_CHARSET = 'utf-8' # Email address that error messages come from. SERVER_EMAIL = 'root@localhost' # Database connection info. If left empty, will default to the dummy backend. DATABASES = {} # Classes used to implement DB routing behavior. DATABASE_ROUTERS = [] # The email backend to use. For possible shortcuts see django.core.mail. # The default is to use the SMTP backend. # Third-party backends can be specified by providing a Python path # to a module that defines an EmailBackend class. EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # Host for sending email. EMAIL_HOST = 'localhost' # Port for sending email. EMAIL_PORT = 25 # Whether to send SMTP 'Date' header in the local time zone or in UTC. EMAIL_USE_LOCALTIME = False # Optional SMTP authentication information for EMAIL_HOST. EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = '' EMAIL_USE_TLS = False EMAIL_USE_SSL = False EMAIL_SSL_CERTFILE = None EMAIL_SSL_KEYFILE = None EMAIL_TIMEOUT = None # List of strings representing installed apps. INSTALLED_APPS = [] TEMPLATES = [] # Default form rendering class. FORM_RENDERER = 'django.forms.renderers.DjangoTemplates' # Default email address to use for various automated correspondence from # the site managers. 
DEFAULT_FROM_EMAIL = 'webmaster@localhost' # Subject-line prefix for email messages send with django.core.mail.mail_admins # or ...mail_managers. Make sure to include the trailing space. EMAIL_SUBJECT_PREFIX = '[Django] ' # Whether to append trailing slashes to URLs. APPEND_SLASH = True # Whether to prepend the "www." subdomain to URLs that don't have it. PREPEND_WWW = False # Override the server-derived value of SCRIPT_NAME FORCE_SCRIPT_NAME = None # List of compiled regular expression objects representing User-Agent strings # that are not allowed to visit any page, systemwide. Use this for bad # robots/crawlers. Here are a few examples: # import re # DISALLOWED_USER_AGENTS = [ # re.compile(r'^NaverBot.*'), # re.compile(r'^EmailSiphon.*'), # re.compile(r'^SiteSucker.*'), # re.compile(r'^sohu-search'), # ] DISALLOWED_USER_AGENTS = [] ABSOLUTE_URL_OVERRIDES = {} # List of compiled regular expression objects representing URLs that need not # be reported by BrokenLinkEmailsMiddleware. Here are a few examples: # import re # IGNORABLE_404_URLS = [ # re.compile(r'^/apple-touch-icon.*\.png$'), # re.compile(r'^/favicon.ico$'), # re.compile(r'^/robots.txt$'), # re.compile(r'^/phpmyadmin/'), # re.compile(r'\.(cgi|php|pl)$'), # ] IGNORABLE_404_URLS = [] # A secret key for this particular Django installation. Used in secret-key # hashing algorithms. Set this in your settings, or Django will complain # loudly. SECRET_KEY = '' # Default file storage mechanism that holds media. DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Example: "/var/www/example.com/static/" STATIC_ROOT = None # URL that handles the static files served from STATIC_ROOT. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = None # List of upload handler classes to be applied in order. FILE_UPLOAD_HANDLERS = [ 'django.core.files.uploadhandler.MemoryFileUploadHandler', 'django.core.files.uploadhandler.TemporaryFileUploadHandler', ] # Maximum size, in bytes, of a request before it will be streamed to the # file system instead of into memory. FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB # Maximum size in bytes of request data (excluding file uploads) that will be # read before a SuspiciousOperation (RequestDataTooBig) is raised. DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB # Maximum number of GET/POST parameters that will be read before a # SuspiciousOperation (TooManyFieldsSent) is raised. DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000 # Directory in which upload streamed files will be temporarily saved. A value of # `None` will make Django use the operating system's default temporary directory # (i.e. "/tmp" on *nix systems). FILE_UPLOAD_TEMP_DIR = None # The numeric mode to set newly-uploaded files to. The value should be a mode # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories. FILE_UPLOAD_PERMISSIONS = 0o644 # The numeric mode to assign to newly-created directories, when uploading files. # The value should be a mode as you'd pass to os.chmod; # see https://docs.python.org/library/os.html#files-and-directories. 
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None # Python module path where user will place custom format definition. # The directory where this setting is pointing should contain subdirectories # named as the locales, containing a formats.py file # (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use) FORMAT_MODULE_PATH = None # Default formatting for date objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'N j, Y' # Default formatting for datetime objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATETIME_FORMAT = 'N j, Y, P' # Default formatting for time objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date TIME_FORMAT = 'P' # Default formatting for date objects when only the year and month are relevant. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date YEAR_MONTH_FORMAT = 'F Y' # Default formatting for date objects when only the month and day are relevant. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date MONTH_DAY_FORMAT = 'F j' # Default short formatting for date objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date SHORT_DATE_FORMAT = 'm/d/Y' # Default short formatting for datetime objects. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date SHORT_DATETIME_FORMAT = 'm/d/Y P' # Default formats to be used when parsing dates from input boxes, in order # See all available format string here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates DATE_INPUT_FORMATS = [ '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06' '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006' '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006' '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006' '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' ] # Default formats to be used when parsing times from input boxes, in order # See all available format string here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates TIME_INPUT_FORMATS = [ '%H:%M:%S', # '14:30:59' '%H:%M:%S.%f', # '14:30:59.000200' '%H:%M', # '14:30' ] # Default formats to be used when parsing dates and times from input boxes, # in order # See all available format string here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates DATETIME_INPUT_FORMATS = [ '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200' '%m/%d/%Y %H:%M', # '10/25/2006 14:30' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59' '%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200' '%m/%d/%y %H:%M', # '10/25/06 14:30' ] # First day of week, to be used on calendars # 0 means Sunday, 1 means Monday... FIRST_DAY_OF_WEEK = 0 # Decimal separator symbol DECIMAL_SEPARATOR = '.' 
# Boolean that sets whether to add thousand separator when formatting numbers USE_THOUSAND_SEPARATOR = False # Number of digits that will be together, when splitting them by # THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands... NUMBER_GROUPING = 0 # Thousand separator symbol THOUSAND_SEPARATOR = ',' # The tablespaces to use for each model when not specified otherwise. DEFAULT_TABLESPACE = '' DEFAULT_INDEX_TABLESPACE = '' # Default X-Frame-Options header value X_FRAME_OPTIONS = 'DENY' USE_X_FORWARDED_HOST = False USE_X_FORWARDED_PORT = False # The Python dotted path to the WSGI application that Django's internal server # (runserver) will use. If `None`, the return value of # 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same # behavior as previous versions of Django. Otherwise this should point to an # actual WSGI application object. WSGI_APPLICATION = None # If your Django app is behind a proxy that sets a header to specify secure # connections, AND that proxy ensures that user-submitted headers with the # same name are ignored (so that people can't spoof it), set this value to # a tuple of (header_name, header_value). For any requests that come in with # that header/value, request.is_secure() will return True. # WARNING! Only set this if you fully understand what you're doing. Otherwise, # you may be opening yourself up to a security risk. SECURE_PROXY_SSL_HEADER = None ############## # MIDDLEWARE # ############## # List of middleware to use. Order is important; in the request phase, these # middleware will be applied in the order given, and in the response # phase the middleware will be applied in reverse order. MIDDLEWARE = [] ############ # SESSIONS # ############ # Cache to store session data if using the cache session backend. SESSION_CACHE_ALIAS = 'default' # Cookie name. This can be whatever you want. SESSION_COOKIE_NAME = 'sessionid' # Age of cookie, in seconds (default: 2 weeks). SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # A string like "example.com", or None for standard domain cookie. SESSION_COOKIE_DOMAIN = None # Whether the session cookie should be secure (https:// only). SESSION_COOKIE_SECURE = False # The path of the session cookie. SESSION_COOKIE_PATH = '/' # Whether to use the HttpOnly flag. SESSION_COOKIE_HTTPONLY = True # Whether to set the flag restricting cookie leaks on cross-site requests. # This can be 'Lax', 'Strict', or None to disable the flag. SESSION_COOKIE_SAMESITE = 'Lax' # Whether to save the session data on every request. SESSION_SAVE_EVERY_REQUEST = False # Whether a user's session cookie expires when the Web browser is closed. SESSION_EXPIRE_AT_BROWSER_CLOSE = False # The module to store session data SESSION_ENGINE = 'django.contrib.sessions.backends.db' # Directory to store session files if using the file session module. If None, # the backend will use a sensible default. SESSION_FILE_PATH = None # class to serialize session data SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' ######### # CACHE # ######### # The cache backends to use. 
CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } } CACHE_MIDDLEWARE_KEY_PREFIX = '' CACHE_MIDDLEWARE_SECONDS = 600 CACHE_MIDDLEWARE_ALIAS = 'default' ################## # AUTHENTICATION # ################## AUTH_USER_MODEL = 'auth.User' AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend'] LOGIN_URL = '/accounts/login/' LOGIN_REDIRECT_URL = '/accounts/profile/' LOGOUT_REDIRECT_URL = None # The number of days a password reset link is valid for PASSWORD_RESET_TIMEOUT_DAYS = 3 # The minimum number of seconds a password reset link is valid for # (default: 3 days). PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3 # the first hasher in this list is the preferred algorithm. any # password using different algorithms will be converted automatically # upon login PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', ] AUTH_PASSWORD_VALIDATORS = [] ########### # SIGNING # ########### SIGNING_BACKEND = 'django.core.signing.TimestampSigner' ######## # CSRF # ######## # Dotted path to callable to be used as view when a request is # rejected by the CSRF middleware. CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure' # Settings for CSRF cookie. CSRF_COOKIE_NAME = 'csrftoken' CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52 CSRF_COOKIE_DOMAIN = None CSRF_COOKIE_PATH = '/' CSRF_COOKIE_SECURE = False CSRF_COOKIE_HTTPONLY = False CSRF_COOKIE_SAMESITE = 'Lax' CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN' CSRF_TRUSTED_ORIGINS = [] CSRF_USE_SESSIONS = False ############ # MESSAGES # ############ # Class to use as messages backend MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage' # Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within # django.contrib.messages to avoid imports in this settings file. ########### # LOGGING # ########### # The callable to use to configure logging LOGGING_CONFIG = 'logging.config.dictConfig' # Custom logging configuration. LOGGING = {} # Default exception reporter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER = 'django.views.debug.ExceptionReporter' # Default exception reporter filter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter' ########### # TESTING # ########### # The name of the class to use to run the test suite TEST_RUNNER = 'django.test.runner.DiscoverRunner' # Apps that don't need to be serialized at test database creation time # (only apps with migrations are to start with) TEST_NON_SERIALIZED_APPS = [] ############ # FIXTURES # ############ # The list of directories to search for fixtures FIXTURE_DIRS = [] ############### # STATICFILES # ############### # A list of locations of additional static files STATICFILES_DIRS = [] # The default file storage backend used during the build process STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage' # List of finder classes that know how to find static files in # various locations. 
STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ] ############## # MIGRATIONS # ############## # Migration module overrides for apps, by app label. MIGRATION_MODULES = {} ################# # SYSTEM CHECKS # ################# # List of all issues generated by system checks that should be silenced. Light # issues like warnings, infos or debugs will not generate a message. Silencing # serious issues like errors and criticals does not result in hiding the # message, but Django will not stop you from e.g. running server. SILENCED_SYSTEM_CHECKS = [] ####################### # SECURITY MIDDLEWARE # ####################### SECURE_BROWSER_XSS_FILTER = False SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_HSTS_INCLUDE_SUBDOMAINS = False SECURE_HSTS_PRELOAD = False SECURE_HSTS_SECONDS = 0 SECURE_REDIRECT_EXEMPT = [] SECURE_REFERRER_POLICY = None SECURE_SSL_HOST = None SECURE_SSL_REDIRECT = False
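# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of global_settings.py). Every name
# above is only a default: django.conf.settings lazily loads the module named
# by DJANGO_SETTINGS_MODULE -- or the values passed to settings.configure() --
# and falls back to these defaults for anything left unset. A minimal
# standalone demonstration using configure():

from django.conf import settings

settings.configure(DEBUG=True, TIME_ZONE='UTC')  # explicit overrides

assert settings.DEBUG is True              # overridden above
assert settings.TIME_ZONE == 'UTC'         # overridden above
assert settings.APPEND_SLASH is True       # default from this file
assert settings.LANGUAGE_CODE == 'en-us'   # default from this file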
""" Django's standard crypto functions and utilities. """ import hashlib import hmac import secrets from django.conf import settings from django.utils.encoding import force_bytes class InvalidAlgorithm(ValueError): """Algorithm is not supported by hashlib.""" pass def salted_hmac(key_salt, value, secret=None, *, algorithm='sha1'): """ Return the HMAC of 'value', using a key generated from key_salt and a secret (which defaults to settings.SECRET_KEY). Default algorithm is SHA1, but any algorithm name supported by hashlib.new() can be passed. A different key_salt should be passed in for every application of HMAC. """ if secret is None: secret = settings.SECRET_KEY key_salt = force_bytes(key_salt) secret = force_bytes(secret) try: hasher = getattr(hashlib, algorithm) except AttributeError as e: raise InvalidAlgorithm( '%r is not an algorithm accepted by the hashlib module.' % algorithm ) from e # We need to generate a derived key from our base key. We can do this by # passing the key_salt and our base key through a pseudo-random function. key = hasher(key_salt + secret).digest() # If len(key_salt + secret) > block size of the hash algorithm, the above # line is redundant and could be replaced by key = key_salt + secret, since # the hmac module does the same thing for keys longer than the block size. # However, we need to ensure that we *always* do this. return hmac.new(key, msg=force_bytes(value), digestmod=hasher) def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'): """ Return a securely generated random string. The default length of 12 with the a-z, A-Z, 0-9 character set returns a 71-bit value. log_2((26+26+10)^12) =~ 71 bits """ return ''.join(secrets.choice(allowed_chars) for i in range(length)) def constant_time_compare(val1, val2): """Return True if the two strings are equal, False otherwise.""" return secrets.compare_digest(force_bytes(val1), force_bytes(val2)) def pbkdf2(password, salt, iterations, dklen=0, digest=None): """Return the hash of password using pbkdf2.""" if digest is None: digest = hashlib.sha256 dklen = dklen or None password = force_bytes(password) salt = force_bytes(salt) return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)
import logging import logging.config # needed when logging_config doesn't start with logging.config from copy import copy from django.conf import settings from django.core import mail from django.core.mail import get_connection from django.core.management.color import color_style from django.utils.module_loading import import_string request_logger = logging.getLogger('django.request') # Default logging for Django. This sends an email to the site admins on every # HTTP 500 error. Depending on DEBUG, all other log records are either sent to # the console (DEBUG=True) or discarded (DEBUG=False) by means of the # require_debug_true filter. DEFAULT_LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse', }, 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue', }, }, 'formatters': { 'django.server': { '()': 'django.utils.log.ServerFormatter', 'format': '[{server_time}] {message}', 'style': '{', } }, 'handlers': { 'console': { 'level': 'INFO', 'filters': ['require_debug_true'], 'class': 'logging.StreamHandler', }, 'django.server': { 'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'django.server', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django': { 'handlers': ['console', 'mail_admins'], 'level': 'INFO', }, 'django.server': { 'handlers': ['django.server'], 'level': 'INFO', 'propagate': False, }, } } def configure_logging(logging_config, logging_settings): if logging_config: # First find the logging configuration function ... logging_config_func = import_string(logging_config) logging.config.dictConfig(DEFAULT_LOGGING) # ... then invoke it with the logging settings if logging_settings: logging_config_func(logging_settings) class AdminEmailHandler(logging.Handler): """An exception log handler that emails log entries to site admins. If the request is passed as the first argument to the log record, request data will be provided in the email report. """ def __init__(self, include_html=False, email_backend=None, reporter_class=None): super().__init__() self.include_html = include_html self.email_backend = email_backend self.reporter_class = import_string(reporter_class or settings.DEFAULT_EXCEPTION_REPORTER) def emit(self, record): try: request = record.request subject = '%s (%s IP): %s' % ( record.levelname, ('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS else 'EXTERNAL'), record.getMessage() ) except Exception: subject = '%s: %s' % ( record.levelname, record.getMessage() ) request = None subject = self.format_subject(subject) # Since we add a nicely formatted traceback on our own, create a copy # of the log record without the exception data. 
no_exc_record = copy(record) no_exc_record.exc_info = None no_exc_record.exc_text = None if record.exc_info: exc_info = record.exc_info else: exc_info = (None, record.getMessage(), None) reporter = self.reporter_class(request, is_email=True, *exc_info) message = "%s\n\n%s" % (self.format(no_exc_record), reporter.get_traceback_text()) html_message = reporter.get_traceback_html() if self.include_html else None self.send_mail(subject, message, fail_silently=True, html_message=html_message) def send_mail(self, subject, message, *args, **kwargs): mail.mail_admins(subject, message, *args, connection=self.connection(), **kwargs) def connection(self): return get_connection(backend=self.email_backend, fail_silently=True) def format_subject(self, subject): """ Escape CR and LF characters. """ return subject.replace('\n', '\\n').replace('\r', '\\r') class CallbackFilter(logging.Filter): """ A logging filter that checks the return value of a given callable (which takes the record-to-be-logged as its only parameter) to decide whether to log a record. """ def __init__(self, callback): self.callback = callback def filter(self, record): if self.callback(record): return 1 return 0 class RequireDebugFalse(logging.Filter): def filter(self, record): return not settings.DEBUG class RequireDebugTrue(logging.Filter): def filter(self, record): return settings.DEBUG class ServerFormatter(logging.Formatter): def __init__(self, *args, **kwargs): self.style = color_style() super().__init__(*args, **kwargs) def format(self, record): msg = record.msg status_code = getattr(record, 'status_code', None) if status_code: if 200 <= status_code < 300: # Put 2XX first, since it should be the common case msg = self.style.HTTP_SUCCESS(msg) elif 100 <= status_code < 200: msg = self.style.HTTP_INFO(msg) elif status_code == 304: msg = self.style.HTTP_NOT_MODIFIED(msg) elif 300 <= status_code < 400: msg = self.style.HTTP_REDIRECT(msg) elif status_code == 404: msg = self.style.HTTP_NOT_FOUND(msg) elif 400 <= status_code < 500: msg = self.style.HTTP_BAD_REQUEST(msg) else: # Any 5XX, or any other status code msg = self.style.HTTP_SERVER_ERROR(msg) if self.uses_server_time() and not hasattr(record, 'server_time'): record.server_time = self.formatTime(record, self.datefmt) record.msg = msg return super().format(record) def uses_server_time(self): return self._fmt.find('{server_time}') >= 0 def log_response(message, *args, response=None, request=None, logger=request_logger, level=None, exc_info=None): """ Log errors based on HttpResponse status. Log 5xx responses as errors and 4xx responses as warnings (unless a level is given as a keyword argument). The HttpResponse status_code and the request are passed to the logger's extra parameter. """ # Check if the response has already been logged. Multiple requests to log # the same response can be received in some cases, e.g., when the # response is the result of an exception and is logged at the time the # exception is caught so that the exc_info can be recorded. if getattr(response, '_has_been_logged', False): return if level is None: if response.status_code >= 500: level = 'error' elif response.status_code >= 400: level = 'warning' else: level = 'info' getattr(logger, level)( message, *args, extra={ 'status_code': response.status_code, 'request': request, }, exc_info=exc_info, ) response._has_been_logged = True
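# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of django/utils/log.py). CallbackFilter
# adapts any predicate to the stdlib logging filter protocol; here a
# hypothetical filter drops 404 records while keeping everything else.

import logging

from django.utils.log import CallbackFilter

def skip_404s(record):
    # CallbackFilter.filter() treats a falsy return value as "drop".
    return getattr(record, 'status_code', None) != 404

handler = logging.StreamHandler()
handler.addFilter(CallbackFilter(skip_404s))

logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.error('kept')                                 # passes the filter
logger.error('dropped', extra={'status_code': 404})  # suppressed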
""" Cache middleware. If enabled, each Django-powered page will be cached based on URL. The canonical way to enable cache middleware is to set ``UpdateCacheMiddleware`` as your first piece of middleware, and ``FetchFromCacheMiddleware`` as the last:: MIDDLEWARE = [ 'django.middleware.cache.UpdateCacheMiddleware', ... 'django.middleware.cache.FetchFromCacheMiddleware' ] This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run last during the response phase, which processes middleware bottom-up; ``FetchFromCacheMiddleware`` needs to run last during the request phase, which processes middleware top-down. The single-class ``CacheMiddleware`` can be used for some simple sites. However, if any other piece of middleware needs to affect the cache key, you'll need to use the two-part ``UpdateCacheMiddleware`` and ``FetchFromCacheMiddleware``. This'll most often happen when you're using Django's ``LocaleMiddleware``. More details about how the caching works: * Only GET or HEAD-requests with status code 200 are cached. * The number of seconds each page is stored for is set by the "max-age" section of the response's "Cache-Control" header, falling back to the CACHE_MIDDLEWARE_SECONDS setting if the section was not found. * This middleware expects that a HEAD request is answered with the same response headers exactly like the corresponding GET request. * When a hit occurs, a shallow copy of the original response object is returned from process_request. * Pages will be cached based on the contents of the request headers listed in the response's "Vary" header. * This middleware also sets ETag, Last-Modified, Expires and Cache-Control headers on the response object. """ from django.conf import settings from django.core.cache import DEFAULT_CACHE_ALIAS, caches from django.utils.cache import ( get_cache_key, get_max_age, has_vary_header, learn_cache_key, patch_response_headers, ) from django.utils.deprecation import MiddlewareMixin class UpdateCacheMiddleware(MiddlewareMixin): """ Response-phase cache middleware that updates the cache if the response is cacheable. Must be used as part of the two-part update/fetch cache middleware. UpdateCacheMiddleware must be the first piece of middleware in MIDDLEWARE so that it'll get called last during the response phase. """ def __init__(self, get_response=None): self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS self.page_timeout = None self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache = caches[self.cache_alias] self.get_response = get_response def _should_update_cache(self, request, response): return hasattr(request, '_cache_update_cache') and request._cache_update_cache def process_response(self, request, response): """Set the cache, if needed.""" if not self._should_update_cache(request, response): # We don't need to update the cache, just return. return response if response.streaming or response.status_code not in (200, 304): return response # Don't cache responses that set a user-specific (and maybe security # sensitive) cookie in response to a cookie-less request. if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'): return response # Don't cache a response with 'Cache-Control: private' if 'private' in response.get('Cache-Control', ()): return response # Page timeout takes precedence over the "max-age" and the default # cache timeout. 
timeout = self.page_timeout if timeout is None: # The timeout from the "max-age" section of the "Cache-Control" # header takes precedence over the default cache timeout. timeout = get_max_age(response) if timeout is None: timeout = self.cache_timeout elif timeout == 0: # max-age was set to 0, don't cache. return response patch_response_headers(response, timeout) if timeout and response.status_code == 200: cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache) if hasattr(response, 'render') and callable(response.render): response.add_post_render_callback( lambda r: self.cache.set(cache_key, r, timeout) ) else: self.cache.set(cache_key, response, timeout) return response class FetchFromCacheMiddleware(MiddlewareMixin): """ Request-phase cache middleware that fetches a page from the cache. Must be used as part of the two-part update/fetch cache middleware. FetchFromCacheMiddleware must be the last piece of middleware in MIDDLEWARE so that it'll get called last during the request phase. """ def __init__(self, get_response=None): self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache = caches[self.cache_alias] self.get_response = get_response def process_request(self, request): """ Check whether the page is already cached and return the cached version if available. """ if request.method not in ('GET', 'HEAD'): request._cache_update_cache = False return None # Don't bother checking the cache. # try and get the cached GET response cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache) if cache_key is None: request._cache_update_cache = True return None # No cache information available, need to rebuild. response = self.cache.get(cache_key) # if it wasn't found and we are looking for a HEAD, try looking just for that if response is None and request.method == 'HEAD': cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache) response = self.cache.get(cache_key) if response is None: request._cache_update_cache = True return None # No cache information available, need to rebuild. # hit, return cached response request._cache_update_cache = False return response class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware): """ Cache middleware that provides basic behavior for many simple sites. Also used as the hook point for the cache decorator, which is generated using the decorator-from-middleware utility. """ def __init__(self, get_response=None, cache_timeout=None, page_timeout=None, **kwargs): self.get_response = get_response # We need to differentiate between "provided, but using default value", # and "not provided". If the value is provided using a default, then # we fall back to system defaults. If it is not provided at all, # we need to use middleware defaults. try: key_prefix = kwargs['key_prefix'] if key_prefix is None: key_prefix = '' except KeyError: key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX self.key_prefix = key_prefix try: cache_alias = kwargs['cache_alias'] if cache_alias is None: cache_alias = DEFAULT_CACHE_ALIAS except KeyError: cache_alias = settings.CACHE_MIDDLEWARE_ALIAS self.cache_alias = cache_alias if cache_timeout is None: cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS self.cache_timeout = cache_timeout self.page_timeout = page_timeout self.cache = caches[self.cache_alias]
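# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of django/middleware/cache.py).
# CacheMiddleware is the hook point for the cache_page() decorator, which
# django.views.decorators.cache generates from it with the
# decorator-from-middleware utility mentioned in the docstring above. A
# hypothetical per-view example:

from django.http import HttpResponse
from django.views.decorators.cache import cache_page

@cache_page(60 * 15, key_prefix='news')  # CacheMiddleware(cache_timeout=900, ...)
def news_index(request):  # hypothetical view
    return HttpResponse('latest headlines')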
""" This module converts requested URLs to callback view functions. URLResolver is the main class here. Its resolve() method takes a URL (as a string) and returns a ResolverMatch object which provides access to all attributes of the resolved URL match. """ import functools import inspect import re import string from importlib import import_module from urllib.parse import quote from asgiref.local import Local from django.conf import settings from django.core.checks import Error, Warning from django.core.checks.urls import check_resolver from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.utils.datastructures import MultiValueDict from django.utils.functional import cached_property from django.utils.http import RFC3986_SUBDELIMS, escape_leading_slashes from django.utils.regex_helper import _lazy_re_compile, normalize from django.utils.translation import get_language from .converters import get_converter from .exceptions import NoReverseMatch, Resolver404 from .utils import get_callable class ResolverMatch: def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None, route=None): self.func = func self.args = args self.kwargs = kwargs self.url_name = url_name self.route = route # If a URLRegexResolver doesn't have a namespace or app_name, it passes # in an empty value. self.app_names = [x for x in app_names if x] if app_names else [] self.app_name = ':'.join(self.app_names) self.namespaces = [x for x in namespaces if x] if namespaces else [] self.namespace = ':'.join(self.namespaces) if not hasattr(func, '__name__'): # A class-based view self._func_path = func.__class__.__module__ + '.' + func.__class__.__name__ else: # A function-based view self._func_path = func.__module__ + '.' + func.__name__ view_path = url_name or self._func_path self.view_name = ':'.join(self.namespaces + [view_path]) def __getitem__(self, index): return (self.func, self.args, self.kwargs)[index] def __repr__(self): return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s, route=%s)" % ( self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces, self.route, ) def get_resolver(urlconf=None): if urlconf is None: urlconf = settings.ROOT_URLCONF return _get_cached_resolver(urlconf) @functools.lru_cache(maxsize=None) def _get_cached_resolver(urlconf=None): return URLResolver(RegexPattern(r'^/'), urlconf) @functools.lru_cache(maxsize=None) def get_ns_resolver(ns_pattern, resolver, converters): # Build a namespaced resolver for the given parent URLconf pattern. # This makes it possible to have captured parameters in the parent # URLconf pattern. pattern = RegexPattern(ns_pattern) pattern.converters = dict(converters) ns_resolver = URLResolver(pattern, resolver.url_patterns) return URLResolver(RegexPattern(r'^/'), [ns_resolver]) class LocaleRegexDescriptor: def __init__(self, attr): self.attr = attr def __get__(self, instance, cls=None): """ Return a compiled regular expression based on the active language. """ if instance is None: return self # As a performance optimization, if the given regex string is a regular # string (not a lazily-translated string proxy), compile it once and # avoid per-language compilation. 
pattern = getattr(instance, self.attr) if isinstance(pattern, str): instance.__dict__['regex'] = instance._compile(pattern) return instance.__dict__['regex'] language_code = get_language() if language_code not in instance._regex_dict: instance._regex_dict[language_code] = instance._compile(str(pattern)) return instance._regex_dict[language_code] class CheckURLMixin: def describe(self): """ Format the URL pattern for display in warning messages. """ description = "'{}'".format(self) if self.name: description += " [name='{}']".format(self.name) return description def _check_pattern_startswith_slash(self): """ Check that the pattern does not begin with a forward slash. """ regex_pattern = self.regex.pattern if not settings.APPEND_SLASH: # Skip check as it can be useful to start a URL pattern with a slash # when APPEND_SLASH=False. return [] if regex_pattern.startswith(('/', '^/', '^\\/')) and not regex_pattern.endswith('/'): warning = Warning( "Your URL pattern {} has a route beginning with a '/'. Remove this " "slash as it is unnecessary. If this pattern is targeted in an " "include(), ensure the include() pattern has a trailing '/'.".format( self.describe() ), id="urls.W002", ) return [warning] else: return [] class RegexPattern(CheckURLMixin): regex = LocaleRegexDescriptor('_regex') def __init__(self, regex, name=None, is_endpoint=False): self._regex = regex self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = {} def match(self, path): match = self.regex.search(path) if match: # If there are any named groups, use those as kwargs, ignoring # non-named groups. Otherwise, pass all non-named arguments as # positional arguments. kwargs = match.groupdict() args = () if kwargs else match.groups() kwargs = {k: v for k, v in kwargs.items() if v is not None} return path[match.end():], args, kwargs return None def check(self): warnings = [] warnings.extend(self._check_pattern_startswith_slash()) if not self._is_endpoint: warnings.extend(self._check_include_trailing_dollar()) return warnings def _check_include_trailing_dollar(self): regex_pattern = self.regex.pattern if regex_pattern.endswith('$') and not regex_pattern.endswith(r'\$'): return [Warning( "Your URL pattern {} uses include with a route ending with a '$'. " "Remove the dollar from the route to avoid problems including " "URLs.".format(self.describe()), id='urls.W001', )] else: return [] def _compile(self, regex): """Compile and return the given regular expression.""" try: return re.compile(regex) except re.error as e: raise ImproperlyConfigured( '"%s" is not a valid regular expression: %s' % (regex, e) ) from e def __str__(self): return str(self._regex) _PATH_PARAMETER_COMPONENT_RE = _lazy_re_compile( r'<(?:(?P<converter>[^>:]+):)?(?P<parameter>\w+)>' ) def _route_to_regex(route, is_endpoint=False): """ Convert a path pattern into a regular expression. Return the regular expression and a dictionary mapping the capture names to the converters. For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)' and {'pk': <django.urls.converters.IntConverter>}. """ if not set(route).isdisjoint(string.whitespace): raise ImproperlyConfigured("URL route '%s' cannot contain whitespace." 
% route) original_route = route parts = ['^'] converters = {} while True: match = _PATH_PARAMETER_COMPONENT_RE.search(route) if not match: parts.append(re.escape(route)) break parts.append(re.escape(route[:match.start()])) route = route[match.end():] parameter = match.group('parameter') if not parameter.isidentifier(): raise ImproperlyConfigured( "URL route '%s' uses parameter name %r which isn't a valid " "Python identifier." % (original_route, parameter) ) raw_converter = match.group('converter') if raw_converter is None: # If a converter isn't specified, the default is `str`. raw_converter = 'str' try: converter = get_converter(raw_converter) except KeyError as e: raise ImproperlyConfigured( 'URL route %r uses invalid converter %r.' % (original_route, raw_converter) ) from e converters[parameter] = converter parts.append('(?P<' + parameter + '>' + converter.regex + ')') if is_endpoint: parts.append('$') return ''.join(parts), converters class RoutePattern(CheckURLMixin): regex = LocaleRegexDescriptor('_route') def __init__(self, route, name=None, is_endpoint=False): self._route = route self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = _route_to_regex(str(route), is_endpoint)[1] def match(self, path): match = self.regex.search(path) if match: # RoutePattern doesn't allow non-named groups so args are ignored. kwargs = match.groupdict() for key, value in kwargs.items(): converter = self.converters[key] try: kwargs[key] = converter.to_python(value) except ValueError: return None return path[match.end():], (), kwargs return None def check(self): warnings = self._check_pattern_startswith_slash() route = self._route if '(?P<' in route or route.startswith('^') or route.endswith('$'): warnings.append(Warning( "Your URL pattern {} has a route that contains '(?P<', begins " "with a '^', or ends with a '$'. This was likely an oversight " "when migrating to django.urls.path().".format(self.describe()), id='2_0.W001', )) return warnings def _compile(self, route): return re.compile(_route_to_regex(route, self._is_endpoint)[0]) def __str__(self): return str(self._route) class LocalePrefixPattern: def __init__(self, prefix_default_language=True): self.prefix_default_language = prefix_default_language self.converters = {} @property def regex(self): # This is only used by reverse() and cached in _reverse_dict. return re.compile(self.language_prefix) @property def language_prefix(self): language_code = get_language() or settings.LANGUAGE_CODE if language_code == settings.LANGUAGE_CODE and not self.prefix_default_language: return '' else: return '%s/' % language_code def match(self, path): language_prefix = self.language_prefix if path.startswith(language_prefix): return path[len(language_prefix):], (), {} return None def check(self): return [] def describe(self): return "'{}'".format(self) def __str__(self): return self.language_prefix class URLPattern: def __init__(self, pattern, callback, default_args=None, name=None): self.pattern = pattern self.callback = callback # the view self.default_args = default_args or {} self.name = name def __repr__(self): return '<%s %s>' % (self.__class__.__name__, self.pattern.describe()) def check(self): warnings = self._check_pattern_name() warnings.extend(self.pattern.check()) return warnings def _check_pattern_name(self): """ Check that the pattern name does not contain a colon. """ if self.pattern.name is not None and ":" in self.pattern.name: warning = Warning( "Your URL pattern {} has a name including a ':'. 
Remove the colon, to " "avoid ambiguous namespace references.".format(self.pattern.describe()), id="urls.W003", ) return [warning] else: return [] def resolve(self, path): match = self.pattern.match(path) if match: new_path, args, kwargs = match # Pass any extra_kwargs as **kwargs. kwargs.update(self.default_args) return ResolverMatch(self.callback, args, kwargs, self.pattern.name, route=str(self.pattern)) @cached_property def lookup_str(self): """ A string that identifies the view (e.g. 'path.to.view_function' or 'path.to.ClassBasedView'). """ callback = self.callback if isinstance(callback, functools.partial): callback = callback.func if not hasattr(callback, '__name__'): return callback.__module__ + "." + callback.__class__.__name__ return callback.__module__ + "." + callback.__qualname__ class URLResolver: def __init__(self, pattern, urlconf_name, default_kwargs=None, app_name=None, namespace=None): self.pattern = pattern # urlconf_name is the dotted Python path to the module defining # urlpatterns. It may also be an object with an urlpatterns attribute # or urlpatterns itself. self.urlconf_name = urlconf_name self.callback = None self.default_kwargs = default_kwargs or {} self.namespace = namespace self.app_name = app_name self._reverse_dict = {} self._namespace_dict = {} self._app_dict = {} # set of dotted paths to all functions and classes that are used in # urlpatterns self._callback_strs = set() self._populated = False self._local = Local() def __repr__(self): if isinstance(self.urlconf_name, list) and self.urlconf_name: # Don't bother to output the whole list, it can be huge urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__ else: urlconf_repr = repr(self.urlconf_name) return '<%s %s (%s:%s) %s>' % ( self.__class__.__name__, urlconf_repr, self.app_name, self.namespace, self.pattern.describe(), ) def check(self): messages = [] for pattern in self.url_patterns: messages.extend(check_resolver(pattern)) messages.extend(self._check_custom_error_handlers()) return messages or self.pattern.check() def _check_custom_error_handlers(self): messages = [] # All handlers take (request, exception) arguments except handler500 # which takes (request). for status_code, num_parameters in [(400, 2), (403, 2), (404, 2), (500, 1)]: try: handler, param_dict = self.resolve_error_handler(status_code) except (ImportError, ViewDoesNotExist) as e: path = getattr(self.urlconf_module, 'handler%s' % status_code) msg = ( "The custom handler{status_code} view '{path}' could not be imported." ).format(status_code=status_code, path=path) messages.append(Error(msg, hint=str(e), id='urls.E008')) continue signature = inspect.signature(handler) args = [None] * num_parameters try: signature.bind(*args) except TypeError: msg = ( "The custom handler{status_code} view '{path}' does not " "take the correct number of arguments ({args})." ).format( status_code=status_code, path=handler.__module__ + '.' + handler.__qualname__, args='request, exception' if num_parameters == 2 else 'request', ) messages.append(Error(msg, id='urls.E007')) return messages def _populate(self): # Short-circuit if called recursively in this thread to prevent # infinite recursion. Concurrent threads may call this at the same # time and will need to continue, so set 'populating' on a # thread-local variable. 
if getattr(self._local, 'populating', False): return try: self._local.populating = True lookups = MultiValueDict() namespaces = {} apps = {} language_code = get_language() for url_pattern in reversed(self.url_patterns): p_pattern = url_pattern.pattern.regex.pattern if p_pattern.startswith('^'): p_pattern = p_pattern[1:] if isinstance(url_pattern, URLPattern): self._callback_strs.add(url_pattern.lookup_str) bits = normalize(url_pattern.pattern.regex.pattern) lookups.appendlist( url_pattern.callback, (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters) ) if url_pattern.name is not None: lookups.appendlist( url_pattern.name, (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters) ) else: # url_pattern is a URLResolver. url_pattern._populate() if url_pattern.app_name: apps.setdefault(url_pattern.app_name, []).append(url_pattern.namespace) namespaces[url_pattern.namespace] = (p_pattern, url_pattern) else: for name in url_pattern.reverse_dict: for matches, pat, defaults, converters in url_pattern.reverse_dict.getlist(name): new_matches = normalize(p_pattern + pat) lookups.appendlist( name, ( new_matches, p_pattern + pat, {**defaults, **url_pattern.default_kwargs}, {**self.pattern.converters, **url_pattern.pattern.converters, **converters} ) ) for namespace, (prefix, sub_pattern) in url_pattern.namespace_dict.items(): current_converters = url_pattern.pattern.converters sub_pattern.pattern.converters.update(current_converters) namespaces[namespace] = (p_pattern + prefix, sub_pattern) for app_name, namespace_list in url_pattern.app_dict.items(): apps.setdefault(app_name, []).extend(namespace_list) self._callback_strs.update(url_pattern._callback_strs) self._namespace_dict[language_code] = namespaces self._app_dict[language_code] = apps self._reverse_dict[language_code] = lookups self._populated = True finally: self._local.populating = False @property def reverse_dict(self): language_code = get_language() if language_code not in self._reverse_dict: self._populate() return self._reverse_dict[language_code] @property def namespace_dict(self): language_code = get_language() if language_code not in self._namespace_dict: self._populate() return self._namespace_dict[language_code] @property def app_dict(self): language_code = get_language() if language_code not in self._app_dict: self._populate() return self._app_dict[language_code] @staticmethod def _join_route(route1, route2): """Join two routes, without the starting ^ in the second route.""" if not route1: return route2 if route2.startswith('^'): route2 = route2[1:] return route1 + route2 def _is_callback(self, name): if not self._populated: self._populate() return name in self._callback_strs def resolve(self, path): path = str(path) # path may be a reverse_lazy object tried = [] match = self.pattern.match(path) if match: new_path, args, kwargs = match for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404 as e: sub_tried = e.args[0].get('tried') if sub_tried is not None: tried.extend([pattern] + t for t in sub_tried) else: tried.append([pattern]) else: if sub_match: # Merge captured arguments in match with submatch sub_match_dict = {**kwargs, **self.default_kwargs} # Update the sub_match_dict with the kwargs from the sub_match. sub_match_dict.update(sub_match.kwargs) # If there are *any* named groups, ignore all non-named groups. # Otherwise, pass all non-named arguments as positional arguments. 
sub_match_args = sub_match.args if not sub_match_dict: sub_match_args = args + sub_match.args current_route = '' if isinstance(pattern, URLPattern) else str(pattern.pattern) return ResolverMatch( sub_match.func, sub_match_args, sub_match_dict, sub_match.url_name, [self.app_name] + sub_match.app_names, [self.namespace] + sub_match.namespaces, self._join_route(current_route, sub_match.route), ) tried.append([pattern]) raise Resolver404({'tried': tried, 'path': new_path}) raise Resolver404({'path': path}) @cached_property def urlconf_module(self): if isinstance(self.urlconf_name, str): return import_module(self.urlconf_name) else: return self.urlconf_name @cached_property def url_patterns(self): # urlconf_module might be a valid set of patterns, so we default to it patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module) try: iter(patterns) except TypeError as e: msg = ( "The included URLconf '{name}' does not appear to have any " "patterns in it. If you see valid patterns in the file then " "the issue is probably caused by a circular import." ) raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) from e return patterns def resolve_error_handler(self, view_type): callback = getattr(self.urlconf_module, 'handler%s' % view_type, None) if not callback: # No handler specified in file; use lazy import, since # django.conf.urls imports this file. from django.conf import urls callback = getattr(urls, 'handler%s' % view_type) return get_callable(callback), {} def reverse(self, lookup_view, *args, **kwargs): return self._reverse_with_prefix(lookup_view, '', *args, **kwargs) def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs): if args and kwargs: raise ValueError("Don't mix *args and **kwargs in call to reverse()!") if not self._populated: self._populate() possibilities = self.reverse_dict.getlist(lookup_view) for possibility, pattern, defaults, converters in possibilities: for result, params in possibility: if args: if len(args) != len(params): continue candidate_subs = dict(zip(params, args)) else: if set(kwargs).symmetric_difference(params).difference(defaults): continue if any(kwargs.get(k, v) != v for k, v in defaults.items()): continue candidate_subs = kwargs # Convert the candidate subs to text using Converter.to_url(). text_candidate_subs = {} match = True for k, v in candidate_subs.items(): if k in converters: try: text_candidate_subs[k] = converters[k].to_url(v) except ValueError: match = False break else: text_candidate_subs[k] = str(v) if not match: continue # WSGI provides decoded URLs, without %xx escapes, and the URL # resolver operates on such URLs. First substitute arguments # without quoting to build a decoded URL and look for a match. # Then, if we have a match, redo the substitution with quoted # arguments in order to return a properly encoded URL. candidate_pat = _prefix.replace('%', '%%') + result if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % text_candidate_subs): # safe characters from `pchar` definition of RFC 3986 url = quote(candidate_pat % text_candidate_subs, safe=RFC3986_SUBDELIMS + '/~:@') # Don't allow construction of scheme relative urls. return escape_leading_slashes(url) # lookup_view can be URL name or callable, but callables are not # friendly in error messages. 
m = getattr(lookup_view, '__module__', None) n = getattr(lookup_view, '__name__', None) if m is not None and n is not None: lookup_view_s = "%s.%s" % (m, n) else: lookup_view_s = lookup_view patterns = [pattern for (_, pattern, _, _) in possibilities] if patterns: if args: arg_msg = "arguments '%s'" % (args,) elif kwargs: arg_msg = "keyword arguments '%s'" % (kwargs,) else: arg_msg = "no arguments" msg = ( "Reverse for '%s' with %s not found. %d pattern(s) tried: %s" % (lookup_view_s, arg_msg, len(patterns), patterns) ) else: msg = ( "Reverse for '%(view)s' not found. '%(view)s' is not " "a valid view function or pattern name." % {'view': lookup_view_s} ) raise NoReverseMatch(msg)
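# ---------------------------------------------------------------------------
# Illustrative sketch (not part of django/urls/resolvers.py): what
# _route_to_regex() produces for a typical path() route. The converter map is
# what RoutePattern.match() later feeds through to_python() for each captured
# group.

from django.urls.resolvers import _route_to_regex

regex, converters = _route_to_regex('articles/<int:year>/', is_endpoint=True)
# regex == '^articles/(?P<year>[0-9]+)/$' (slashes may appear escaped as
# '\/' depending on the Python version's re.escape()).
assert list(converters) == ['year']
assert converters['year'].to_python('2006') == 2006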
""" Helper functions for creating Form classes from Django models and database field objects. """ from itertools import chain from django.core.exceptions import ( NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError, ) from django.forms.fields import ChoiceField, Field from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass from django.forms.formsets import BaseFormSet, formset_factory from django.forms.utils import ErrorList from django.forms.widgets import ( HiddenInput, MultipleHiddenInput, SelectMultiple, ) from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext, gettext_lazy as _ __all__ = ( 'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model', 'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet', 'inlineformset_factory', 'modelform_factory', ) ALL_FIELDS = '__all__' def construct_instance(form, instance, fields=None, exclude=None): """ Construct and return a model instance from the bound ``form``'s ``cleaned_data``, but do not save the returned instance to the database. """ from django.db import models opts = instance._meta cleaned_data = form.cleaned_data file_field_list = [] for f in opts.fields: if not f.editable or isinstance(f, models.AutoField) \ or f.name not in cleaned_data: continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue # Leave defaults for fields that aren't in POST data, except for # checkbox inputs because they don't appear in POST data if not checked. if ( f.has_default() and form[f.name].field.widget.value_omitted_from_data(form.data, form.files, form.add_prefix(f.name)) and cleaned_data.get(f.name) in form[f.name].field.empty_values ): continue # Defer saving file-type fields until after the other fields, so a # callable upload_to can use the values from other fields. if isinstance(f, models.FileField): file_field_list.append(f) else: f.save_form_data(instance, cleaned_data[f.name]) for f in file_field_list: f.save_form_data(instance, cleaned_data[f.name]) return instance # ModelForms ################################################################# def model_to_dict(instance, fields=None, exclude=None): """ Return a dict containing the data in ``instance`` suitable for passing as a Form's ``initial`` keyword argument. ``fields`` is an optional list of field names. If provided, return only the named. ``exclude`` is an optional list of field names. If provided, exclude the named from the returned dict, even if they are listed in the ``fields`` argument. 
""" opts = instance._meta data = {} for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many): if not getattr(f, 'editable', False): continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue data[f.name] = f.value_from_object(instance) return data def apply_limit_choices_to_to_formfield(formfield): """Apply limit_choices_to to the formfield's queryset if needed.""" if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'): limit_choices_to = formfield.get_limit_choices_to() if limit_choices_to is not None: formfield.queryset = formfield.queryset.complex_filter(limit_choices_to) def fields_for_model(model, fields=None, exclude=None, widgets=None, formfield_callback=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None, *, apply_limit_choices_to=True): """ Return a dictionary containing form fields for the given model. ``fields`` is an optional list of field names. If provided, return only the named fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``localized_fields`` is a list of names of fields which should be localized. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. ``apply_limit_choices_to`` is a boolean indicating if limit_choices_to should be applied to a field's queryset. 
""" field_dict = {} ignored = [] opts = model._meta # Avoid circular import from django.db.models import Field as ModelField sortable_private_fields = [f for f in opts.private_fields if isinstance(f, ModelField)] for f in sorted(chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)): if not getattr(f, 'editable', False): if (fields is not None and f.name in fields and (exclude is None or f.name not in exclude)): raise FieldError( "'%s' cannot be specified for %s model form as it is a non-editable field" % ( f.name, model.__name__) ) continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue kwargs = {} if widgets and f.name in widgets: kwargs['widget'] = widgets[f.name] if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields): kwargs['localize'] = True if labels and f.name in labels: kwargs['label'] = labels[f.name] if help_texts and f.name in help_texts: kwargs['help_text'] = help_texts[f.name] if error_messages and f.name in error_messages: kwargs['error_messages'] = error_messages[f.name] if field_classes and f.name in field_classes: kwargs['form_class'] = field_classes[f.name] if formfield_callback is None: formfield = f.formfield(**kwargs) elif not callable(formfield_callback): raise TypeError('formfield_callback must be a function or callable') else: formfield = formfield_callback(f, **kwargs) if formfield: if apply_limit_choices_to: apply_limit_choices_to_to_formfield(formfield) field_dict[f.name] = formfield else: ignored.append(f.name) if fields: field_dict = { f: field_dict.get(f) for f in fields if (not exclude or f not in exclude) and f not in ignored } return field_dict class ModelFormOptions: def __init__(self, options=None): self.model = getattr(options, 'model', None) self.fields = getattr(options, 'fields', None) self.exclude = getattr(options, 'exclude', None) self.widgets = getattr(options, 'widgets', None) self.localized_fields = getattr(options, 'localized_fields', None) self.labels = getattr(options, 'labels', None) self.help_texts = getattr(options, 'help_texts', None) self.error_messages = getattr(options, 'error_messages', None) self.field_classes = getattr(options, 'field_classes', None) class ModelFormMetaclass(DeclarativeFieldsMetaclass): def __new__(mcs, name, bases, attrs): base_formfield_callback = None for b in bases: if hasattr(b, 'Meta') and hasattr(b.Meta, 'formfield_callback'): base_formfield_callback = b.Meta.formfield_callback break formfield_callback = attrs.pop('formfield_callback', base_formfield_callback) new_class = super().__new__(mcs, name, bases, attrs) if bases == (BaseModelForm,): return new_class opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None)) # We check if a string was passed to `fields` or `exclude`, # which is likely to be a mistake where the user typed ('foo') instead # of ('foo',) for opt in ['fields', 'exclude', 'localized_fields']: value = getattr(opts, opt) if isinstance(value, str) and value != ALL_FIELDS: msg = ("%(model)s.Meta.%(opt)s cannot be a string. " "Did you mean to type: ('%(value)s',)?" % { 'model': new_class.__name__, 'opt': opt, 'value': value, }) raise TypeError(msg) if opts.model: # If a model is defined, extract form fields from it. if opts.fields is None and opts.exclude is None: raise ImproperlyConfigured( "Creating a ModelForm without either the 'fields' attribute " "or the 'exclude' attribute is prohibited; form %s " "needs updating." 
% name ) if opts.fields == ALL_FIELDS: # Sentinel for fields_for_model to indicate "get the list of # fields from the model" opts.fields = None fields = fields_for_model( opts.model, opts.fields, opts.exclude, opts.widgets, formfield_callback, opts.localized_fields, opts.labels, opts.help_texts, opts.error_messages, opts.field_classes, # limit_choices_to will be applied during ModelForm.__init__(). apply_limit_choices_to=False, ) # make sure opts.fields doesn't specify an invalid field none_model_fields = {k for k, v in fields.items() if not v} missing_fields = none_model_fields.difference(new_class.declared_fields) if missing_fields: message = 'Unknown field(s) (%s) specified for %s' message = message % (', '.join(missing_fields), opts.model.__name__) raise FieldError(message) # Override default model fields with any custom declared ones # (plus, include all the other declared fields). fields.update(new_class.declared_fields) else: fields = new_class.declared_fields new_class.base_fields = fields return new_class class BaseModelForm(BaseForm): def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, instance=None, use_required_attribute=None, renderer=None): opts = self._meta if opts.model is None: raise ValueError('ModelForm has no model class specified.') if instance is None: # if we didn't get an instance, instantiate a new one self.instance = opts.model() object_data = {} else: self.instance = instance object_data = model_to_dict(instance, opts.fields, opts.exclude) # if initial was provided, it should override the values from instance if initial is not None: object_data.update(initial) # self._validate_unique will be set to True by BaseModelForm.clean(). # It is False by default so overriding self.clean() and failing to call # super will stop validate_unique from being called. self._validate_unique = False super().__init__( data, files, auto_id, prefix, object_data, error_class, label_suffix, empty_permitted, use_required_attribute=use_required_attribute, renderer=renderer, ) for formfield in self.fields.values(): apply_limit_choices_to_to_formfield(formfield) def _get_validation_exclusions(self): """ For backwards-compatibility, exclude several types of fields from model validation. See tickets #12507, #12521, #12553. """ exclude = [] # Build up a list of fields that should be excluded from model field # validation and unique checks. for f in self.instance._meta.fields: field = f.name # Exclude fields that aren't on the form. The developer may be # adding these values to the model after form validation. if field not in self.fields: exclude.append(f.name) # Don't perform model validation on fields that were defined # manually on the form and excluded via the ModelForm's Meta # class. See #12901. elif self._meta.fields and field not in self._meta.fields: exclude.append(f.name) elif self._meta.exclude and field in self._meta.exclude: exclude.append(f.name) # Exclude fields that failed form validation. There's no need for # the model fields to validate them as well. elif field in self._errors: exclude.append(f.name) # Exclude empty fields that are not required by the form, if the # underlying model field is required. This keeps the model field # from raising a required error. Note: don't exclude the field from # validation if the model field allows blanks. If it does, the blank # value may be included in a unique check, so cannot be excluded # from validation. 
else: form_field = self.fields[field] field_value = self.cleaned_data.get(field) if not f.blank and not form_field.required and field_value in form_field.empty_values: exclude.append(f.name) return exclude def clean(self): self._validate_unique = True return self.cleaned_data def _update_errors(self, errors): # Override any validation error messages defined at the model level # with those defined at the form level. opts = self._meta # Allow the model generated by construct_instance() to raise # ValidationError and have them handled in the same way as others. if hasattr(errors, 'error_dict'): error_dict = errors.error_dict else: error_dict = {NON_FIELD_ERRORS: errors} for field, messages in error_dict.items(): if (field == NON_FIELD_ERRORS and opts.error_messages and NON_FIELD_ERRORS in opts.error_messages): error_messages = opts.error_messages[NON_FIELD_ERRORS] elif field in self.fields: error_messages = self.fields[field].error_messages else: continue for message in messages: if (isinstance(message, ValidationError) and message.code in error_messages): message.message = error_messages[message.code] self.add_error(None, errors) def _post_clean(self): opts = self._meta exclude = self._get_validation_exclusions() # Foreign Keys being used to represent inline relationships # are excluded from basic field value validation. This is for two # reasons: firstly, the value may not be supplied (#12507; the # case of providing new values to the admin); secondly the # object being referred to may not yet fully exist (#12749). # However, these fields *must* be included in uniqueness checks, # so this can't be part of _get_validation_exclusions(). for name, field in self.fields.items(): if isinstance(field, InlineForeignKeyField): exclude.append(name) try: self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude) except ValidationError as e: self._update_errors(e) try: self.instance.full_clean(exclude=exclude, validate_unique=False) except ValidationError as e: self._update_errors(e) # Validate uniqueness if needed. if self._validate_unique: self.validate_unique() def validate_unique(self): """ Call the instance's validate_unique() method and update the form's validation errors if any were raised. """ exclude = self._get_validation_exclusions() try: self.instance.validate_unique(exclude=exclude) except ValidationError as e: self._update_errors(e) def _save_m2m(self): """ Save the many-to-many fields and generic relations for this form. """ cleaned_data = self.cleaned_data exclude = self._meta.exclude fields = self._meta.fields opts = self.instance._meta # Note that for historical reasons we want to include also # private_fields here. (GenericRelation was previously a fake # m2m field). for f in chain(opts.many_to_many, opts.private_fields): if not hasattr(f, 'save_form_data'): continue if fields and f.name not in fields: continue if exclude and f.name in exclude: continue if f.name in cleaned_data: f.save_form_data(self.instance, cleaned_data[f.name]) def save(self, commit=True): """ Save this form's self.instance object if commit=True. Otherwise, add a save_m2m() method to the form which can be called after the instance is saved manually at a later time. Return the model instance. """ if self.errors: raise ValueError( "The %s could not be %s because the data didn't validate." % ( self.instance._meta.object_name, 'created' if self.instance._state.adding else 'changed', ) ) if commit: # If committing, save the instance and the m2m data immediately. 
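            # (The instance is saved before the m2m data because rows in the
            # m2m join tables need the instance's primary key to exist.)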
self.instance.save() self._save_m2m() else: # If not committing, add a method to the form to allow deferred # saving of m2m data. self.save_m2m = self._save_m2m return self.instance save.alters_data = True class ModelForm(BaseModelForm, metaclass=ModelFormMetaclass): pass def modelform_factory(model, form=ModelForm, fields=None, exclude=None, formfield_callback=None, widgets=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None): """ Return a ModelForm containing form fields for the given model. You can optionally pass a `form` argument to use as a starting point for constructing the ModelForm. ``fields`` is an optional list of field names. If provided, include only the named fields in the returned fields. If omitted or '__all__', use all fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``localized_fields`` is a list of names of fields which should be localized. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. """ # Create the inner Meta class. FIXME: ideally, we should be able to # construct a ModelForm without creating and passing in a temporary # inner class. # Build up a list of attributes that the Meta object will have. attrs = {'model': model} if fields is not None: attrs['fields'] = fields if exclude is not None: attrs['exclude'] = exclude if widgets is not None: attrs['widgets'] = widgets if localized_fields is not None: attrs['localized_fields'] = localized_fields if labels is not None: attrs['labels'] = labels if help_texts is not None: attrs['help_texts'] = help_texts if error_messages is not None: attrs['error_messages'] = error_messages if field_classes is not None: attrs['field_classes'] = field_classes # If parent form class already has an inner Meta, the Meta we're # creating needs to inherit from the parent's inner meta. bases = (form.Meta,) if hasattr(form, 'Meta') else () Meta = type('Meta', bases, attrs) if formfield_callback: Meta.formfield_callback = staticmethod(formfield_callback) # Give this new form class a reasonable name. class_name = model.__name__ + 'Form' # Class attributes for the new form class. form_class_attrs = { 'Meta': Meta, 'formfield_callback': formfield_callback } if (getattr(Meta, 'fields', None) is None and getattr(Meta, 'exclude', None) is None): raise ImproperlyConfigured( "Calling modelform_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) # Instantiate type(form) in order to use the same metaclass as form. return type(form)(class_name, (form,), form_class_attrs) # ModelFormSets ############################################################## class BaseModelFormSet(BaseFormSet): """ A ``FormSet`` for editing a queryset and/or adding new objects to it. """ model = None # Set of fields that must be unique among forms of this set. 
unique_fields = set() def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, queryset=None, *, initial=None, **kwargs): self.queryset = queryset self.initial_extra = initial super().__init__(**{'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix, **kwargs}) def initial_form_count(self): """Return the number of forms that are required in this FormSet.""" if not self.is_bound: return len(self.get_queryset()) return super().initial_form_count() def _existing_object(self, pk): if not hasattr(self, '_object_dict'): self._object_dict = {o.pk: o for o in self.get_queryset()} return self._object_dict.get(pk) def _get_to_python(self, field): """ If the field is a related field, fetch the concrete field's (that is, the ultimate pointed-to field's) to_python. """ while field.remote_field is not None: field = field.remote_field.get_related_field() return field.to_python def _construct_form(self, i, **kwargs): pk_required = i < self.initial_form_count() if pk_required: if self.is_bound: pk_key = '%s-%s' % (self.add_prefix(i), self.model._meta.pk.name) try: pk = self.data[pk_key] except KeyError: # The primary key is missing. The user may have tampered # with POST data. pass else: to_python = self._get_to_python(self.model._meta.pk) try: pk = to_python(pk) except ValidationError: # The primary key exists but is an invalid value. The # user may have tampered with POST data. pass else: kwargs['instance'] = self._existing_object(pk) else: kwargs['instance'] = self.get_queryset()[i] elif self.initial_extra: # Set initial values for extra forms try: kwargs['initial'] = self.initial_extra[i - self.initial_form_count()] except IndexError: pass form = super()._construct_form(i, **kwargs) if pk_required: form.fields[self.model._meta.pk.name].required = True return form def get_queryset(self): if not hasattr(self, '_queryset'): if self.queryset is not None: qs = self.queryset else: qs = self.model._default_manager.get_queryset() # If the queryset isn't already ordered we need to add an # artificial ordering here to make sure that all formsets # constructed from this queryset have the same form order. if not qs.ordered: qs = qs.order_by(self.model._meta.pk.name) # Removed queryset limiting here. As per discussion re: #13023 # on django-dev, max_num should not prevent existing # related objects/inlines from being displayed. self._queryset = qs return self._queryset def save_new(self, form, commit=True): """Save and return a new model instance for the given form.""" return form.save(commit=commit) def save_existing(self, form, instance, commit=True): """Save and return an existing model instance for the given form.""" return form.save(commit=commit) def delete_existing(self, obj, commit=True): """Deletes an existing model instance.""" if commit: obj.delete() def save(self, commit=True): """ Save model instances for every form, adding and changing instances as necessary, and return the list of instances. """ if not commit: self.saved_forms = [] def save_m2m(): for form in self.saved_forms: form.save_m2m() self.save_m2m = save_m2m return self.save_existing_objects(commit) + self.save_new_objects(commit) save.alters_data = True def clean(self): self.validate_unique() def validate_unique(self): # Collect unique_checks and date_checks to run from all the forms. 
all_unique_checks = set() all_date_checks = set() forms_to_delete = self.deleted_forms valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete] for form in valid_forms: exclude = form._get_validation_exclusions() unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude) all_unique_checks.update(unique_checks) all_date_checks.update(date_checks) errors = [] # Do each of the unique checks (unique and unique_together) for uclass, unique_check in all_unique_checks: seen_data = set() for form in valid_forms: # Get the data for the set of fields that must be unique among the forms. row_data = ( field if field in self.unique_fields else form.cleaned_data[field] for field in unique_check if field in form.cleaned_data ) # Reduce Model instances to their primary key values row_data = tuple( d._get_pk_val() if hasattr(d, '_get_pk_val') # Prevent "unhashable type: list" errors later on. else tuple(d) if isinstance(d, list) else d for d in row_data ) if row_data and None not in row_data: # if we've already seen it then we have a uniqueness failure if row_data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_unique_error_message(unique_check)) form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()]) # remove the data from the cleaned_data dict since it was invalid for field in unique_check: if field in form.cleaned_data: del form.cleaned_data[field] # mark the data as seen seen_data.add(row_data) # iterate over each of the date checks now for date_check in all_date_checks: seen_data = set() uclass, lookup, field, unique_for = date_check for form in valid_forms: # see if we have data for both fields if (form.cleaned_data and form.cleaned_data[field] is not None and form.cleaned_data[unique_for] is not None): # if it's a date lookup we need to get the data for all the fields if lookup == 'date': date = form.cleaned_data[unique_for] date_data = (date.year, date.month, date.day) # otherwise it's just the attribute on the date/datetime # object else: date_data = (getattr(form.cleaned_data[unique_for], lookup),) data = (form.cleaned_data[field],) + date_data # if we've already seen it then we have a uniqueness failure if data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_date_error_message(date_check)) form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()]) # remove the data from the cleaned_data dict since it was invalid del form.cleaned_data[field] # mark the data as seen seen_data.add(data) if errors: raise ValidationError(errors) def get_unique_error_message(self, unique_check): if len(unique_check) == 1: return gettext("Please correct the duplicate data for %(field)s.") % { "field": unique_check[0], } else: return gettext("Please correct the duplicate data for %(field)s, which must be unique.") % { "field": get_text_list(unique_check, _("and")), } def get_date_error_message(self, date_check): return gettext( "Please correct the duplicate data for %(field_name)s " "which must be unique for the %(lookup)s in %(date_field)s." 
) % { 'field_name': date_check[2], 'date_field': date_check[3], 'lookup': str(date_check[1]), } def get_form_error(self): return gettext("Please correct the duplicate values below.") def save_existing_objects(self, commit=True): self.changed_objects = [] self.deleted_objects = [] if not self.initial_forms: return [] saved_instances = [] forms_to_delete = self.deleted_forms for form in self.initial_forms: obj = form.instance # If the pk is None, it means either: # 1. The object is an unexpected empty model, created by invalid # POST data such as an object outside the formset's queryset. # 2. The object was already deleted from the database. if obj.pk is None: continue if form in forms_to_delete: self.deleted_objects.append(obj) self.delete_existing(obj, commit=commit) elif form.has_changed(): self.changed_objects.append((obj, form.changed_data)) saved_instances.append(self.save_existing(form, obj, commit=commit)) if not commit: self.saved_forms.append(form) return saved_instances def save_new_objects(self, commit=True): self.new_objects = [] for form in self.extra_forms: if not form.has_changed(): continue # If someone has marked an add form for deletion, don't save the # object. if self.can_delete and self._should_delete_form(form): continue self.new_objects.append(self.save_new(form, commit=commit)) if not commit: self.saved_forms.append(form) return self.new_objects def add_fields(self, form, index): """Add a hidden field for the object's primary key.""" from django.db.models import AutoField, OneToOneField, ForeignKey self._pk_field = pk = self.model._meta.pk # If a pk isn't editable, then it won't be on the form, so we need to # add it here so we can tell which object is which when we get the # data back. Generally, pk.editable should be false, but for some # reason, auto_created pk fields and AutoField's editable attribute is # True, so check for that as well. def pk_is_not_editable(pk): return ( (not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or ( pk.remote_field and pk.remote_field.parent_link and pk_is_not_editable(pk.remote_field.model._meta.pk) ) ) if pk_is_not_editable(pk) or pk.name not in form.fields: if form.is_bound: # If we're adding the related instance, ignore its primary key # as it could be an auto-generated default which isn't actually # in the database. 
pk_value = None if form.instance._state.adding else form.instance.pk else: try: if index is not None: pk_value = self.get_queryset()[index].pk else: pk_value = None except IndexError: pk_value = None if isinstance(pk, (ForeignKey, OneToOneField)): qs = pk.remote_field.model._default_manager.get_queryset() else: qs = self.model._default_manager.get_queryset() qs = qs.using(form.instance._state.db) if form._meta.widgets: widget = form._meta.widgets.get(self._pk_field.name, HiddenInput) else: widget = HiddenInput form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget) super().add_fields(form, index) def modelformset_factory(model, form=ModelForm, formfield_callback=None, formset=BaseModelFormSet, extra=1, can_delete=False, can_order=False, max_num=None, fields=None, exclude=None, widgets=None, validate_max=False, localized_fields=None, labels=None, help_texts=None, error_messages=None, min_num=None, validate_min=False, field_classes=None): """Return a FormSet class for the given Django model class.""" meta = getattr(form, 'Meta', None) if (getattr(meta, 'fields', fields) is None and getattr(meta, 'exclude', exclude) is None): raise ImproperlyConfigured( "Calling modelformset_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) form = modelform_factory(model, form=form, fields=fields, exclude=exclude, formfield_callback=formfield_callback, widgets=widgets, localized_fields=localized_fields, labels=labels, help_texts=help_texts, error_messages=error_messages, field_classes=field_classes) FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num, can_order=can_order, can_delete=can_delete, validate_min=validate_min, validate_max=validate_max) FormSet.model = model return FormSet # InlineFormSets ############################################################# class BaseInlineFormSet(BaseModelFormSet): """A formset for child objects related to a parent.""" def __init__(self, data=None, files=None, instance=None, save_as_new=False, prefix=None, queryset=None, **kwargs): if instance is None: self.instance = self.fk.remote_field.model() else: self.instance = instance self.save_as_new = save_as_new if queryset is None: queryset = self.model._default_manager if self.instance.pk is not None: qs = queryset.filter(**{self.fk.name: self.instance}) else: qs = queryset.none() self.unique_fields = {self.fk.name} super().__init__(data, files, prefix=prefix, queryset=qs, **kwargs) # Add the generated field to form._meta.fields if it's defined to make # sure validation isn't skipped on that field. if self.form._meta.fields and self.fk.name not in self.form._meta.fields: if isinstance(self.form._meta.fields, tuple): self.form._meta.fields = list(self.form._meta.fields) self.form._meta.fields.append(self.fk.name) def initial_form_count(self): if self.save_as_new: return 0 return super().initial_form_count() def _construct_form(self, i, **kwargs): form = super()._construct_form(i, **kwargs) if self.save_as_new: mutable = getattr(form.data, '_mutable', None) # Allow modifying an immutable QueryDict. if mutable is not None: form.data._mutable = True # Remove the primary key from the form's data, we are only # creating new instances form.data[form.add_prefix(self._pk_field.name)] = None # Remove the foreign key from the form's data form.data[form.add_prefix(self.fk.name)] = None if mutable is not None: form.data._mutable = mutable # Set the fk value here so that the form can do its validation. 
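        # (When the ForeignKey targets a non-pk field via to_field, the branch
        # below resolves that attribute on the parent instead of its pk.)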
fk_value = self.instance.pk if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: fk_value = getattr(self.instance, self.fk.remote_field.field_name) fk_value = getattr(fk_value, 'pk', fk_value) setattr(form.instance, self.fk.get_attname(), fk_value) return form @classmethod def get_default_prefix(cls): return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '') def save_new(self, form, commit=True): # Ensure the latest copy of the related instance is present on each # form (it may have been saved after the formset was originally # instantiated). setattr(form.instance, self.fk.name, self.instance) return super().save_new(form, commit=commit) def add_fields(self, form, index): super().add_fields(form, index) if self._pk_field == self.fk: name = self._pk_field.name kwargs = {'pk_field': True} else: # The foreign key field might not be on the form, so we poke at the # Model field to get the label, since we need that for error messages. name = self.fk.name kwargs = { 'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name)) } # The InlineForeignKeyField assumes that the foreign key relation is # based on the parent model's pk. If this isn't the case, set to_field # to correctly resolve the initial form value. if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: kwargs['to_field'] = self.fk.remote_field.field_name # If we're adding a new object, ignore a parent's auto-generated key # as it will be regenerated on the save request. if self.instance._state.adding: if kwargs.get('to_field') is not None: to_field = self.instance._meta.get_field(kwargs['to_field']) else: to_field = self.instance._meta.pk if to_field.has_default(): setattr(self.instance, to_field.attname, None) form.fields[name] = InlineForeignKeyField(self.instance, **kwargs) def get_unique_error_message(self, unique_check): unique_check = [field for field in unique_check if field != self.fk.name] return super().get_unique_error_message(unique_check) def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False): """ Find and return the ForeignKey from model to parent if there is one (return None if can_fail is True and no such field exists). If fk_name is provided, assume it is the name of the ForeignKey field. Unless can_fail is True, raise an exception if there isn't a ForeignKey from model to parent_model. """ # avoid circular import from django.db.models import ForeignKey opts = model._meta if fk_name: fks_to_parent = [f for f in opts.fields if f.name == fk_name] if len(fks_to_parent) == 1: fk = fks_to_parent[0] if not isinstance(fk, ForeignKey) or \ (fk.remote_field.model != parent_model and fk.remote_field.model not in parent_model._meta.get_parent_list()): raise ValueError( "fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label) ) elif not fks_to_parent: raise ValueError( "'%s' has no field named '%s'." % (model._meta.label, fk_name) ) else: # Try to discover what the ForeignKey from model to parent_model is fks_to_parent = [ f for f in opts.fields if isinstance(f, ForeignKey) and ( f.remote_field.model == parent_model or f.remote_field.model in parent_model._meta.get_parent_list() ) ] if len(fks_to_parent) == 1: fk = fks_to_parent[0] elif not fks_to_parent: if can_fail: return raise ValueError( "'%s' has no ForeignKey to '%s'." % ( model._meta.label, parent_model._meta.label, ) ) else: raise ValueError( "'%s' has more than one ForeignKey to '%s'. You must specify " "a 'fk_name' attribute." 
                % (
                    model._meta.label,
                    parent_model._meta.label,
                )
            )
    return fk


def inlineformset_factory(parent_model, model, form=ModelForm,
                          formset=BaseInlineFormSet, fk_name=None,
                          fields=None, exclude=None, extra=3, can_order=False,
                          can_delete=True, max_num=None, formfield_callback=None,
                          widgets=None, validate_max=False, localized_fields=None,
                          labels=None, help_texts=None, error_messages=None,
                          min_num=None, validate_min=False, field_classes=None):
    """
    Return an ``InlineFormSet`` for the given kwargs.

    ``fk_name`` must be provided if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    # enforce a max_num=1 when the foreign key to the parent model is unique.
    if fk.unique:
        max_num = 1
    kwargs = {
        'form': form,
        'formfield_callback': formfield_callback,
        'formset': formset,
        'extra': extra,
        'can_delete': can_delete,
        'can_order': can_order,
        'fields': fields,
        'exclude': exclude,
        'min_num': min_num,
        'max_num': max_num,
        'widgets': widgets,
        'validate_min': validate_min,
        'validate_max': validate_max,
        'localized_fields': localized_fields,
        'labels': labels,
        'help_texts': help_texts,
        'error_messages': error_messages,
        'field_classes': field_classes,
    }
    FormSet = modelformset_factory(model, **kwargs)
    FormSet.fk = fk
    return FormSet


# Fields #####################################################################

class InlineForeignKeyField(Field):
    """
    A basic integer field that deals with validating the given value to a
    given parent instance in an inline.
    """
    widget = HiddenInput
    default_error_messages = {
        'invalid_choice': _('The inline value did not match the parent instance.'),
    }

    def __init__(self, parent_instance, *args, pk_field=False, to_field=None, **kwargs):
        self.parent_instance = parent_instance
        self.pk_field = pk_field
        self.to_field = to_field
        if self.parent_instance is not None:
            if self.to_field:
                kwargs["initial"] = getattr(self.parent_instance, self.to_field)
            else:
                kwargs["initial"] = self.parent_instance.pk
        kwargs["required"] = False
        super().__init__(*args, **kwargs)

    def clean(self, value):
        if value in self.empty_values:
            if self.pk_field:
                return None
            # if there is no value, act as we did before.
            return self.parent_instance
        # ensure that we compare the values as equal types.
        if self.to_field:
            orig = getattr(self.parent_instance, self.to_field)
        else:
            orig = self.parent_instance.pk
        if str(value) != str(orig):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return self.parent_instance

    def has_changed(self, initial, data):
        return False


class ModelChoiceIteratorValue:
    def __init__(self, value, instance):
        self.value = value
        self.instance = instance

    def __str__(self):
        return str(self.value)

    def __eq__(self, other):
        if isinstance(other, ModelChoiceIteratorValue):
            other = other.value
        return self.value == other


class ModelChoiceIterator:
    def __init__(self, field):
        self.field = field
        self.queryset = field.queryset

    def __iter__(self):
        if self.field.empty_label is not None:
            yield ("", self.field.empty_label)
        queryset = self.queryset
        # Can't use iterator() when queryset uses prefetch_related()
        if not queryset._prefetch_related_lookups:
            queryset = queryset.iterator()
        for obj in queryset:
            yield self.choice(obj)

    def __len__(self):
        # count() adds a query but uses less memory since the QuerySet results
        # won't be cached. In most cases, the choices will only be iterated on,
        # and __len__() won't be called.
        return self.queryset.count() + (1 if self.field.empty_label is not None else 0)

    def __bool__(self):
        return self.field.empty_label is not None or self.queryset.exists()

    def choice(self, obj):
        return (
            ModelChoiceIteratorValue(self.field.prepare_value(obj), obj),
            self.field.label_from_instance(obj),
        )


class ModelChoiceField(ChoiceField):
    """A ChoiceField whose choices are a model QuerySet."""
    # This class is a subclass of ChoiceField for purity, but it doesn't
    # actually use any of ChoiceField's implementation.
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. That choice is not one of'
                            ' the available choices.'),
    }
    iterator = ModelChoiceIterator

    def __init__(self, queryset, *, empty_label="---------",
                 required=True, widget=None, label=None, initial=None,
                 help_text='', to_field_name=None, limit_choices_to=None,
                 **kwargs):
        if required and (initial is not None):
            self.empty_label = None
        else:
            self.empty_label = empty_label

        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        Field.__init__(
            self, required=required, widget=widget, label=label,
            initial=initial, help_text=help_text, **kwargs
        )
        self.queryset = queryset
        self.limit_choices_to = limit_choices_to   # limit the queryset later.
        self.to_field_name = to_field_name

    def get_limit_choices_to(self):
        """
        Return ``limit_choices_to`` for this form field.

        If it is a callable, invoke it and return the result.
        """
        if callable(self.limit_choices_to):
            return self.limit_choices_to()
        return self.limit_choices_to

    def __deepcopy__(self, memo):
        result = super(ChoiceField, self).__deepcopy__(memo)
        # Need to force a new ModelChoiceIterator to be created, bug #11183
        if self.queryset is not None:
            result.queryset = self.queryset.all()
        return result

    def _get_queryset(self):
        return self._queryset

    def _set_queryset(self, queryset):
        self._queryset = None if queryset is None else queryset.all()
        self.widget.choices = self.choices

    queryset = property(_get_queryset, _set_queryset)

    # This method will be used to create object labels by the
    # ModelChoiceIterator. Override it to customize the label.
    def label_from_instance(self, obj):
        """
        Convert objects into strings and generate the labels for the choices
        presented by this object. Subclasses can override this method to
        customize the display of the choices.
        """
        return str(obj)

    def _get_choices(self):
        # If self._choices is set, then somebody must have manually set
        # the property self.choices. In this case, just return self._choices.
        if hasattr(self, '_choices'):
            return self._choices

        # Otherwise, execute the QuerySet in self.queryset to determine the
        # choices dynamically. Return a fresh ModelChoiceIterator that has not been
        # consumed. Note that we're instantiating a new ModelChoiceIterator *each*
        # time _get_choices() is called (and, thus, each time self.choices is
        # accessed) so that we can ensure the QuerySet has not been consumed. This
        # construct might look complicated but it allows for lazy evaluation of
        # the queryset.
        return self.iterator(self)

    choices = property(_get_choices, ChoiceField._set_choices)

    def prepare_value(self, value):
        if hasattr(value, '_meta'):
            if self.to_field_name:
                return value.serializable_value(self.to_field_name)
            else:
                return value.pk
        return super().prepare_value(value)

    def to_python(self, value):
        if value in self.empty_values:
            return None
        try:
            key = self.to_field_name or 'pk'
            if isinstance(value, self.queryset.model):
                value = getattr(value, key)
            value = self.queryset.get(**{key: value})
        except (ValueError, TypeError, self.queryset.model.DoesNotExist):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return value

    def validate(self, value):
        return Field.validate(self, value)

    def has_changed(self, initial, data):
        if self.disabled:
            return False
        initial_value = initial if initial is not None else ''
        data_value = data if data is not None else ''
        return str(self.prepare_value(initial_value)) != str(data_value)


class ModelMultipleChoiceField(ModelChoiceField):
    """A MultipleChoiceField whose choices are a model QuerySet."""
    widget = SelectMultiple
    hidden_widget = MultipleHiddenInput
    default_error_messages = {
        'list': _('Enter a list of values.'),
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
                            ' available choices.'),
        'invalid_pk_value': _('“%(pk)s” is not a valid value.')
    }

    def __init__(self, queryset, **kwargs):
        super().__init__(queryset, empty_label=None, **kwargs)

    def to_python(self, value):
        if not value:
            return []
        return list(self._check_values(value))

    def clean(self, value):
        value = self.prepare_value(value)
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        elif not self.required and not value:
            return self.queryset.none()
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'], code='list')
        qs = self._check_values(value)
        # Since this overrides the inherited ModelChoiceField.clean,
        # run the custom validators here.
        self.run_validators(value)
        return qs

    def _check_values(self, value):
        """
        Given a list of possible PK values, return a QuerySet of the
        corresponding objects. Raise a ValidationError if a given value is
        invalid (not a valid PK, not in the queryset, etc.)
        """
        key = self.to_field_name or 'pk'
        # Deduplicate the given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
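        # (frozenset() also surfaces unhashable members, e.g. nested lists,
        # as a TypeError, which the block below reports as the 'list' error.)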
try: value = frozenset(value) except TypeError: # list of lists isn't hashable, for example raise ValidationError( self.error_messages['list'], code='list', ) for pk in value: try: self.queryset.filter(**{key: pk}) except (ValueError, TypeError): raise ValidationError( self.error_messages['invalid_pk_value'], code='invalid_pk_value', params={'pk': pk}, ) qs = self.queryset.filter(**{'%s__in' % key: value}) pks = {str(getattr(o, key)) for o in qs} for val in value: if str(val) not in pks: raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': val}, ) return qs def prepare_value(self, value): if (hasattr(value, '__iter__') and not isinstance(value, str) and not hasattr(value, '_meta')): prepare_value = super().prepare_value return [prepare_value(v) for v in value] return super().prepare_value(value) def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = {str(value) for value in self.prepare_value(initial)} data_set = {str(value) for value in data} return data_set != initial_set def modelform_defines_fields(form_class): return hasattr(form_class, '_meta') and ( form_class._meta.fields is not None or form_class._meta.exclude is not None )
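

# Illustrative usage sketch (not part of Django itself). ``Author`` and
# ``Book`` are hypothetical models, with ``Book`` holding a ForeignKey to
# ``Author``; the factories above could then be driven like this:
#
#     AuthorForm = modelform_factory(Author, fields=['name'])
#     BookFormSet = inlineformset_factory(Author, Book, fields=['title'], extra=2)
#
#     form = AuthorForm({'name': 'Leo Tolstoy'})
#     if form.is_valid():
#         author = form.save()
#         formset = BookFormSet(instance=author)  # two blank Book forms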
af54e0f50eb1ca5c50ad4927c17452747a6f5713b34091e40ce43b65751f4be2
import copy
from contextlib import contextmanager

from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.conf import settings
from django.db import models
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version

from .exceptions import InvalidBasesError


def _get_app_label_and_model_name(model, app_label=''):
    if isinstance(model, str):
        split = model.split('.', 1)
        return tuple(split) if len(split) == 2 else (app_label, split[0])
    else:
        return model._meta.app_label, model._meta.model_name


def _get_related_models(m):
    """Return all models that have a direct relationship to the given model."""
    related_models = [
        subclass for subclass in m.__subclasses__()
        if issubclass(subclass, models.Model)
    ]
    related_fields_models = set()
    for f in m._meta.get_fields(include_parents=True, include_hidden=True):
        if f.is_relation and f.related_model is not None and not isinstance(f.related_model, str):
            related_fields_models.add(f.model)
            related_models.append(f.related_model)
    # Reverse accessors of foreign keys to proxy models are attached to their
    # concrete proxied model.
    opts = m._meta
    if opts.proxy and m in related_fields_models:
        related_models.append(opts.concrete_model)
    return related_models


def get_related_models_tuples(model):
    """
    Return a set of (app_label, model_name) tuples for all related models
    for the given model.
    """
    return {
        (rel_mod._meta.app_label, rel_mod._meta.model_name)
        for rel_mod in _get_related_models(model)
    }


def get_related_models_recursive(model):
    """
    Return all models that have a direct or indirect relationship
    to the given model.

    Relationships are either defined by explicit relational fields, like
    ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
    model (a superclass is related to its subclasses, but not vice versa). Note,
    however, that a model inheriting from a concrete model is also related to
    its superclass through the implicit *_ptr OneToOneField on the subclass.
    """
    seen = set()
    queue = _get_related_models(model)
    for rel_mod in queue:
        rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name
        if (rel_app_label, rel_model_name) in seen:
            continue
        seen.add((rel_app_label, rel_model_name))
        queue.extend(_get_related_models(rel_mod))
    return seen - {(model._meta.app_label, model._meta.model_name)}


class ProjectState:
    """
    Represent the entire project's overall state. This is the item that is
    passed around - do it here rather than at the app level so that cross-app
    FKs/etc. resolve properly.
""" def __init__(self, models=None, real_apps=None): self.models = models or {} # Apps to include from main registry, usually unmigrated ones self.real_apps = real_apps or [] self.is_delayed = False def add_model(self, model_state): app_label, model_name = model_state.app_label, model_state.name_lower self.models[(app_label, model_name)] = model_state if 'apps' in self.__dict__: # hasattr would cache the property self.reload_model(app_label, model_name) def remove_model(self, app_label, model_name): del self.models[app_label, model_name] if 'apps' in self.__dict__: # hasattr would cache the property self.apps.unregister_model(app_label, model_name) # Need to do this explicitly since unregister_model() doesn't clear # the cache automatically (#24513) self.apps.clear_cache() def _find_reload_model(self, app_label, model_name, delay=False): if delay: self.is_delayed = True related_models = set() try: old_model = self.apps.get_model(app_label, model_name) except LookupError: pass else: # Get all relations to and from the old model before reloading, # as _meta.apps may change if delay: related_models = get_related_models_tuples(old_model) else: related_models = get_related_models_recursive(old_model) # Get all outgoing references from the model to be rendered model_state = self.models[(app_label, model_name)] # Directly related models are the models pointed to by ForeignKeys, # OneToOneFields, and ManyToManyFields. direct_related_models = set() for name, field in model_state.fields: if field.is_relation: if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT: continue rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label) direct_related_models.add((rel_app_label, rel_model_name.lower())) # For all direct related models recursively get all related models. related_models.update(direct_related_models) for rel_app_label, rel_model_name in direct_related_models: try: rel_model = self.apps.get_model(rel_app_label, rel_model_name) except LookupError: pass else: if delay: related_models.update(get_related_models_tuples(rel_model)) else: related_models.update(get_related_models_recursive(rel_model)) # Include the model itself related_models.add((app_label, model_name)) return related_models def reload_model(self, app_label, model_name, delay=False): if 'apps' in self.__dict__: # hasattr would cache the property related_models = self._find_reload_model(app_label, model_name, delay) self._reload(related_models) def reload_models(self, models, delay=True): if 'apps' in self.__dict__: # hasattr would cache the property related_models = set() for app_label, model_name in models: related_models.update(self._find_reload_model(app_label, model_name, delay)) self._reload(related_models) def _reload(self, related_models): # Unregister all related models with self.apps.bulk_update(): for rel_app_label, rel_model_name in related_models: self.apps.unregister_model(rel_app_label, rel_model_name) states_to_be_rendered = [] # Gather all models states of those models that will be rerendered. # This includes: # 1. All related models of unmigrated apps for model_state in self.apps.real_models: if (model_state.app_label, model_state.name_lower) in related_models: states_to_be_rendered.append(model_state) # 2. 
All related models of migrated apps for rel_app_label, rel_model_name in related_models: try: model_state = self.models[rel_app_label, rel_model_name] except KeyError: pass else: states_to_be_rendered.append(model_state) # Render all models self.apps.render_multiple(states_to_be_rendered) def clone(self): """Return an exact copy of this ProjectState.""" new_state = ProjectState( models={k: v.clone() for k, v in self.models.items()}, real_apps=self.real_apps, ) if 'apps' in self.__dict__: new_state.apps = self.apps.clone() new_state.is_delayed = self.is_delayed return new_state def clear_delayed_apps_cache(self): if self.is_delayed and 'apps' in self.__dict__: del self.__dict__['apps'] @cached_property def apps(self): return StateApps(self.real_apps, self.models) @property def concrete_apps(self): self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True) return self.apps @classmethod def from_apps(cls, apps): """Take an Apps and return a ProjectState matching it.""" app_models = {} for model in apps.get_models(include_swapped=True): model_state = ModelState.from_model(model) app_models[(model_state.app_label, model_state.name_lower)] = model_state return cls(app_models) def __eq__(self, other): return self.models == other.models and set(self.real_apps) == set(other.real_apps) class AppConfigStub(AppConfig): """Stub of an AppConfig. Only provides a label and a dict of models.""" # Not used, but required by AppConfig.__init__ path = '' def __init__(self, label): self.label = label # App-label and app-name are not the same thing, so technically passing # in the label here is wrong. In practice, migrations don't care about # the app name, but we need something unique, and the label works fine. super().__init__(label, None) def import_models(self): self.models = self.apps.all_models[self.label] class StateApps(Apps): """ Subclass of the global Apps registry class to better handle dynamic model additions and removals. """ def __init__(self, real_apps, models, ignore_swappable=False): # Any apps in self.real_apps should have all their models included # in the render. We don't use the original model instances as there # are some variables that refer to the Apps object. # FKs/M2Ms from real apps are also not included as they just # mess things up with partial states (due to lack of dependencies) self.real_models = [] for app_label in real_apps: app = global_apps.get_app_config(app_label) for model in app.get_models(): self.real_models.append(ModelState.from_model(model, exclude_rels=True)) # Populate the app registry with a stub for each application. app_labels = {model_state.app_label for model_state in models.values()} app_configs = [AppConfigStub(label) for label in sorted([*real_apps, *app_labels])] super().__init__(app_configs) # These locks get in the way of copying as implemented in clone(), # which is called whenever Django duplicates a StateApps before # updating it. self._lock = None self.ready_event = None self.render_multiple([*models.values(), *self.real_models]) # There shouldn't be any operations pending at this point. from django.core.checks.model_checks import _check_lazy_references ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set() errors = _check_lazy_references(self, ignore=ignore) if errors: raise ValueError("\n".join(error.msg for error in errors)) @contextmanager def bulk_update(self): # Avoid clearing each model's cache for each change. Instead, clear # all caches when we're finished updating the model instances. 
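        # (Used as ``with apps.bulk_update(): ...`` around a batch of
        # register/unregister calls, as ProjectState._reload() does above.)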
        ready = self.ready
        self.ready = False
        try:
            yield
        finally:
            self.ready = ready
            self.clear_cache()

    def render_multiple(self, model_states):
        # We keep trying to render the models in a loop, ignoring invalid
        # base errors, until the size of the unrendered models doesn't
        # decrease by at least one, meaning there's a base dependency loop/
        # missing base.
        if not model_states:
            return
        # Prevent all model caches from being expired on each render.
        with self.bulk_update():
            unrendered_models = model_states
            while unrendered_models:
                new_unrendered_models = []
                for model in unrendered_models:
                    try:
                        model.render(self)
                    except InvalidBasesError:
                        new_unrendered_models.append(model)
                if len(new_unrendered_models) == len(unrendered_models):
                    raise InvalidBasesError(
                        "Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
                        "app with migrations (e.g. contrib.auth)\nin an app with no migrations; see "
                        "https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
                        "for more" % (new_unrendered_models, get_docs_version())
                    )
                unrendered_models = new_unrendered_models

    def clone(self):
        """Return a clone of this registry."""
        clone = StateApps([], {})
        clone.all_models = copy.deepcopy(self.all_models)
        clone.app_configs = copy.deepcopy(self.app_configs)
        # Set the pointer to the correct app registry.
        for app_config in clone.app_configs.values():
            app_config.apps = clone
        # No need to actually clone them, they'll never change
        clone.real_models = self.real_models
        return clone

    def register_model(self, app_label, model):
        self.all_models[app_label][model._meta.model_name] = model
        if app_label not in self.app_configs:
            self.app_configs[app_label] = AppConfigStub(app_label)
            self.app_configs[app_label].apps = self
            self.app_configs[app_label].models = {}
        self.app_configs[app_label].models[model._meta.model_name] = model
        self.do_pending_operations(model)
        self.clear_cache()

    def unregister_model(self, app_label, model_name):
        try:
            del self.all_models[app_label][model_name]
            del self.app_configs[app_label].models[model_name]
        except KeyError:
            pass


class ModelState:
    """
    Represent a Django Model. Don't use the actual Model class as it's not
    designed to have its options changed - instead, mutate this one and then
    render it into a Model as required.

    Note that while you are allowed to mutate .fields, you are not allowed
    to mutate the Field instances inside there themselves - you must instead
    assign new ones, as these are not detached during a clone.
    """

    def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
        self.app_label = app_label
        self.name = name
        self.fields = fields
        self.options = options or {}
        self.options.setdefault('indexes', [])
        self.options.setdefault('constraints', [])
        self.bases = bases or (models.Model,)
        self.managers = managers or []
        # Sanity-check that fields is NOT a dict. It must be ordered.
        if isinstance(self.fields, dict):
            raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
        for name, field in fields:
            # Sanity-check that fields are NOT already bound to a model.
            if hasattr(field, 'model'):
                raise ValueError(
                    'ModelState.fields cannot be bound to a model - "%s" is.' % name
                )
            # Sanity-check that relation fields are NOT referring to a model class.
            if field.is_relation and hasattr(field.related_model, '_meta'):
                raise ValueError(
                    'ModelState.fields cannot refer to a model class - "%s.to" does. '
                    'Use a string reference instead.'
% name ) if field.many_to_many and hasattr(field.remote_field.through, '_meta'): raise ValueError( 'ModelState.fields cannot refer to a model class - "%s.through" does. ' 'Use a string reference instead.' % name ) # Sanity-check that indexes have their name set. for index in self.options['indexes']: if not index.name: raise ValueError( "Indexes passed to ModelState require a name attribute. " "%r doesn't have one." % index ) @cached_property def name_lower(self): return self.name.lower() @classmethod def from_model(cls, model, exclude_rels=False): """Given a model, return a ModelState representing it.""" # Deconstruct the fields fields = [] for field in model._meta.local_fields: if getattr(field, "remote_field", None) and exclude_rels: continue if isinstance(field, models.OrderWrt): continue name = field.name try: fields.append((name, field.clone())) except TypeError as e: raise TypeError("Couldn't reconstruct field %s on %s: %s" % ( name, model._meta.label, e, )) if not exclude_rels: for field in model._meta.local_many_to_many: name = field.name try: fields.append((name, field.clone())) except TypeError as e: raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % ( name, model._meta.object_name, e, )) # Extract the options options = {} for name in DEFAULT_NAMES: # Ignore some special options if name in ["apps", "app_label"]: continue elif name in model._meta.original_attrs: if name == "unique_together": ut = model._meta.original_attrs["unique_together"] options[name] = set(normalize_together(ut)) elif name == "index_together": it = model._meta.original_attrs["index_together"] options[name] = set(normalize_together(it)) elif name == "indexes": indexes = [idx.clone() for idx in model._meta.indexes] for index in indexes: if not index.name: index.set_name_with_model(model) options['indexes'] = indexes elif name == 'constraints': options['constraints'] = [con.clone() for con in model._meta.constraints] else: options[name] = model._meta.original_attrs[name] # If we're ignoring relationships, remove all field-listing model # options (that option basically just means "make a stub model") if exclude_rels: for key in ["unique_together", "index_together", "order_with_respect_to"]: if key in options: del options[key] # Private fields are ignored, so remove options that refer to them. elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}: del options['order_with_respect_to'] def flatten_bases(model): bases = [] for base in model.__bases__: if hasattr(base, "_meta") and base._meta.abstract: bases.extend(flatten_bases(base)) else: bases.append(base) return bases # We can't rely on __mro__ directly because we only want to flatten # abstract models and not the whole tree. However by recursing on # __bases__ we may end up with duplicates and ordering issues, we # therefore discard any duplicates and reorder the bases according # to their index in the MRO. flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x)) # Make our record bases = tuple( ( base._meta.label_lower if hasattr(base, "_meta") else base ) for base in flattened_bases ) # Ensure at least one base inherits from models.Model if not any((isinstance(base, str) or issubclass(base, models.Model)) for base in bases): bases = (models.Model,) managers = [] manager_names = set() default_manager_shim = None for manager in model._meta.managers: if manager.name in manager_names: # Skip overridden managers. 
continue elif manager.use_in_migrations: # Copy managers usable in migrations. new_manager = copy.copy(manager) new_manager._set_creation_counter() elif manager is model._base_manager or manager is model._default_manager: # Shim custom managers used as default and base managers. new_manager = models.Manager() new_manager.model = manager.model new_manager.name = manager.name if manager is model._default_manager: default_manager_shim = new_manager else: continue manager_names.add(manager.name) managers.append((manager.name, new_manager)) # Ignore a shimmed default manager called objects if it's the only one. if managers == [('objects', default_manager_shim)]: managers = [] # Construct the new ModelState return cls( model._meta.app_label, model._meta.object_name, fields, options, bases, managers, ) def construct_managers(self): """Deep-clone the managers using deconstruction.""" # Sort all managers by their creation counter sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter) for mgr_name, manager in sorted_managers: as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct() if as_manager: qs_class = import_string(qs_path) yield mgr_name, qs_class.as_manager() else: manager_class = import_string(manager_path) yield mgr_name, manager_class(*args, **kwargs) def clone(self): """Return an exact copy of this ModelState.""" return self.__class__( app_label=self.app_label, name=self.name, fields=list(self.fields), # Since options are shallow-copied here, operations such as # AddIndex must replace their option (e.g 'indexes') rather # than mutating it. options=dict(self.options), bases=self.bases, managers=list(self.managers), ) def render(self, apps): """Create a Model object from our current state into the given apps.""" # First, make a Meta object meta_contents = {'app_label': self.app_label, 'apps': apps, **self.options} meta = type("Meta", (), meta_contents) # Then, work out our bases try: bases = tuple( (apps.get_model(base) if isinstance(base, str) else base) for base in self.bases ) except LookupError: raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,)) # Turn fields into a dict for the body, add other bits body = {name: field.clone() for name, field in self.fields} body['Meta'] = meta body['__module__'] = "__fake__" # Restore managers body.update(self.construct_managers()) # Then, make a Model object (apps.register_model is called in __new__) return type(self.name, bases, body) def get_field_by_name(self, name): for fname, field in self.fields: if fname == name: return field raise ValueError("No field called %s on model %s" % (name, self.name)) def get_index_by_name(self, name): for index in self.options['indexes']: if index.name == name: return index raise ValueError("No index named %s on model %s" % (name, self.name)) def get_constraint_by_name(self, name): for constraint in self.options['constraints']: if constraint.name == name: return constraint raise ValueError('No constraint named %s on model %s' % (name, self.name)) def __repr__(self): return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name) def __eq__(self, other): return ( (self.app_label == other.app_label) and (self.name == other.name) and (len(self.fields) == len(other.fields)) and all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:])) for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and (self.options == other.options) and (self.bases == other.bases) and (self.managers == other.managers) )
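

# Illustrative usage sketch (not part of Django itself). The app label,
# model name, and fields are hypothetical; it shows a ProjectState being
# assembled from ModelState instances and rendered into fake model classes:
#
#     state = ProjectState()
#     state.add_model(ModelState('myapp', 'Author', fields=[
#         ('id', models.AutoField(primary_key=True)),
#         ('name', models.CharField(max_length=100)),
#     ]))
#     Author = state.apps.get_model('myapp', 'Author')
#     assert state.clone() == state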
8dc9d01148bfd0cae0a98a1a253c3d2aee549084d3e669985699f3a1ac0369e1
import pkgutil
import sys
from importlib import import_module, reload

from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder

from .exceptions import (
    AmbiguityError, BadMigrationError, InconsistentMigrationHistory,
    NodeNotFoundError,
)

MIGRATIONS_MODULE_NAME = 'migrations'


class MigrationLoader:
    """
    Load migration files from disk and their status from the database.

    Migration files are expected to live in the "migrations" directory of
    an app. Their names are entirely unimportant from a code perspective,
    but will probably follow the 1234_name.py convention.

    On initialization, this class will scan those directories, and open and
    read the Python files, looking for a class called Migration, which should
    inherit from django.db.migrations.Migration. See
    django.db.migrations.migration for what that looks like.

    Some migrations will be marked as "replacing" another set of migrations.
    These are loaded into a separate set of migrations away from the main ones.
    If all the migrations they replace are either unapplied or missing from
    disk, then they are injected into the main set, replacing the named migrations.
    Any dependency pointers to the replaced migrations are re-pointed to the
    new migration.

    This does mean that this class MUST also talk to the database as well as
    to disk, but this is probably fine. We're already not just operating
    in memory.
    """

    def __init__(self, connection, load=True, ignore_no_migrations=False):
        self.connection = connection
        self.disk_migrations = None
        self.applied_migrations = None
        self.ignore_no_migrations = ignore_no_migrations
        if load:
            self.build_graph()

    @classmethod
    def migrations_module(cls, app_label):
        """
        Return the path to the migrations module for the specified app_label
        and a boolean indicating if the module is specified in
        settings.MIGRATION_MODULES.
        """
        if app_label in settings.MIGRATION_MODULES:
            return settings.MIGRATION_MODULES[app_label], True
        else:
            app_package_name = apps.get_app_config(app_label).name
            return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False

    def load_disk(self):
        """Load the migrations from all INSTALLED_APPS from disk."""
        self.disk_migrations = {}
        self.unmigrated_apps = set()
        self.migrated_apps = set()
        for app_config in apps.get_app_configs():
            # Get the migrations module directory
            module_name, explicit = self.migrations_module(app_config.label)
            if module_name is None:
                self.unmigrated_apps.add(app_config.label)
                continue
            was_loaded = module_name in sys.modules
            try:
                module = import_module(module_name)
            except ImportError as e:
                # I hate doing this, but I don't want to squash other import errors.
                # Might be better to try a directory check directly.
                if ((explicit and self.ignore_no_migrations) or (
                        not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                raise
            else:
                # Module is not a package (e.g. migrations.py).
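                # (A lone ``migrations.py`` module has no ``__path__``, so it
                # cannot contain migration submodules; treat the app as
                # unmigrated.)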
                if not hasattr(module, '__path__'):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Force a reload if it's already loaded (tests need this)
                if was_loaded:
                    reload(module)
            migration_names = {
                name
                for _, name, is_pkg in pkgutil.iter_modules(module.__path__)
                if not is_pkg and name[0] not in '_~'
            }
            if migration_names or self.ignore_no_migrations:
                self.migrated_apps.add(app_config.label)
            else:
                self.unmigrated_apps.add(app_config.label)
            # Load migrations
            for migration_name in migration_names:
                migration_path = '%s.%s' % (module_name, migration_name)
                try:
                    migration_module = import_module(migration_path)
                except ImportError as e:
                    if 'bad magic number' in str(e):
                        raise ImportError(
                            "Couldn't import %r as it appears to be a stale "
                            ".pyc file." % migration_path
                        ) from e
                    else:
                        raise
                if not hasattr(migration_module, "Migration"):
                    raise BadMigrationError(
                        "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                    )
                self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                    migration_name,
                    app_config.label,
                )

    def get_migration(self, app_label, name_prefix):
        """Return the named migration or raise NodeNotFoundError."""
        return self.graph.nodes[app_label, name_prefix]

    def get_migration_by_prefix(self, app_label, name_prefix):
        """
        Return the migration(s) which match the given app label and name_prefix.
        """
        # Do the search
        results = []
        for migration_app_label, migration_name in self.disk_migrations:
            if migration_app_label == app_label and migration_name.startswith(name_prefix):
                results.append((migration_app_label, migration_name))
        if len(results) > 1:
            raise AmbiguityError(
                "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
            )
        elif not results:
            raise KeyError("There are no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
        else:
            return self.disk_migrations[results[0]]

    def check_key(self, key, current_app):
        if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
            return key
        # Special-case __first__, which means "the first migration" for
        # migrated apps, and is ignored for unmigrated apps. It allows
        # makemigrations to declare dependencies on apps before they even have
        # migrations.
        if key[0] == current_app:
            # Ignore __first__ references to the same app (#22325)
            return
        if key[0] in self.unmigrated_apps:
            # This app isn't migrated, but something depends on it.
            # The models will get auto-added into the state, though
            # so we're fine.
            return
        if key[0] in self.migrated_apps:
            try:
                if key[1] == "__first__":
                    return self.graph.root_nodes(key[0])[0]
                else:  # "__latest__"
                    return self.graph.leaf_nodes(key[0])[0]
            except IndexError:
                if self.ignore_no_migrations:
                    return None
                else:
                    raise ValueError("Dependency on app with no migrations: %s" % key[0])
        raise ValueError("Dependency on unknown app: %s" % key[0])

    def add_internal_dependencies(self, key, migration):
        """
        Internal dependencies need to be added first to ensure `__first__`
        dependencies find the correct root node.
        """
        for parent in migration.dependencies:
            # Ignore __first__ references to the same app.
if parent[0] == key[0] and parent[1] != '__first__': self.graph.add_dependency(migration, key, parent, skip_validation=True) def add_external_dependencies(self, key, migration): for parent in migration.dependencies: # Skip internal dependencies if key[0] == parent[0]: continue parent = self.check_key(parent, key[0]) if parent is not None: self.graph.add_dependency(migration, key, parent, skip_validation=True) for child in migration.run_before: child = self.check_key(child, key[0]) if child is not None: self.graph.add_dependency(migration, child, key, skip_validation=True) def build_graph(self): """ Build a migration dependency graph using both the disk and database. You'll need to rebuild the graph if you apply migrations. This isn't usually a problem as generally migration stuff runs in a one-shot process. """ # Load disk data self.load_disk() # Load database data if self.connection is None: self.applied_migrations = {} else: recorder = MigrationRecorder(self.connection) self.applied_migrations = recorder.applied_migrations() # To start, populate the migration graph with nodes for ALL migrations # and their dependencies. Also make note of replacing migrations at this step. self.graph = MigrationGraph() self.replacements = {} for key, migration in self.disk_migrations.items(): self.graph.add_node(key, migration) # Replacing migrations. if migration.replaces: self.replacements[key] = migration for key, migration in self.disk_migrations.items(): # Internal (same app) dependencies. self.add_internal_dependencies(key, migration) # Add external dependencies now that the internal ones have been resolved. for key, migration in self.disk_migrations.items(): self.add_external_dependencies(key, migration) # Carry out replacements where possible. for key, migration in self.replacements.items(): # Get applied status of each of this migration's replacement targets. applied_statuses = [(target in self.applied_migrations) for target in migration.replaces] # Ensure the replacing migration is only marked as applied if all of # its replacement targets are. if all(applied_statuses): self.applied_migrations[key] = migration else: self.applied_migrations.pop(key, None) # A replacing migration can be used if either all or none of its # replacement targets have been applied. if all(applied_statuses) or (not any(applied_statuses)): self.graph.remove_replaced_nodes(key, migration.replaces) else: # This replacing migration cannot be used because it is partially applied. # Remove it from the graph and remap dependencies to it (#25945). self.graph.remove_replacement_node(key, migration.replaces) # Ensure the graph is consistent. try: self.graph.validate_consistency() except NodeNotFoundError as exc: # Check if the missing node could have been replaced by any squash # migration but wasn't because the squash migration was partially # applied before. In that case raise a more understandable exception # (#23556). # Get reverse replacements. reverse_replacements = {} for key, migration in self.replacements.items(): for replaced in migration.replaces: reverse_replacements.setdefault(replaced, set()).add(key) # Try to reraise exception with more detail. if exc.node in reverse_replacements: candidates = reverse_replacements.get(exc.node, set()) is_replaced = any(candidate in self.graph.nodes for candidate in candidates) if not is_replaced: tries = ', '.join('%s.%s' % c for c in candidates) raise NodeNotFoundError( "Migration {0} depends on nonexistent node ('{1}', '{2}'). 
" "Django tried to replace migration {1}.{2} with any of [{3}] " "but wasn't able to because some of the replaced migrations " "are already applied.".format( exc.origin, exc.node[0], exc.node[1], tries ), exc.node ) from exc raise self.graph.ensure_not_cyclic() def check_consistent_history(self, connection): """ Raise InconsistentMigrationHistory if any applied migrations have unapplied dependencies. """ recorder = MigrationRecorder(connection) applied = recorder.applied_migrations() for migration in applied: # If the migration is unknown, skip it. if migration not in self.graph.nodes: continue for parent in self.graph.node_map[migration].parents: if parent not in applied: # Skip unapplied squashed migrations that have all of their # `replaces` applied. if parent in self.replacements: if all(m in applied for m in self.replacements[parent].replaces): continue raise InconsistentMigrationHistory( "Migration {}.{} is applied before its dependency " "{}.{} on database '{}'.".format( migration[0], migration[1], parent[0], parent[1], connection.alias, ) ) def detect_conflicts(self): """ Look through the loaded graph and detect any conflicts - apps with more than one leaf migration. Return a dict of the app labels that conflict with the migration names that conflict. """ seen_apps = {} conflicting_apps = set() for app_label, migration_name in self.graph.leaf_nodes(): if app_label in seen_apps: conflicting_apps.add(app_label) seen_apps.setdefault(app_label, set()).add(migration_name) return {app_label: seen_apps[app_label] for app_label in conflicting_apps} def project_state(self, nodes=None, at_end=True): """ Return a ProjectState object representing the most recent state that the loaded migrations represent. See graph.make_state() for the meaning of "nodes" and "at_end". """ return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
6c14753855e094a5823ad4ad27d718e48ee2d94022cb146ab7c871dd793d0687
from django.apps.registry import Apps from django.db import DatabaseError, models from django.utils.functional import classproperty from django.utils.timezone import now from .exceptions import MigrationSchemaMissing class MigrationRecorder: """ Deal with storing migration records in the database. Because this table is actually itself used for dealing with model creation, it's the one thing we can't do normally via migrations. We manually handle table creation/schema updating (using schema backend) and then have a floating model to do queries with. If a migration is unapplied its row is removed from the table. Having a row in the table always means a migration is applied. """ _migration_class = None @classproperty def Migration(cls): """ Lazy load to avoid AppRegistryNotReady if installed apps import MigrationRecorder. """ if cls._migration_class is None: class Migration(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField(default=now) class Meta: apps = Apps() app_label = 'migrations' db_table = 'django_migrations' def __str__(self): return 'Migration %s for %s' % (self.name, self.app) cls._migration_class = Migration return cls._migration_class def __init__(self, connection): self.connection = connection @property def migration_qs(self): return self.Migration.objects.using(self.connection.alias) def has_table(self): """Return True if the django_migrations table exists.""" return self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()) def ensure_schema(self): """Ensure the table exists and has the correct schema.""" # If the table's there, that's fine - we've never changed its schema # in the codebase. if self.has_table(): return # Make the table try: with self.connection.schema_editor() as editor: editor.create_model(self.Migration) except DatabaseError as exc: raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc) def applied_migrations(self): """ Return a dict mapping (app_name, migration_name) to Migration instances for all applied migrations. """ if self.has_table(): return {(migration.app, migration.name): migration for migration in self.migration_qs} else: # If the django_migrations table doesn't exist, then no migrations # are applied. return {} def record_applied(self, app, name): """Record that a migration was applied.""" self.ensure_schema() self.migration_qs.create(app=app, name=name) def record_unapplied(self, app, name): """Record that a migration was unapplied.""" self.ensure_schema() self.migration_qs.filter(app=app, name=name).delete() def flush(self): """Delete all migration records. Useful for testing migrations.""" self.migration_qs.all().delete()
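# Usage sketch (assumes a configured default database connection and a
# hypothetical 'myapp' label):
#
#     from django.db import connection
#     from django.db.migrations.recorder import MigrationRecorder
#
#     recorder = MigrationRecorder(connection)
#     recorder.record_applied('myapp', '0001_initial')
#     ('myapp', '0001_initial') in recorder.applied_migrations()  # True
#     recorder.record_unapplied('myapp', '0001_initial')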
09a188815137f903eeba0ada38d3e0d6cef8dafe98769b27f6709b4f53706e6d
""" The main QuerySet implementation. This provides the public API for the ORM. """ import copy import operator import warnings from collections import namedtuple from functools import lru_cache from itertools import chain from django.conf import settings from django.core import exceptions from django.db import ( DJANGO_VERSION_PICKLE_KEY, IntegrityError, NotSupportedError, connections, router, transaction, ) from django.db.models import AutoField, DateField, DateTimeField, sql from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import Collector from django.db.models.expressions import Case, Expression, F, Value, When from django.db.models.functions import Cast, Trunc from django.db.models.query_utils import FilteredRelation, Q from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE from django.db.models.utils import resolve_callables from django.utils import timezone from django.utils.functional import cached_property, partition from django.utils.version import get_version # The maximum number of results to fetch in a get() query. MAX_GET_RESULTS = 21 # The maximum number of items to display in a QuerySet.__repr__ REPR_OUTPUT_SIZE = 20 class BaseIterable: def __init__(self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): self.queryset = queryset self.chunked_fetch = chunked_fetch self.chunk_size = chunk_size class ModelIterable(BaseIterable): """Iterable that yields a model instance for each row.""" def __iter__(self): queryset = self.queryset db = queryset.db compiler = queryset.query.get_compiler(using=db) # Execute the query. This will also fill compiler.select, klass_info, # and annotations. results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info, compiler.annotation_col_map) model_cls = klass_info['model'] select_fields = klass_info['select_fields'] model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 init_list = [f[0].target.attname for f in select[model_fields_start:model_fields_end]] related_populators = get_related_populators(klass_info, select, db) known_related_objects = [ (field, related_objs, operator.attrgetter(*[ field.attname if from_field == 'self' else queryset.model._meta.get_field(from_field).attname for from_field in field.from_fields ])) for field, related_objs in queryset._known_related_objects.items() ] for row in compiler.results_iter(results): obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end]) for rel_populator in related_populators: rel_populator.populate(row, obj) if annotation_col_map: for attr_name, col_pos in annotation_col_map.items(): setattr(obj, attr_name, row[col_pos]) # Add the known related objects to the model. for field, rel_objs, rel_getter in known_related_objects: # Avoid overwriting objects loaded by, e.g., select_related(). if field.is_cached(obj): continue rel_obj_id = rel_getter(obj) try: rel_obj = rel_objs[rel_obj_id] except KeyError: pass # May happen in qs1 | qs2 scenarios. else: setattr(obj, field.name, rel_obj) yield obj class ValuesIterable(BaseIterable): """ Iterable returned by QuerySet.values() that yields a dict for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) # extra(select=...) cols are always at the start of the row. 
names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] indexes = range(len(names)) for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size): yield {names[i]: row[i] for i in indexes} class ValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=False) that yields a tuple for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) if queryset._fields: # extra(select=...) cols are always at the start of the row. names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] fields = [*queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields)] if fields != names: # Reorder according to fields. index_map = {name: idx for idx, name in enumerate(names)} rowfactory = operator.itemgetter(*[index_map[f] for f in fields]) return map( rowfactory, compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) ) return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) class NamedValuesListIterable(ValuesListIterable): """ Iterable returned by QuerySet.values_list(named=True) that yields a namedtuple for each row. """ @staticmethod @lru_cache() def create_namedtuple_class(*names): # Cache namedtuple() with @lru_cache() since it's too slow to be # called for every QuerySet evaluation. return namedtuple('Row', names) def __iter__(self): queryset = self.queryset if queryset._fields: names = queryset._fields else: query = queryset.query names = [*query.extra_select, *query.values_select, *query.annotation_select] tuple_class = self.create_namedtuple_class(*names) new = tuple.__new__ for row in super().__iter__(): yield new(tuple_class, row) class FlatValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=True) that yields single values. """ def __iter__(self): queryset = self.queryset compiler = queryset.query.get_compiler(queryset.db) for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size): yield row[0] class QuerySet: """Represent a lazy database lookup for a set of objects.""" def __init__(self, model=None, query=None, using=None, hints=None): self.model = model self._db = using self._hints = hints or {} self._query = query or sql.Query(self.model) self._result_cache = None self._sticky_filter = False self._for_write = False self._prefetch_related_lookups = () self._prefetch_done = False self._known_related_objects = {} # {rel_field: {pk: rel_obj}} self._iterable_class = ModelIterable self._fields = None self._defer_next_filter = False self._deferred_filter = None @property def query(self): if self._deferred_filter: negate, args, kwargs = self._deferred_filter self._filter_or_exclude_inplace(negate, *args, **kwargs) self._deferred_filter = None return self._query @query.setter def query(self, value): self._query = value def as_manager(cls): # Address the circular dependency between `Queryset` and `Manager`. 
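        # Usage sketch (hypothetical queryset class):
        #
        #     class PersonQuerySet(models.QuerySet):
        #         def adults(self):
        #             return self.filter(age__gte=18)
        #
        #     class Person(models.Model):
        #         objects = PersonQuerySet.as_manager()
        #
        # Person.objects.adults() then proxies to the queryset method.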
        from django.db.models.manager import Manager
        manager = Manager.from_queryset(cls)()
        manager._built_with_as_manager = True
        return manager
    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)

    ########################
    # PYTHON MAGIC METHODS #
    ########################

    def __deepcopy__(self, memo):
        """Don't populate the QuerySet's cache."""
        obj = self.__class__()
        for k, v in self.__dict__.items():
            if k == '_result_cache':
                obj.__dict__[k] = None
            else:
                obj.__dict__[k] = copy.deepcopy(v, memo)
        return obj

    def __getstate__(self):
        # Force the cache to be fully populated.
        self._fetch_all()
        return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: get_version()}

    def __setstate__(self, state):
        msg = None
        pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
        if pickled_version:
            current_version = get_version()
            if current_version != pickled_version:
                msg = (
                    "Pickled queryset instance's Django version %s does not "
                    "match the current version %s." % (pickled_version, current_version)
                )
        else:
            msg = "Pickled queryset instance's Django version is not specified."

        if msg:
            warnings.warn(msg, RuntimeWarning, stacklevel=2)

        self.__dict__.update(state)

    def __repr__(self):
        data = list(self[:REPR_OUTPUT_SIZE + 1])
        if len(data) > REPR_OUTPUT_SIZE:
            data[-1] = "...(remaining elements truncated)..."
        return '<%s %r>' % (self.__class__.__name__, data)

    def __len__(self):
        self._fetch_all()
        return len(self._result_cache)

    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:
            1. sql.compiler.execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
                 using cursor.fetchmany(). This part is responsible for
                 doing some column masking, and returning the rows in chunks.
            2. sql.compiler.results_iter()
               - Returns one row at a time. At this point the rows are still
                 just tuples. In some cases the return values are converted
                 to Python values at this location.
            3. self.iterator()
               - Responsible for turning the rows into model objects.
        """
        self._fetch_all()
        return iter(self._result_cache)

    def __bool__(self):
        self._fetch_all()
        return bool(self._result_cache)

    def __getitem__(self, k):
        """Retrieve an item or slice from the set of results."""
        if not isinstance(k, (int, slice)):
            raise TypeError(
                'QuerySet indices must be integers or slices, not %s.'
                % type(k).__name__
            )
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
if self._result_cache is not None: return self._result_cache[k] if isinstance(k, slice): qs = self._chain() if k.start is not None: start = int(k.start) else: start = None if k.stop is not None: stop = int(k.stop) else: stop = None qs.query.set_limits(start, stop) return list(qs)[::k.step] if k.step else qs qs = self._chain() qs.query.set_limits(k, k + 1) qs._fetch_all() return qs._result_cache[0] def __and__(self, other): self._merge_sanity_check(other) if isinstance(other, EmptyQuerySet): return other if isinstance(self, EmptyQuerySet): return self combined = self._chain() combined._merge_known_related_objects(other) combined.query.combine(other.query, sql.AND) return combined def __or__(self, other): self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self query = self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values('pk')) combined = query._chain() combined._merge_known_related_objects(other) if not other.query.can_filter(): other = other.model._base_manager.filter(pk__in=other.values('pk')) combined.query.combine(other.query, sql.OR) return combined #################################### # METHODS THAT DO DATABASE QUERIES # #################################### def _iterator(self, use_chunked_fetch, chunk_size): yield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size) def iterator(self, chunk_size=2000): """ An iterator over the results from applying this QuerySet to the database. """ if chunk_size <= 0: raise ValueError('Chunk size must be strictly positive.') use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS') return self._iterator(use_chunked_fetch, chunk_size) def aggregate(self, *args, **kwargs): """ Return a dictionary containing the calculations (aggregation) over the current queryset. If args is present the expression is passed as a kwarg using the Aggregate object's default alias. """ if self.query.distinct_fields: raise NotImplementedError("aggregate() + distinct(fields) not implemented.") self._validate_values_are_expressions((*args, *kwargs.values()), method_name='aggregate') for arg in args: # The default_alias property raises TypeError if default_alias # can't be set automatically or AttributeError if it isn't an # attribute. try: arg.default_alias except (AttributeError, TypeError): raise TypeError("Complex aggregates require an alias") kwargs[arg.default_alias] = arg query = self.query.chain() for (alias, aggregate_expr) in kwargs.items(): query.add_annotation(aggregate_expr, alias, is_summary=True) if not query.annotations[alias].contains_aggregate: raise TypeError("%s is not an aggregate expression" % alias) return query.get_aggregation(self.db, kwargs) def count(self): """ Perform a SELECT COUNT() and return the number of records as an integer. If the QuerySet is already fully cached, return the length of the cached results set to avoid multiple SELECT COUNT(*) calls. """ if self._result_cache is not None: return len(self._result_cache) return self.query.get_count(using=self.db) def get(self, *args, **kwargs): """ Perform the query and return a single object matching the given keyword arguments. 
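
        Usage sketch (hypothetical model)::

            entry = Entry.objects.get(pk=1)

        Raises the model's DoesNotExist if nothing matches and its
        MultipleObjectsReturned if more than one row does.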
""" clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs) if self.query.can_filter() and not self.query.distinct_fields: clone = clone.order_by() limit = None if not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit: limit = MAX_GET_RESULTS clone.query.set_limits(high=limit) num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise self.model.DoesNotExist( "%s matching query does not exist." % self.model._meta.object_name ) raise self.model.MultipleObjectsReturned( 'get() returned more than one %s -- it returned %s!' % ( self.model._meta.object_name, num if not limit or num < limit else 'more than %s' % (limit - 1), ) ) def create(self, **kwargs): """ Create a new object with the given kwargs, saving it to the database and returning the created object. """ obj = self.model(**kwargs) self._for_write = True obj.save(force_insert=True, using=self.db) return obj def _populate_pk_values(self, objs): for obj in objs: if obj.pk is None: obj.pk = obj._meta.pk.get_pk_value_on_save(obj) def bulk_create(self, objs, batch_size=None, ignore_conflicts=False): """ Insert each of the instances into the database. Do *not* call save() on each of the instances, do not send any pre/post_save signals, and do not set the primary key attribute if it is an autoincrement field (except if features.can_return_rows_from_bulk_insert=True). Multi-table models are not supported. """ # When you bulk insert you don't get the primary keys back (if it's an # autoincrement, except if can_return_rows_from_bulk_insert=True), so # you can't insert into the child tables which references this. There # are two workarounds: # 1) This could be implemented if you didn't have an autoincrement pk # 2) You could do it by doing O(n) normal inserts into the parent # tables to get the primary keys back and then doing a single bulk # insert into the childmost table. # We currently set the primary keys on the objects when using # PostgreSQL via the RETURNING ID clause. It should be possible for # Oracle as well, but the semantics for extracting the primary keys is # trickier so it's not done yet. assert batch_size is None or batch_size > 0 # Check that the parents share the same concrete model with the our # model to detect the inheritance pattern ConcreteGrandParent -> # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy # would not identify that case as involving multiple tables. 
for parent in self.model._meta.get_parent_list(): if parent._meta.concrete_model is not self.model._meta.concrete_model: raise ValueError("Can't bulk create a multi-table inherited model") if not objs: return objs self._for_write = True connection = connections[self.db] opts = self.model._meta fields = opts.concrete_fields objs = list(objs) self._populate_pk_values(objs) with transaction.atomic(using=self.db, savepoint=False): objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) if objs_with_pk: returned_columns = self._batched_insert( objs_with_pk, fields, batch_size, ignore_conflicts=ignore_conflicts, ) for obj_with_pk, results in zip(objs_with_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): if field != opts.pk: setattr(obj_with_pk, field.attname, result) for obj_with_pk in objs_with_pk: obj_with_pk._state.adding = False obj_with_pk._state.db = self.db if objs_without_pk: fields = [f for f in fields if not isinstance(f, AutoField)] returned_columns = self._batched_insert( objs_without_pk, fields, batch_size, ignore_conflicts=ignore_conflicts, ) if connection.features.can_return_rows_from_bulk_insert and not ignore_conflicts: assert len(returned_columns) == len(objs_without_pk) for obj_without_pk, results in zip(objs_without_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): setattr(obj_without_pk, field.attname, result) obj_without_pk._state.adding = False obj_without_pk._state.db = self.db return objs def bulk_update(self, objs, fields, batch_size=None): """ Update the given fields in each of the given objects in the database. """ if batch_size is not None and batch_size < 0: raise ValueError('Batch size must be a positive integer.') if not fields: raise ValueError('Field names must be given to bulk_update().') objs = tuple(objs) if any(obj.pk is None for obj in objs): raise ValueError('All bulk_update() objects must have a primary key set.') fields = [self.model._meta.get_field(name) for name in fields] if any(not f.concrete or f.many_to_many for f in fields): raise ValueError('bulk_update() can only be used with concrete fields.') if any(f.primary_key for f in fields): raise ValueError('bulk_update() cannot be used with primary key fields.') if not objs: return # PK is used twice in the resulting update query, once in the filter # and once in the WHEN. Each field will also have one CAST. 
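        # Rough shape of each generated batch query (sketch, not exact SQL):
        #
        #     UPDATE "app_model" SET
        #         "field" = CASE
        #             WHEN "id" = 1 THEN 'a'
        #             WHEN "id" = 2 THEN 'b'
        #         END
        #     WHERE "id" IN (1, 2)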
max_batch_size = connections[self.db].ops.bulk_batch_size(['pk', 'pk'] + fields, objs) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size requires_casting = connections[self.db].features.requires_casted_case_in_updates batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size)) updates = [] for batch_objs in batches: update_kwargs = {} for field in fields: when_statements = [] for obj in batch_objs: attr = getattr(obj, field.attname) if not isinstance(attr, Expression): attr = Value(attr, output_field=field) when_statements.append(When(pk=obj.pk, then=attr)) case_statement = Case(*when_statements, output_field=field) if requires_casting: case_statement = Cast(case_statement, output_field=field) update_kwargs[field.attname] = case_statement updates.append(([obj.pk for obj in batch_objs], update_kwargs)) with transaction.atomic(using=self.db, savepoint=False): for pks, update_kwargs in updates: self.filter(pk__in=pks).update(**update_kwargs) bulk_update.alters_data = True def get_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, creating one if necessary. Return a tuple of (object, created), where created is a boolean specifying whether an object was created. """ # The get() needs to be targeted at the write database in order # to avoid potential transaction consistency problems. self._for_write = True try: return self.get(**kwargs), False except self.model.DoesNotExist: params = self._extract_model_params(defaults, **kwargs) return self._create_object_from_params(kwargs, params) def update_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, updating one with defaults if it exists, otherwise create a new one. Return a tuple (object, created), where created is a boolean specifying whether an object was created. """ defaults = defaults or {} self._for_write = True with transaction.atomic(using=self.db): try: obj = self.select_for_update().get(**kwargs) except self.model.DoesNotExist: params = self._extract_model_params(defaults, **kwargs) # Lock the row so that a concurrent update is blocked until # after update_or_create() has performed its save. obj, created = self._create_object_from_params(kwargs, params, lock=True) if created: return obj, created for k, v in resolve_callables(defaults): setattr(obj, k, v) obj.save(using=self.db) return obj, False def _create_object_from_params(self, lookup, params, lock=False): """ Try to create an object using passed params. Used by get_or_create() and update_or_create(). """ try: with transaction.atomic(using=self.db): params = dict(resolve_callables(params)) obj = self.create(**params) return obj, True except IntegrityError: try: qs = self.select_for_update() if lock else self return qs.get(**lookup), False except self.model.DoesNotExist: pass raise def _extract_model_params(self, defaults, **kwargs): """ Prepare `params` for creating a model instance based on the given kwargs; for use by get_or_create() and update_or_create(). """ defaults = defaults or {} params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k} params.update(defaults) property_names = self.model._meta._property_names invalid_params = [] for param in params: try: self.model._meta.get_field(param) except exceptions.FieldDoesNotExist: # It's okay to use a model's property if it has a setter. 
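                # e.g. (hypothetical model): get_or_create(name='a',
                # defaults={'full_name': 'A B'}) passes validation here when
                # full_name is a property with a setter rather than a field.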
if not (param in property_names and getattr(self.model, param).fset): invalid_params.append(param) if invalid_params: raise exceptions.FieldError( "Invalid field name(s) for model %s: '%s'." % ( self.model._meta.object_name, "', '".join(sorted(invalid_params)), )) return params def _earliest(self, *fields): """ Return the earliest object according to fields (if given) or by the model's Meta.get_latest_by. """ if fields: order_by = fields else: order_by = getattr(self.model._meta, 'get_latest_by') if order_by and not isinstance(order_by, (tuple, list)): order_by = (order_by,) if order_by is None: raise ValueError( "earliest() and latest() require either fields as positional " "arguments or 'get_latest_by' in the model's Meta." ) assert not self.query.is_sliced, \ "Cannot change a query once a slice has been taken." obj = self._chain() obj.query.set_limits(high=1) obj.query.clear_ordering(force_empty=True) obj.query.add_ordering(*order_by) return obj.get() def earliest(self, *fields): return self._earliest(*fields) def latest(self, *fields): return self.reverse()._earliest(*fields) def first(self): """Return the first object of a query or None if no match is found.""" for obj in (self if self.ordered else self.order_by('pk'))[:1]: return obj def last(self): """Return the last object of a query or None if no match is found.""" for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]: return obj def in_bulk(self, id_list=None, *, field_name='pk'): """ Return a dictionary mapping each of the given IDs to the object with that ID. If `id_list` isn't provided, evaluate the entire QuerySet. """ assert not self.query.is_sliced, \ "Cannot use 'limit' or 'offset' with in_bulk" if field_name != 'pk' and not self.model._meta.get_field(field_name).unique: raise ValueError("in_bulk()'s field_name must be a unique field but %r isn't." % field_name) if id_list is not None: if not id_list: return {} filter_key = '{}__in'.format(field_name) batch_size = connections[self.db].features.max_query_params id_list = tuple(id_list) # If the database has a limit on the number of query parameters # (e.g. SQLite), retrieve objects in batches if necessary. if batch_size and batch_size < len(id_list): qs = () for offset in range(0, len(id_list), batch_size): batch = id_list[offset:offset + batch_size] qs += tuple(self.filter(**{filter_key: batch}).order_by()) else: qs = self.filter(**{filter_key: id_list}).order_by() else: qs = self._chain() return {getattr(obj, field_name): obj for obj in qs} def delete(self): """Delete the records in the current QuerySet.""" self._not_support_combined_queries('delete') assert not self.query.is_sliced, \ "Cannot use 'limit' or 'offset' with delete." if self._fields is not None: raise TypeError("Cannot call delete() after .values() or .values_list()") del_query = self._chain() # The delete is actually 2 queries - one to find related objects, # and one to delete. Make sure that the discovery of related # objects is performed on the same database as the deletion. del_query._for_write = True # Disable non-supported fields. del_query.query.select_for_update = False del_query.query.select_related = False del_query.query.clear_ordering(force_empty=True) collector = Collector(using=del_query.db) collector.collect(del_query) deleted, _rows_count = collector.delete() # Clear the result cache, in case this QuerySet gets reused. 
        self._result_cache = None
        return deleted, _rows_count

    delete.alters_data = True
    delete.queryset_only = True

    def _raw_delete(self, using):
        """
        Delete objects found from the given queryset in a single direct SQL
        query. No signals are sent and there is no protection for cascades.
        """
        query = self.query.clone()
        query.__class__ = sql.DeleteQuery
        cursor = query.get_compiler(using).execute_sql(CURSOR)
        return cursor.rowcount if cursor else 0
    _raw_delete.alters_data = True

    def update(self, **kwargs):
        """
        Update all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        self._not_support_combined_queries('update')
        assert not self.query.is_sliced, \
            "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        with transaction.mark_for_rollback_on_error(using=self.db):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        self._result_cache = None
        return rows
    update.alters_data = True

    def _update(self, values):
        """
        A version of update() that accepts field objects instead of field
        names. Used primarily for model saving and not intended for use by
        general code (it requires too much poking around at model internals
        to be useful at that level).
        """
        assert not self.query.is_sliced, \
            "Cannot update a query once a slice has been taken."
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_fields(values)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(CURSOR)
    _update.alters_data = True
    _update.queryset_only = False

    def exists(self):
        if self._result_cache is None:
            return self.query.has_results(using=self.db)
        return bool(self._result_cache)

    def _prefetch_related_objects(self):
        # This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def explain(self, *, format=None, **options): return self.query.explain(using=self.db, format=format, **options) ################################################## # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # ################################################## def raw(self, raw_query, params=None, translations=None, using=None): if using is None: using = self.db qs = RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using) qs._prefetch_related_lookups = self._prefetch_related_lookups[:] return qs def _values(self, *fields, **expressions): clone = self._chain() if expressions: clone = clone.annotate(**expressions) clone._fields = fields clone.query.set_values(fields) return clone def values(self, *fields, **expressions): fields += tuple(expressions) clone = self._values(*fields, **expressions) clone._iterable_class = ValuesIterable return clone def values_list(self, *fields, flat=False, named=False): if flat and named: raise TypeError("'flat' and 'named' can't be used together.") if flat and len(fields) > 1: raise TypeError("'flat' is not valid when values_list is called with more than one field.") field_names = {f for f in fields if not hasattr(f, 'resolve_expression')} _fields = [] expressions = {} counter = 1 for field in fields: if hasattr(field, 'resolve_expression'): field_id_prefix = getattr(field, 'default_alias', field.__class__.__name__.lower()) while True: field_id = field_id_prefix + str(counter) counter += 1 if field_id not in field_names: break expressions[field_id] = field _fields.append(field_id) else: _fields.append(field) clone = self._values(*_fields, **expressions) clone._iterable_class = ( NamedValuesListIterable if named else FlatValuesListIterable if flat else ValuesListIterable ) return clone def dates(self, field_name, kind, order='ASC'): """ Return a list of date objects representing all available dates for the given field_name, scoped to 'kind'. """ assert kind in ('year', 'month', 'week', 'day'), \ "'kind' must be one of 'year', 'month', 'week', or 'day'." assert order in ('ASC', 'DESC'), \ "'order' must be either 'ASC' or 'DESC'." return self.annotate( datefield=Trunc(field_name, kind, output_field=DateField()), plain_field=F(field_name) ).values_list( 'datefield', flat=True ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield') def datetimes(self, field_name, kind, order='ASC', tzinfo=None): """ Return a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'. """ assert kind in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'), \ "'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'." assert order in ('ASC', 'DESC'), \ "'order' must be either 'ASC' or 'DESC'." 
if settings.USE_TZ: if tzinfo is None: tzinfo = timezone.get_current_timezone() else: tzinfo = None return self.annotate( datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo), plain_field=F(field_name) ).values_list( 'datetimefield', flat=True ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield') def none(self): """Return an empty QuerySet.""" clone = self._chain() clone.query.set_empty() return clone ################################################################## # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # ################################################################## def all(self): """ Return a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases. """ return self._chain() def filter(self, *args, **kwargs): """ Return a new QuerySet instance with the args ANDed to the existing set. """ self._not_support_combined_queries('filter') return self._filter_or_exclude(False, *args, **kwargs) def exclude(self, *args, **kwargs): """ Return a new QuerySet instance with NOT (args) ANDed to the existing set. """ self._not_support_combined_queries('exclude') return self._filter_or_exclude(True, *args, **kwargs) def _filter_or_exclude(self, negate, *args, **kwargs): if args or kwargs: assert not self.query.is_sliced, \ "Cannot filter a query once a slice has been taken." clone = self._chain() if self._defer_next_filter: self._defer_next_filter = False clone._deferred_filter = negate, args, kwargs else: clone._filter_or_exclude_inplace(negate, *args, **kwargs) return clone def _filter_or_exclude_inplace(self, negate, *args, **kwargs): if negate: self._query.add_q(~Q(*args, **kwargs)) else: self._query.add_q(Q(*args, **kwargs)) def complex_filter(self, filter_obj): """ Return a new QuerySet instance with filter_obj added to the filters. filter_obj can be a Q object or a dictionary of keyword lookup arguments. This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods. """ if isinstance(filter_obj, Q): clone = self._chain() clone.query.add_q(filter_obj) return clone else: return self._filter_or_exclude(False, **filter_obj) def _combinator_query(self, combinator, *other_qs, all=False): # Clone the query to inherit the select list and everything clone = self._chain() # Clear limits and ordering so they can be reapplied clone.query.clear_ordering(True) clone.query.clear_limits() clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs) clone.query.combinator = combinator clone.query.combinator_all = all return clone def union(self, *other_qs, all=False): # If the query is an EmptyQuerySet, combine all nonempty querysets. if isinstance(self, EmptyQuerySet): qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)] return qs[0]._combinator_query('union', *qs[1:], all=all) if qs else self return self._combinator_query('union', *other_qs, all=all) def intersection(self, *other_qs): # If any query is an EmptyQuerySet, return it. if isinstance(self, EmptyQuerySet): return self for other in other_qs: if isinstance(other, EmptyQuerySet): return other return self._combinator_query('intersection', *other_qs) def difference(self, *other_qs): # If the query is an EmptyQuerySet, return it. 
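        # Sketch: on backends that support set operations, qs1.difference(qs2)
        # compiles to "... EXCEPT ...", just as union() maps to UNION and
        # intersection() to INTERSECT.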
if isinstance(self, EmptyQuerySet): return self return self._combinator_query('difference', *other_qs) def select_for_update(self, nowait=False, skip_locked=False, of=()): """ Return a new QuerySet instance that will select objects with a FOR UPDATE lock. """ if nowait and skip_locked: raise ValueError('The nowait option cannot be used with skip_locked.') obj = self._chain() obj._for_write = True obj.query.select_for_update = True obj.query.select_for_update_nowait = nowait obj.query.select_for_update_skip_locked = skip_locked obj.query.select_for_update_of = of return obj def select_related(self, *fields): """ Return a new QuerySet instance that will select related objects. If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection. If select_related(None) is called, clear the list. """ self._not_support_combined_queries('select_related') if self._fields is not None: raise TypeError("Cannot call select_related() after .values() or .values_list()") obj = self._chain() if fields == (None,): obj.query.select_related = False elif fields: obj.query.add_select_related(fields) else: obj.query.select_related = True return obj def prefetch_related(self, *lookups): """ Return a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated. When prefetch_related() is called more than once, append to the list of prefetch lookups. If prefetch_related(None) is called, clear the list. """ self._not_support_combined_queries('prefetch_related') clone = self._chain() if lookups == (None,): clone._prefetch_related_lookups = () else: for lookup in lookups: if isinstance(lookup, Prefetch): lookup = lookup.prefetch_to lookup = lookup.split(LOOKUP_SEP, 1)[0] if lookup in self.query._filtered_relations: raise ValueError('prefetch_related() is not supported with FilteredRelation.') clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def annotate(self, *args, **kwargs): """ Return a query set in which the returned objects have been annotated with extra data or aggregations. """ self._not_support_combined_queries('annotate') self._validate_values_are_expressions(args + tuple(kwargs.values()), method_name='annotate') annotations = {} for arg in args: # The default_alias property may raise a TypeError. try: if arg.default_alias in kwargs: raise ValueError("The named annotation '%s' conflicts with the " "default name for another annotation." % arg.default_alias) except TypeError: raise TypeError("Complex annotations require an alias") annotations[arg.default_alias] = arg annotations.update(kwargs) clone = self._chain() names = self._fields if names is None: names = set(chain.from_iterable( (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) for field in self.model._meta.get_fields() )) for alias, annotation in annotations.items(): if alias in names: raise ValueError("The annotation '%s' conflicts with a field on " "the model." 
                                 % alias)
            if isinstance(annotation, FilteredRelation):
                clone.query.add_filtered_relation(annotation, alias)
            else:
                clone.query.add_annotation(annotation, alias, is_summary=False)

        for alias, annotation in clone.query.annotations.items():
            if alias in annotations and annotation.contains_aggregate:
                if clone._fields is None:
                    clone.query.group_by = True
                else:
                    clone.query.set_group_by()
                break

        return clone

    def order_by(self, *field_names):
        """Return a new QuerySet instance with the ordering changed."""
        assert not self.query.is_sliced, \
            "Cannot reorder a query once a slice has been taken."
        obj = self._chain()
        obj.query.clear_ordering(force_empty=False)
        obj.query.add_ordering(*field_names)
        return obj

    def distinct(self, *field_names):
        """
        Return a new QuerySet instance that will select only distinct results.
        """
        assert not self.query.is_sliced, \
            "Cannot create distinct fields once a slice has been taken."
        obj = self._chain()
        obj.query.add_distinct_fields(*field_names)
        return obj

    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """Add extra SQL fragments to the query."""
        self._not_support_combined_queries('extra')
        assert not self.query.is_sliced, \
            "Cannot change a query once a slice has been taken"
        clone = self._chain()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone

    def reverse(self):
        """Reverse the ordering of the QuerySet."""
        if self.query.is_sliced:
            raise TypeError('Cannot reverse a query once a slice has been taken.')
        clone = self._chain()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone

    def defer(self, *fields):
        """
        Defer the loading of data for certain fields until they are accessed.
        Add the set of deferred fields to any existing set of deferred fields.
        The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
        """
        self._not_support_combined_queries('defer')
        if self._fields is not None:
            raise TypeError("Cannot call defer() after .values() or .values_list()")
        clone = self._chain()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone

    def only(self, *fields):
        """
        Essentially, the opposite of defer(). Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        self._not_support_combined_queries('only')
        if self._fields is not None:
            raise TypeError("Cannot call only() after .values() or .values_list()")
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        for field in fields:
            field = field.split(LOOKUP_SEP, 1)[0]
            if field in self.query._filtered_relations:
                raise ValueError('only() is not supported with FilteredRelation.')
        clone = self._chain()
        clone.query.add_immediate_loading(fields)
        return clone

    def using(self, alias):
        """Select which database this QuerySet should execute against."""
        clone = self._chain()
        clone._db = alias
        return clone

    ###################################
    # PUBLIC INTROSPECTION ATTRIBUTES #
    ###################################

    @property
    def ordered(self):
        """
        Return True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model (or is empty).
""" if isinstance(self, EmptyQuerySet): return True if self.query.extra_order_by or self.query.order_by: return True elif self.query.default_ordering and self.query.get_meta().ordering: return True else: return False @property def db(self): """Return the database used if this query is executed now.""" if self._for_write: return self._db or router.db_for_write(self.model, **self._hints) return self._db or router.db_for_read(self.model, **self._hints) ################### # PRIVATE METHODS # ################### def _insert(self, objs, fields, returning_fields=None, raw=False, using=None, ignore_conflicts=False): """ Insert a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented. """ self._for_write = True if using is None: using = self.db query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts) query.insert_values(fields, objs, raw=raw) return query.get_compiler(using=using).execute_sql(returning_fields) _insert.alters_data = True _insert.queryset_only = False def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False): """ Helper method for bulk_create() to insert objs one batch at a time. """ if ignore_conflicts and not connections[self.db].features.supports_ignore_conflicts: raise NotSupportedError('This database backend does not support ignoring conflicts.') ops = connections[self.db].ops max_batch_size = max(ops.bulk_batch_size(fields, objs), 1) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size inserted_rows = [] bulk_return = connections[self.db].features.can_return_rows_from_bulk_insert for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]: if bulk_return and not ignore_conflicts: inserted_columns = self._insert( item, fields=fields, using=self.db, returning_fields=self.model._meta.db_returning_fields, ignore_conflicts=ignore_conflicts, ) if isinstance(inserted_columns, list): inserted_rows.extend(inserted_columns) else: inserted_rows.append(inserted_columns) else: self._insert(item, fields=fields, using=self.db, ignore_conflicts=ignore_conflicts) return inserted_rows def _chain(self, **kwargs): """ Return a copy of the current QuerySet that's ready for another operation. """ obj = self._clone() if obj._sticky_filter: obj.query.filter_is_sticky = True obj._sticky_filter = False obj.__dict__.update(kwargs) return obj def _clone(self): """ Return a copy of the current QuerySet. A lightweight alternative to deepcopy(). """ c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints) c._sticky_filter = self._sticky_filter c._for_write = self._for_write c._prefetch_related_lookups = self._prefetch_related_lookups[:] c._known_related_objects = self._known_related_objects c._iterable_class = self._iterable_class c._fields = self._fields return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self._iterable_class(self)) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def _next_is_sticky(self): """ Indicate that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns "self"). 
        The method is only used internally and should be immediately followed
        by a filter() that does create a clone.
        """
        self._sticky_filter = True
        return self

    def _merge_sanity_check(self, other):
        """Check that two QuerySet classes may be merged."""
        if self._fields is not None and (
                set(self.query.values_select) != set(other.query.values_select) or
                set(self.query.extra_select) != set(other.query.extra_select) or
                set(self.query.annotation_select) != set(other.query.annotation_select)):
            raise TypeError(
                "Merging '%s' classes must involve the same values in each case."
                % self.__class__.__name__
            )

    def _merge_known_related_objects(self, other):
        """
        Keep track of all known related objects from either QuerySet instance.
        """
        for field, objects in other._known_related_objects.items():
            self._known_related_objects.setdefault(field, {}).update(objects)

    def resolve_expression(self, *args, **kwargs):
        if self._fields and len(self._fields) > 1:
            # values() queryset can only be used as nested queries
            # if they are set up to select only a single field.
            raise TypeError('Cannot use multi-field values as a filter value.')
        query = self.query.resolve_expression(*args, **kwargs)
        query._db = self._db
        return query
    resolve_expression.queryset_only = True

    def _add_hints(self, **hints):
        """
        Update hinting information for use by routers. Add new key/values or
        overwrite existing key/values.
        """
        self._hints.update(hints)

    def _has_filters(self):
        """
        Check if this QuerySet has any filtering going on. This isn't
        equivalent to checking if all objects are present in results, for
        example, qs[1:]._has_filters() -> False.
        """
        return self.query.has_filters()

    @staticmethod
    def _validate_values_are_expressions(values, method_name):
        invalid_args = sorted(str(arg) for arg in values if not hasattr(arg, 'resolve_expression'))
        if invalid_args:
            raise TypeError(
                'QuerySet.%s() received non-expression(s): %s.' % (
                    method_name,
                    ', '.join(invalid_args),
                )
            )

    def _not_support_combined_queries(self, operation_name):
        if self.query.combinator:
            raise NotSupportedError(
                'Calling QuerySet.%s() after %s() is not supported.'
                % (operation_name, self.query.combinator)
            )


class InstanceCheckMeta(type):
    def __instancecheck__(self, instance):
        return isinstance(instance, QuerySet) and instance.query.is_empty()


class EmptyQuerySet(metaclass=InstanceCheckMeta):
    """
    Marker class for checking whether a queryset is empty via .none():
    isinstance(qs.none(), EmptyQuerySet) -> True
    """

    def __init__(self, *args, **kwargs):
        raise TypeError("EmptyQuerySet can't be instantiated")


class RawQuerySet:
    """
    Provide an iterator which converts the results of raw SQL queries into
    annotated model instances.
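
    Usage sketch (hypothetical model)::

        for person in Person.objects.raw('SELECT * FROM myapp_person'):
            print(person.first_name)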
""" def __init__(self, raw_query, model=None, query=None, params=None, translations=None, using=None, hints=None): self.raw_query = raw_query self.model = model self._db = using self._hints = hints or {} self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params) self.params = params or () self.translations = translations or {} self._result_cache = None self._prefetch_related_lookups = () self._prefetch_done = False def resolve_model_init_order(self): """Resolve the init field names and value positions.""" converter = connections[self.db].introspection.identifier_converter model_init_fields = [f for f in self.model._meta.fields if converter(f.column) in self.columns] annotation_fields = [(column, pos) for pos, column in enumerate(self.columns) if column not in self.model_fields] model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields] model_init_names = [f.attname for f in model_init_fields] return model_init_names, model_init_order, annotation_fields def prefetch_related(self, *lookups): """Same as QuerySet.prefetch_related()""" clone = self._clone() if lookups == (None,): clone._prefetch_related_lookups = () else: clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def _prefetch_related_objects(self): prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def _clone(self): """Same as QuerySet._clone()""" c = self.__class__( self.raw_query, model=self.model, query=self.query, params=self.params, translations=self.translations, using=self._db, hints=self._hints ) c._prefetch_related_lookups = self._prefetch_related_lookups[:] return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self.iterator()) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def __len__(self): self._fetch_all() return len(self._result_cache) def __bool__(self): self._fetch_all() return bool(self._result_cache) def __iter__(self): self._fetch_all() return iter(self._result_cache) def iterator(self): # Cache some things for performance reasons outside the loop. db = self.db compiler = connections[db].ops.compiler('SQLCompiler')( self.query, connections[db], db ) query = iter(self.query) try: model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order() if self.model._meta.pk.attname not in model_init_names: raise exceptions.FieldDoesNotExist( 'Raw query must include the primary key' ) model_cls = self.model fields = [self.model_fields.get(c) for c in self.columns] converters = compiler.get_converters([ f.get_col(f.model._meta.db_table) if f else None for f in fields ]) if converters: query = compiler.apply_converters(query, converters) for values in query: # Associate fields to values model_init_values = [values[pos] for pos in model_init_pos] instance = model_cls.from_db(db, model_init_names, model_init_values) if annotation_fields: for column, pos in annotation_fields: setattr(instance, column, values[pos]) yield instance finally: # Done iterating the Query. If it has its own cursor, close it. 
if hasattr(self.query, 'cursor') and self.query.cursor: self.query.cursor.close() def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.query) def __getitem__(self, k): return list(self)[k] @property def db(self): """Return the database used if this query is executed now.""" return self._db or router.db_for_read(self.model, **self._hints) def using(self, alias): """Select the database this RawQuerySet should execute against.""" return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, ) @cached_property def columns(self): """ A list of model field names in the order they'll appear in the query results. """ columns = self.query.get_columns() # Adjust any column names which don't match field names for (query_name, model_name) in self.translations.items(): # Ignore translations for nonexistent column names try: index = columns.index(query_name) except ValueError: pass else: columns[index] = model_name return columns @cached_property def model_fields(self): """A dict mapping column names to model field names.""" converter = connections[self.db].introspection.identifier_converter model_fields = {} for field in self.model._meta.fields: name, column = field.get_attname_column() model_fields[converter(column)] = field return model_fields class Prefetch: def __init__(self, lookup, queryset=None, to_attr=None): # `prefetch_through` is the path we traverse to perform the prefetch. self.prefetch_through = lookup # `prefetch_to` is the path to the attribute that stores the result. self.prefetch_to = lookup if queryset is not None and ( isinstance(queryset, RawQuerySet) or ( hasattr(queryset, '_iterable_class') and not issubclass(queryset._iterable_class, ModelIterable) ) ): raise ValueError( 'Prefetch querysets cannot use raw(), values(), and ' 'values_list().' ) if to_attr: self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr]) self.queryset = queryset self.to_attr = to_attr def __getstate__(self): obj_dict = self.__dict__.copy() if self.queryset is not None: # Prevent the QuerySet from being evaluated obj_dict['queryset'] = self.queryset._chain( _result_cache=[], _prefetch_done=True, ) return obj_dict def add_prefix(self, prefix): self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to def get_current_prefetch_to(self, level): return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1]) def get_current_to_attr(self, level): parts = self.prefetch_to.split(LOOKUP_SEP) to_attr = parts[level] as_attr = self.to_attr and level == len(parts) - 1 return to_attr, as_attr def get_current_queryset(self, level): if self.get_current_prefetch_to(level) == self.prefetch_to: return self.queryset return None def __eq__(self, other): if not isinstance(other, Prefetch): return NotImplemented return self.prefetch_to == other.prefetch_to def __hash__(self): return hash((self.__class__, self.prefetch_to)) def normalize_prefetch_lookups(lookups, prefix=None): """Normalize lookups into Prefetch objects.""" ret = [] for lookup in lookups: if not isinstance(lookup, Prefetch): lookup = Prefetch(lookup) if prefix: lookup.add_prefix(prefix) ret.append(lookup) return ret def prefetch_related_objects(model_instances, *related_lookups): """ Populate prefetched object caches for a list of model instances based on the lookups/Prefetch instances given. 
""" if not model_instances: return # nothing to do # We need to be able to dynamically add to the list of prefetch_related # lookups that we look up (see below). So we need some book keeping to # ensure we don't do duplicate work. done_queries = {} # dictionary of things like 'foo__bar': [results] auto_lookups = set() # we add to this as we go through. followed_descriptors = set() # recursion protection all_lookups = normalize_prefetch_lookups(reversed(related_lookups)) while all_lookups: lookup = all_lookups.pop() if lookup.prefetch_to in done_queries: if lookup.queryset is not None: raise ValueError("'%s' lookup was already seen with a different queryset. " "You may need to adjust the ordering of your lookups." % lookup.prefetch_to) continue # Top level, the list of objects to decorate is the result cache # from the primary QuerySet. It won't be for deeper levels. obj_list = model_instances through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) for level, through_attr in enumerate(through_attrs): # Prepare main instances if not obj_list: break prefetch_to = lookup.get_current_prefetch_to(level) if prefetch_to in done_queries: # Skip any prefetching, and any object preparation obj_list = done_queries[prefetch_to] continue # Prepare objects: good_objects = True for obj in obj_list: # Since prefetching can re-use instances, it is possible to have # the same instance multiple times in obj_list, so obj might # already be prepared. if not hasattr(obj, '_prefetched_objects_cache'): try: obj._prefetched_objects_cache = {} except (AttributeError, TypeError): # Must be an immutable object from # values_list(flat=True), for example (TypeError) or # a QuerySet subclass that isn't returning Model # instances (AttributeError), either in Django or a 3rd # party. prefetch_related() doesn't make sense, so quit. good_objects = False break if not good_objects: break # Descend down tree # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. first_obj = obj_list[0] to_attr = lookup.get_current_to_attr(level)[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr) if not attr_found: raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid " "parameter to prefetch_related()" % (through_attr, first_obj.__class__.__name__, lookup.prefetch_through)) if level == len(through_attrs) - 1 and prefetcher is None: # Last one, this *must* resolve to something that supports # prefetching, otherwise there is no point adding it and the # developer asking for it has made a mistake. raise ValueError("'%s' does not resolve to an item that supports " "prefetching - this is an invalid parameter to " "prefetch_related()." % lookup.prefetch_through) if prefetcher is not None and not is_fetched: obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level) # We need to ensure we don't keep adding lookups from the # same relationships to stop infinite recursion. So, if we # are already on an automatically added lookup, don't add # the new lookups from relationships we've seen already. 
if not (prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors): done_queries[prefetch_to] = obj_list new_lookups = normalize_prefetch_lookups(reversed(additional_lookups), prefetch_to) auto_lookups.update(new_lookups) all_lookups.extend(new_lookups) followed_descriptors.add(descriptor) else: # Either a singly related object that has already been fetched # (e.g. via select_related), or hopefully some other property # that doesn't support prefetching but needs to be traversed. # We replace the current list of parent objects with the list # of related objects, filtering out empty or missing values so # that we can continue with nullable or reverse relations. new_obj_list = [] for obj in obj_list: if through_attr in getattr(obj, '_prefetched_objects_cache', ()): # If related objects have been prefetched, use the # cache rather than the object's through_attr. new_obj = list(obj._prefetched_objects_cache.get(through_attr)) else: try: new_obj = getattr(obj, through_attr) except exceptions.ObjectDoesNotExist: continue if new_obj is None: continue # We special-case `list` rather than something more generic # like `Iterable` because we don't want to accidentally match # user models that define __iter__. if isinstance(new_obj, list): new_obj_list.extend(new_obj) else: new_obj_list.append(new_obj) obj_list = new_obj_list def get_prefetcher(instance, through_attr, to_attr): """ For the attribute 'through_attr' on the given instance, find an object that has a get_prefetch_queryset(). Return a 4 tuple containing: (the object with get_prefetch_queryset (or None), the descriptor object representing this relationship (or None), a boolean that is False if the attribute was not found at all, a boolean that is True if the attribute has already been fetched) """ prefetcher = None is_fetched = False # For singly related objects, we have to avoid getting the attribute # from the object, as this will trigger the query. So we first try # on the class, in order to get the descriptor object. rel_obj_descriptor = getattr(instance.__class__, through_attr, None) if rel_obj_descriptor is None: attr_found = hasattr(instance, through_attr) else: attr_found = True if rel_obj_descriptor: # singly related object, descriptor object has the # get_prefetch_queryset() method. if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'): prefetcher = rel_obj_descriptor if rel_obj_descriptor.is_cached(instance): is_fetched = True else: # descriptor doesn't support prefetching, so we go ahead and get # the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, through_attr) if hasattr(rel_obj, 'get_prefetch_queryset'): prefetcher = rel_obj if through_attr != to_attr: # Special case cached_property instances because hasattr # triggers attribute computation and assignment. if isinstance(getattr(instance.__class__, to_attr, None), cached_property): is_fetched = to_attr in instance.__dict__ else: is_fetched = hasattr(instance, to_attr) else: is_fetched = through_attr in instance._prefetched_objects_cache return prefetcher, rel_obj_descriptor, attr_found, is_fetched def prefetch_one_level(instances, prefetcher, lookup, level): """ Helper function for prefetch_related_objects(). Run prefetches on all instances using the prefetcher object, assigning results to relevant caches in instance. Return the prefetched objects along with any additional prefetches that must be done due to prefetch_related lookups found from default managers. 
""" # prefetcher must have a method get_prefetch_queryset() which takes a list # of instances, and returns a tuple: # (queryset of instances of self.model that are related to passed in instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache or field name to assign to, # boolean that is True when the previous argument is a cache name vs a field name). # The 'values to be matched' must be hashable as they will be used # in a dictionary. rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor = ( prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))) # We have to handle the possibility that the QuerySet we just got back # contains some prefetch_related lookups. We don't want to trigger the # prefetch_related functionality by evaluating the query. Rather, we need # to merge in the prefetch_related lookups. # Copy the lookups in case it is a Prefetch object which could be reused # later (happens in nested prefetch_related). additional_lookups = [ copy.copy(additional_lookup) for additional_lookup in getattr(rel_qs, '_prefetch_related_lookups', ()) ] if additional_lookups: # Don't need to clone because the manager should have given us a fresh # instance, so we access an internal instead of using public interface # for performance reasons. rel_qs._prefetch_related_lookups = () all_related_objects = list(rel_qs) rel_obj_cache = {} for rel_obj in all_related_objects: rel_attr_val = rel_obj_attr(rel_obj) rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) to_attr, as_attr = lookup.get_current_to_attr(level) # Make sure `to_attr` does not conflict with a field. if as_attr and instances: # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. model = instances[0].__class__ try: model._meta.get_field(to_attr) except exceptions.FieldDoesNotExist: pass else: msg = 'to_attr={} conflicts with a field on the {} model.' raise ValueError(msg.format(to_attr, model.__name__)) # Whether or not we're prefetching the last part of the lookup. leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level for obj in instances: instance_attr_val = instance_attr(obj) vals = rel_obj_cache.get(instance_attr_val, []) if single: val = vals[0] if vals else None if as_attr: # A to_attr has been given for the prefetch. setattr(obj, to_attr, val) elif is_descriptor: # cache_name points to a field name in obj. # This field is a descriptor for a related object. setattr(obj, cache_name, val) else: # No to_attr has been given for this prefetch operation and the # cache_name does not point to a descriptor. Store the value of # the field in the object's field cache. obj._state.fields_cache[cache_name] = val else: if as_attr: setattr(obj, to_attr, vals) else: manager = getattr(obj, to_attr) if leaf and lookup.queryset is not None: qs = manager._apply_rel_filters(lookup.queryset) else: qs = manager.get_queryset() qs._result_cache = vals # We don't want the individual qs doing prefetch_related now, # since we have merged this into the current work. qs._prefetch_done = True obj._prefetched_objects_cache[cache_name] = qs return all_related_objects, additional_lookups class RelatedPopulator: """ RelatedPopulator is used for select_related() object instantiation. 
The idea is that each select_related() model will be populated by a different RelatedPopulator instance. The RelatedPopulator instances get klass_info and select (computed in SQLCompiler) plus the used db as input for initialization. That data is used to compute which columns to use, how to instantiate the model, and how to populate the links between the objects. The actual creation of the objects is done in populate() method. This method gets row and from_obj as input and populates the select_related() model instance. """ def __init__(self, klass_info, select, db): self.db = db # Pre-compute needed attributes. The attributes are: # - model_cls: the possibly deferred model class to instantiate # - either: # - cols_start, cols_end: usually the columns in the row are # in the same order model_cls.__init__ expects them, so we # can instantiate by model_cls(*row[cols_start:cols_end]) # - reorder_for_init: When select_related descends to a child # class, then we want to reuse the already selected parent # data. However, in this case the parent data isn't necessarily # in the same order that Model.__init__ expects it to be, so # we have to reorder the parent data. The reorder_for_init # attribute contains a function used to reorder the field data # in the order __init__ expects it. # - pk_idx: the index of the primary key field in the reordered # model data. Used to check if a related object exists at all. # - init_list: the field attnames fetched from the database. For # deferred models this isn't the same as all attnames of the # model's fields. # - related_populators: a list of RelatedPopulator instances if # select_related() descends to related models from this model. # - local_setter, remote_setter: Methods to set cached values on # the object being populated and on the remote object. Usually # these are Field.set_cached_value() methods. select_fields = klass_info['select_fields'] from_parent = klass_info['from_parent'] if not from_parent: self.cols_start = select_fields[0] self.cols_end = select_fields[-1] + 1 self.init_list = [ f[0].target.attname for f in select[self.cols_start:self.cols_end] ] self.reorder_for_init = None else: attname_indexes = {select[idx][0].target.attname: idx for idx in select_fields} model_init_attnames = (f.attname for f in klass_info['model']._meta.concrete_fields) self.init_list = [attname for attname in model_init_attnames if attname in attname_indexes] self.reorder_for_init = operator.itemgetter(*[attname_indexes[attname] for attname in self.init_list]) self.model_cls = klass_info['model'] self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname) self.related_populators = get_related_populators(klass_info, select, self.db) self.local_setter = klass_info['local_setter'] self.remote_setter = klass_info['remote_setter'] def populate(self, row, from_obj): if self.reorder_for_init: obj_data = self.reorder_for_init(row) else: obj_data = row[self.cols_start:self.cols_end] if obj_data[self.pk_idx] is None: obj = None else: obj = self.model_cls.from_db(self.db, self.init_list, obj_data) for rel_iter in self.related_populators: rel_iter.populate(row, obj) self.local_setter(from_obj, obj) if obj is not None: self.remote_setter(obj, from_obj) def get_related_populators(klass_info, select, db): iterators = [] related_klass_infos = klass_info.get('related_klass_infos', []) for rel_klass_info in related_klass_infos: rel_cls = RelatedPopulator(rel_klass_info, select, db) iterators.append(rel_cls) return iterators
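# ---------------------------------------------------------------------------
# Usage sketch (not part of this module). The Prefetch machinery above is
# normally driven via QuerySet.prefetch_related(); prefetch_related_objects()
# can also be called directly on instances already in memory. The Author and
# Book models below are hypothetical, assuming Book has a ForeignKey to
# Author:
#
#     from django.db.models import Prefetch, prefetch_related_objects
#
#     # One query for the authors, one for all of their books.
#     authors = Author.objects.prefetch_related('book_set')
#
#     # A filtered inner queryset, stored on a separate attribute via
#     # to_attr so it can't be confused with the default manager cache.
#     authors = Author.objects.prefetch_related(
#         Prefetch(
#             'book_set',
#             queryset=Book.objects.filter(published=True),
#             to_attr='published_books',
#         )
#     )
#     for author in authors:
#         author.published_books  # a plain list, already fetched
#
#     # Populating caches on instances fetched some other way:
#     authors = list(Author.objects.all())
#     prefetch_related_objects(authors, 'book_set')
# ---------------------------------------------------------------------------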
from django.core.exceptions import ObjectDoesNotExist from django.db.models import signals from django.db.models.aggregates import * # NOQA from django.db.models.aggregates import __all__ as aggregates_all from django.db.models.constraints import * # NOQA from django.db.models.constraints import __all__ as constraints_all from django.db.models.deletion import ( CASCADE, DO_NOTHING, PROTECT, RESTRICT, SET, SET_DEFAULT, SET_NULL, ProtectedError, RestrictedError, ) from django.db.models.enums import * # NOQA from django.db.models.enums import __all__ as enums_all from django.db.models.expressions import ( Case, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, OrderBy, OuterRef, RowRange, Subquery, Value, ValueRange, When, Window, WindowFrame, ) from django.db.models.fields import * # NOQA from django.db.models.fields import __all__ as fields_all from django.db.models.fields.files import FileField, ImageField from django.db.models.fields.proxy import OrderWrt from django.db.models.indexes import * # NOQA from django.db.models.indexes import __all__ as indexes_all from django.db.models.lookups import Lookup, Transform from django.db.models.manager import Manager from django.db.models.query import Prefetch, QuerySet, prefetch_related_objects from django.db.models.query_utils import FilteredRelation, Q # Imports that would create circular imports if sorted from django.db.models.base import DEFERRED, Model # isort:skip from django.db.models.fields.related import ( # isort:skip ForeignKey, ForeignObject, OneToOneField, ManyToManyField, ForeignObjectRel, ManyToOneRel, ManyToManyRel, OneToOneRel, ) __all__ = aggregates_all + constraints_all + enums_all + fields_all + indexes_all __all__ += [ 'ObjectDoesNotExist', 'signals', 'CASCADE', 'DO_NOTHING', 'PROTECT', 'RESTRICT', 'SET', 'SET_DEFAULT', 'SET_NULL', 'ProtectedError', 'RestrictedError', 'Case', 'Exists', 'Expression', 'ExpressionList', 'ExpressionWrapper', 'F', 'Func', 'OrderBy', 'OuterRef', 'RowRange', 'Subquery', 'Value', 'ValueRange', 'When', 'Window', 'WindowFrame', 'FileField', 'ImageField', 'OrderWrt', 'Lookup', 'Transform', 'Manager', 'Prefetch', 'Q', 'QuerySet', 'prefetch_related_objects', 'DEFERRED', 'Model', 'FilteredRelation', 'ForeignKey', 'ForeignObject', 'OneToOneField', 'ManyToManyField', 'ForeignObjectRel', 'ManyToOneRel', 'ManyToManyRel', 'OneToOneRel', ]
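# ---------------------------------------------------------------------------
# Usage sketch (not part of this module). This file only assembles the public
# django.db.models namespace, so user code imports from it rather than from
# the individual submodules above. A hypothetical model for illustration:
#
#     from django.db.models import CASCADE, CharField, ForeignKey, Model, Q
#
#     class Book(Model):
#         title = CharField(max_length=100)
#         author = ForeignKey('Author', on_delete=CASCADE)
#
#     Book.objects.filter(Q(title__startswith='A') | Q(title__startswith='B'))
# ---------------------------------------------------------------------------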
import bisect import copy import inspect from collections import defaultdict from django.apps import apps from django.conf import settings from django.core.exceptions import FieldDoesNotExist from django.db import connections from django.db.models import AutoField, Manager, OrderWrt from django.db.models.query_utils import PathInfo from django.utils.datastructures import ImmutableList, OrderedSet from django.utils.functional import cached_property from django.utils.text import camel_case_to_spaces, format_lazy from django.utils.translation import override PROXY_PARENTS = object() EMPTY_RELATION_TREE = () IMMUTABLE_WARNING = ( "The return type of '%s' should never be mutated. If you want to manipulate this list " "for your own use, make a copy first." ) DEFAULT_NAMES = ( 'verbose_name', 'verbose_name_plural', 'db_table', 'ordering', 'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to', 'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable', 'auto_created', 'index_together', 'apps', 'default_permissions', 'select_on_save', 'default_related_name', 'required_db_features', 'required_db_vendor', 'base_manager_name', 'default_manager_name', 'indexes', 'constraints', ) def normalize_together(option_together): """ option_together can be either a tuple of tuples, or a single tuple of two strings. Normalize it to a tuple of tuples, so that calling code can uniformly expect that. """ try: if not option_together: return () if not isinstance(option_together, (tuple, list)): raise TypeError first_element = option_together[0] if not isinstance(first_element, (tuple, list)): option_together = (option_together,) # Normalize everything to tuples return tuple(tuple(ot) for ot in option_together) except TypeError: # If the value of option_together isn't valid, return it # verbatim; this will be picked up by the check framework later. return option_together def make_immutable_fields_list(name, data): return ImmutableList(data, warning=IMMUTABLE_WARNING % name) class Options: FORWARD_PROPERTIES = { 'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields', '_forward_fields_map', 'managers', 'managers_map', 'base_manager', 'default_manager', } REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'} default_apps = apps def __init__(self, meta, app_label=None): self._get_fields_cache = {} self.local_fields = [] self.local_many_to_many = [] self.private_fields = [] self.local_managers = [] self.base_manager_name = None self.default_manager_name = None self.model_name = None self.verbose_name = None self.verbose_name_plural = None self.db_table = '' self.ordering = [] self._ordering_clash = False self.indexes = [] self.constraints = [] self.unique_together = [] self.index_together = [] self.select_on_save = False self.default_permissions = ('add', 'change', 'delete', 'view') self.permissions = [] self.object_name = None self.app_label = app_label self.get_latest_by = None self.order_with_respect_to = None self.db_tablespace = settings.DEFAULT_TABLESPACE self.required_db_features = [] self.required_db_vendor = None self.meta = meta self.pk = None self.auto_field = None self.abstract = False self.managed = True self.proxy = False # For any class that is a proxy (including automatically created # classes for deferred object loading), proxy_for_model tells us # which class this model is proxying. Note that proxy_for_model # can create a chain of proxy models. For non-proxy models, the # variable is always None. 
self.proxy_for_model = None # For any non-abstract class, the concrete class is the model # at the end of the proxy_for_model chain. In particular, for # concrete models, the concrete_model is always the class itself. self.concrete_model = None self.swappable = None self.parents = {} self.auto_created = False # List of all lookups defined in ForeignKey 'limit_choices_to' options # from *other* models. Needed for some admin checks. Internal use only. self.related_fkey_lookups = [] # A custom app registry to use, if you're making a separate model set. self.apps = self.default_apps self.default_related_name = None @property def label(self): return '%s.%s' % (self.app_label, self.object_name) @property def label_lower(self): return '%s.%s' % (self.app_label, self.model_name) @property def app_config(self): # Don't go through get_app_config to avoid triggering imports. return self.apps.app_configs.get(self.app_label) @property def installed(self): return self.app_config is not None def contribute_to_class(self, cls, name): from django.db import connection from django.db.backends.utils import truncate_name cls._meta = self self.model = cls # First, construct the default values for these options. self.object_name = cls.__name__ self.model_name = self.object_name.lower() self.verbose_name = camel_case_to_spaces(self.object_name) # Store the original user-defined values for each option, # for use when serializing the model definition. self.original_attrs = {} # Next, apply any overridden values from 'class Meta'. if self.meta: meta_attrs = self.meta.__dict__.copy() for name in self.meta.__dict__: # Ignore any private attributes that Django doesn't care about. # NOTE: We can't modify a dictionary's contents while looping # over it, so we loop over the *original* dictionary instead. if name.startswith('_'): del meta_attrs[name] for attr_name in DEFAULT_NAMES: if attr_name in meta_attrs: setattr(self, attr_name, meta_attrs.pop(attr_name)) self.original_attrs[attr_name] = getattr(self, attr_name) elif hasattr(self.meta, attr_name): setattr(self, attr_name, getattr(self.meta, attr_name)) self.original_attrs[attr_name] = getattr(self, attr_name) self.unique_together = normalize_together(self.unique_together) self.index_together = normalize_together(self.index_together) # App label/class name interpolation for names of constraints and # indexes. if not getattr(cls._meta, 'abstract', False): for attr_name in {'constraints', 'indexes'}: objs = getattr(self, attr_name, []) setattr(self, attr_name, self._format_names_with_class(cls, objs)) # verbose_name_plural is a special case because it appends 's' # by default. if self.verbose_name_plural is None: self.verbose_name_plural = format_lazy('{}s', self.verbose_name) # order_with_respect_to and ordering are mutually exclusive. self._ordering_clash = bool(self.ordering and self.order_with_respect_to) # Any leftover attributes must be invalid. if meta_attrs != {}: raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs)) else: self.verbose_name_plural = format_lazy('{}s', self.verbose_name) del self.meta # If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table: self.db_table = "%s_%s" % (self.app_label, self.model_name) self.db_table = truncate_name(self.db_table, connection.ops.max_name_length()) def _format_names_with_class(self, cls, objs): """App label/class name interpolation for object names.""" new_objs = [] for obj in objs: obj = obj.clone() obj.name = obj.name % { 'app_label': cls._meta.app_label.lower(), 'class': cls.__name__.lower(), } new_objs.append(obj) return new_objs def _prepare(self, model): if self.order_with_respect_to: # The app registry will not be ready at this point, so we cannot # use get_field(). query = self.order_with_respect_to try: self.order_with_respect_to = next( f for f in self._get_fields(reverse=False) if f.name == query or f.attname == query ) except StopIteration: raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query)) self.ordering = ('_order',) if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields): model.add_to_class('_order', OrderWrt()) else: self.order_with_respect_to = None if self.pk is None: if self.parents: # Promote the first parent link in lieu of adding yet another # field. field = next(iter(self.parents.values())) # Look for a local field with the same name as the # first parent link. If a local field has already been # created, use it instead of promoting the parent already_created = [fld for fld in self.local_fields if fld.name == field.name] if already_created: field = already_created[0] field.primary_key = True self.setup_pk(field) else: auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True) model.add_to_class('id', auto) def add_manager(self, manager): self.local_managers.append(manager) self._expire_cache() def add_field(self, field, private=False): # Insert the given field in the order in which it was created, using # the "creation_counter" attribute of the field. # Move many-to-many related fields from self.fields into # self.many_to_many. if private: self.private_fields.append(field) elif field.is_relation and field.many_to_many: bisect.insort(self.local_many_to_many, field) else: bisect.insort(self.local_fields, field) self.setup_pk(field) # If the field being added is a relation to another known field, # expire the cache on this field and the forward cache on the field # being referenced, because there will be new relationships in the # cache. Otherwise, expire the cache of references *to* this field. # The mechanism for getting at the related model is slightly odd - # ideally, we'd just ask for field.related_model. However, related_model # is a cached property, and all the models haven't been loaded yet, so # we need to make sure we don't cache a string reference. if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model: try: field.remote_field.model._meta._expire_cache(forward=False) except AttributeError: pass self._expire_cache() else: self._expire_cache(reverse=False) def setup_pk(self, field): if not self.pk and field.primary_key: self.pk = field field.serialize = False def setup_proxy(self, target): """ Do the internal setup so that the current model is a proxy for "target". """ self.pk = target._meta.pk self.proxy_for_model = target self.db_table = target._meta.db_table def __repr__(self): return '<Options for %s>' % self.object_name def __str__(self): return self.label_lower def can_migrate(self, connection): """ Return True if the model can/should be migrated on the `connection`. `connection` can be either a real connection or a connection alias. 
""" if self.proxy or self.swapped or not self.managed: return False if isinstance(connection, str): connection = connections[connection] if self.required_db_vendor: return self.required_db_vendor == connection.vendor if self.required_db_features: return all(getattr(connection.features, feat, False) for feat in self.required_db_features) return True @property def verbose_name_raw(self): """Return the untranslated verbose name.""" with override(None): return str(self.verbose_name) @property def swapped(self): """ Has this model been swapped out for another? If so, return the model name of the replacement; otherwise, return None. For historical reasons, model name lookups using get_model() are case insensitive, so we make sure we are case insensitive here. """ if self.swappable: swapped_for = getattr(settings, self.swappable, None) if swapped_for: try: swapped_label, swapped_object = swapped_for.split('.') except ValueError: # setting not in the format app_label.model_name # raising ImproperlyConfigured here causes problems with # test cleanup code - instead it is raised in get_user_model # or as part of validation. return swapped_for if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower: return swapped_for return None @cached_property def managers(self): managers = [] seen_managers = set() bases = (b for b in self.model.mro() if hasattr(b, '_meta')) for depth, base in enumerate(bases): for manager in base._meta.local_managers: if manager.name in seen_managers: continue manager = copy.copy(manager) manager.model = self.model seen_managers.add(manager.name) managers.append((depth, manager.creation_counter, manager)) return make_immutable_fields_list( "managers", (m[2] for m in sorted(managers)), ) @cached_property def managers_map(self): return {manager.name: manager for manager in self.managers} @cached_property def base_manager(self): base_manager_name = self.base_manager_name if not base_manager_name: # Get the first parent's base_manager_name if there's one. for parent in self.model.mro()[1:]: if hasattr(parent, '_meta'): if parent._base_manager.name != '_base_manager': base_manager_name = parent._base_manager.name break if base_manager_name: try: return self.managers_map[base_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, base_manager_name, ) ) manager = Manager() manager.name = '_base_manager' manager.model = self.model manager.auto_created = True return manager @cached_property def default_manager(self): default_manager_name = self.default_manager_name if not default_manager_name and not self.local_managers: # Get the first parent's default_manager_name if there's one. for parent in self.model.mro()[1:]: if hasattr(parent, '_meta'): default_manager_name = parent._meta.default_manager_name break if default_manager_name: try: return self.managers_map[default_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, default_manager_name, ) ) if self.managers: return self.managers[0] @cached_property def fields(self): """ Return a list of all forward fields on the model and its parents, excluding ManyToManyFields. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ # For legacy reasons, the fields property should only contain forward # fields that are not private or with a m2m cardinality. Therefore we # pass these three filters as filters to the generator. 
# The third lambda is a longwinded way of checking f.related_model - we don't # use that property directly because related_model is a cached property, # and all the models may not have been loaded yet; we don't want to cache # the string reference to the related_model. def is_not_an_m2m_field(f): return not (f.is_relation and f.many_to_many) def is_not_a_generic_relation(f): return not (f.is_relation and f.one_to_many) def is_not_a_generic_foreign_key(f): return not ( f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model) ) return make_immutable_fields_list( "fields", (f for f in self._get_fields(reverse=False) if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f)) ) @cached_property def concrete_fields(self): """ Return a list of all concrete fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ return make_immutable_fields_list( "concrete_fields", (f for f in self.fields if f.concrete) ) @cached_property def local_concrete_fields(self): """ Return a list of all concrete fields on the model. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ return make_immutable_fields_list( "local_concrete_fields", (f for f in self.local_fields if f.concrete) ) @cached_property def many_to_many(self): """ Return a list of all many to many fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this list. """ return make_immutable_fields_list( "many_to_many", (f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many) ) @cached_property def related_objects(self): """ Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True) return make_immutable_fields_list( "related_objects", (obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many) ) @cached_property def _forward_fields_map(self): res = {} fields = self._get_fields(reverse=False) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res @cached_property def fields_map(self): res = {} fields = self._get_fields(forward=False, include_hidden=True) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res def get_field(self, field_name): """ Return a field instance given the name of a forward or reverse field. 
""" try: # In order to avoid premature loading of the relation tree # (expensive) we prefer checking if the field is a forward field. return self._forward_fields_map[field_name] except KeyError: # If the app registry is not ready, reverse fields are # unavailable, therefore we throw a FieldDoesNotExist exception. if not self.apps.models_ready: raise FieldDoesNotExist( "%s has no field named '%s'. The app cache isn't ready yet, " "so if this is an auto-created related field, it won't " "be available yet." % (self.object_name, field_name) ) try: # Retrieve field instance by name from cached or just-computed # field map. return self.fields_map[field_name] except KeyError: raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name)) def get_base_chain(self, model): """ Return a list of parent classes leading to `model` (ordered from closest to most distant ancestor). This has to handle the case where `model` is a grandparent or even more distant relation. """ if not self.parents: return [] if model in self.parents: return [model] for parent in self.parents: res = parent._meta.get_base_chain(model) if res: res.insert(0, parent) return res return [] def get_parent_list(self): """ Return all the ancestors of this model as a list ordered by MRO. Useful for determining if something is an ancestor, regardless of lineage. """ result = OrderedSet(self.parents) for parent in self.parents: for ancestor in parent._meta.get_parent_list(): result.add(ancestor) return list(result) def get_ancestor_link(self, ancestor): """ Return the field on the current model which points to the given "ancestor". This is possible an indirect link (a pointer to a parent model, which points, eventually, to the ancestor). Used when constructing table joins for model inheritance. Return None if the model isn't an ancestor of this one. """ if ancestor in self.parents: return self.parents[ancestor] for parent in self.parents: # Tries to get a link field from the immediate parent parent_link = parent._meta.get_ancestor_link(ancestor) if parent_link: # In case of a proxied model, the first link # of the chain to the ancestor is that parent # links return self.parents[parent] or parent_link def get_path_to_parent(self, parent): """ Return a list of PathInfos containing the path from the current model to the parent model, or an empty list if parent is not a parent of the current model. """ if self.model is parent: return [] # Skip the chain of proxy to the concrete proxied model. proxied_model = self.concrete_model path = [] opts = self for int_model in self.get_base_chain(parent): if int_model is proxied_model: opts = int_model._meta else: final_field = opts.parents[int_model] targets = (final_field.remote_field.get_related_field(),) opts = int_model._meta path.append(PathInfo( from_opts=final_field.model._meta, to_opts=opts, target_fields=targets, join_field=final_field, m2m=False, direct=True, filtered_relation=None, )) return path def get_path_from_parent(self, parent): """ Return a list of PathInfos containing the path from the parent model to the current model, or an empty list if parent is not a parent of the current model. """ if self.model is parent: return [] model = self.concrete_model # Get a reversed base chain including both the current and parent # models. chain = model._meta.get_base_chain(parent) chain.reverse() chain.append(model) # Construct a list of the PathInfos between models in chain. 
path = [] for i, ancestor in enumerate(chain[:-1]): child = chain[i + 1] link = child._meta.get_ancestor_link(ancestor) path.extend(link.get_reverse_path_info()) return path def _populate_directed_relation_graph(self): """ This method is used by each model to find its reverse objects. As this method is very expensive and is accessed frequently (it looks up every field in a model, in every app), it is computed on first access and then is set as a property on every model. """ related_objects_graph = defaultdict(list) all_models = self.apps.get_models(include_auto_created=True) for model in all_models: opts = model._meta # Abstract model's fields are copied to child models, hence we will # see the fields from the child models. if opts.abstract: continue fields_with_relations = ( f for f in opts._get_fields(reverse=False, include_parents=False) if f.is_relation and f.related_model is not None ) for f in fields_with_relations: if not isinstance(f.remote_field.model, str): related_objects_graph[f.remote_field.model._meta.concrete_model._meta].append(f) for model in all_models: # Set the relation_tree using the internal __dict__. In this way # we avoid calling the cached property. In attribute lookup, # __dict__ takes precedence over a data descriptor (such as # @cached_property). This means that the _meta._relation_tree is # only called if related_objects is not in __dict__. related_objects = related_objects_graph[model._meta.concrete_model._meta] model._meta.__dict__['_relation_tree'] = related_objects # It seems it is possible that self is not in all_models, so guard # against that with default for get(). return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE) @cached_property def _relation_tree(self): return self._populate_directed_relation_graph() def _expire_cache(self, forward=True, reverse=True): # This method is usually called by apps.cache_clear(), when the # registry is finalized, or when a new field is added. if forward: for cache_key in self.FORWARD_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) if reverse and not self.abstract: for cache_key in self.REVERSE_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) self._get_fields_cache = {} def get_fields(self, include_parents=True, include_hidden=False): """ Return a list of fields associated to the model. By default, include forward and reverse fields, fields derived from inheritance, but not hidden fields. The returned fields can be changed using the parameters: - include_parents: include fields derived from inheritance - include_hidden: include fields that have a related_name that starts with a "+" """ if include_parents is False: include_parents = PROXY_PARENTS return self._get_fields(include_parents=include_parents, include_hidden=include_hidden) def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False, seen_models=None): """ Internal helper function to return fields of the model. * If forward=True, then fields defined on this model are returned. * If reverse=True, then relations pointing to this model are returned. * If include_hidden=True, then fields with is_hidden=True are returned. * The include_parents argument toggles if fields from parent models should be included. It has three values: True, False, and PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all fields defined for the current model or any of its parents in the parent chain to the model's concrete model. 
""" if include_parents not in (True, False, PROXY_PARENTS): raise TypeError("Invalid argument for include_parents: %s" % (include_parents,)) # This helper function is used to allow recursion in ``get_fields()`` # implementation and to provide a fast way for Django's internals to # access specific subsets of fields. # We must keep track of which models we have already seen. Otherwise we # could include the same field multiple times from different models. topmost_call = seen_models is None if topmost_call: seen_models = set() seen_models.add(self.model) # Creates a cache key composed of all arguments cache_key = (forward, reverse, include_parents, include_hidden, topmost_call) try: # In order to avoid list manipulation. Always return a shallow copy # of the results. return self._get_fields_cache[cache_key] except KeyError: pass fields = [] # Recursively call _get_fields() on each parent, with the same # options provided in this call. if include_parents is not False: for parent in self.parents: # In diamond inheritance it is possible that we see the same # model from two different routes. In that case, avoid adding # fields from the same parent again. if parent in seen_models: continue if (parent._meta.concrete_model != self.concrete_model and include_parents == PROXY_PARENTS): continue for obj in parent._meta._get_fields( forward=forward, reverse=reverse, include_parents=include_parents, include_hidden=include_hidden, seen_models=seen_models): if not getattr(obj, 'parent_link', False) or obj.model == self.concrete_model: fields.append(obj) if reverse and not self.proxy: # Tree is computed once and cached until the app cache is expired. # It is composed of a list of fields pointing to the current model # from other models. all_fields = self._relation_tree for field in all_fields: # If hidden fields should be included or the relation is not # intentionally hidden, add to the fields dict. if include_hidden or not field.remote_field.hidden: fields.append(field.remote_field) if forward: fields += self.local_fields fields += self.local_many_to_many # Private fields are recopied to each child model, and they get a # different model as field.model in each child. Hence we have to # add the private fields separately from the topmost call. If we # did this recursively similar to local_fields, we would get field # instances with field.model != self.model. if topmost_call: fields += self.private_fields # In order to avoid list manipulation. Always # return a shallow copy of the results fields = make_immutable_fields_list("get_fields()", fields) # Store result into cache for later access self._get_fields_cache[cache_key] = fields return fields @cached_property def _property_names(self): """Return a set of the names of the properties defined on the model.""" names = [] for name in dir(self.model): attr = inspect.getattr_static(self.model, name) if isinstance(attr, property): names.append(name) return frozenset(names) @cached_property def db_returning_fields(self): """ Private API intended only to be used by Django itself. Fields to be returned after a database insert. """ return [ field for field in self._get_fields(forward=True, reverse=False, include_parents=PROXY_PARENTS) if getattr(field, 'db_returning', False) ]
import copy import inspect import warnings from functools import partialmethod from itertools import chain from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models import ( NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value, ) from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.functions import Coalesce from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import make_model_tuple from django.utils.encoding import force_str from django.utils.hashable import make_hashable from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ from django.utils.version import get_version class Deferred: def __repr__(self): return '<Deferred field>' def __str__(self): return '<Deferred field>' DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type(name, bases, { '__module__': module, '__qualname__': '%s.%s' % (attached_to.__qualname__, name), }) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, 'contribute_to_class') class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop('__module__') new_attrs = {'__module__': module} classcell = attrs.pop('__classcell__', None) if classcell is not None: new_attrs['__classcell__'] = classcell attr_meta = attrs.pop('Meta', None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in list(attrs.items()): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, 'abstract', False) meta = attr_meta or getattr(new_class, 'Meta', None) base_meta = getattr(new_class, '_meta', None) app_label = None # Look for an application configuration to attach the model to. 
app_config = apps.get_containing_app_config(module) if getattr(meta, 'app_label', None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." % (module, name) ) else: app_label = app_config.label new_class.add_to_class('_meta', Options(meta, app_label)) if not abstract: new_class.add_to_class( 'DoesNotExist', subclass_exception( 'DoesNotExist', tuple( x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class)) new_class.add_to_class( 'MultipleObjectsReturned', subclass_exception( 'MultipleObjectsReturned', tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class)) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, 'ordering'): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, 'get_latest_by'): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, '_meta')]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) if base is None: raise TypeError("Proxy model '%s' has no non-abstract model base class." % name) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, '_meta'): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField) and field.remote_field.parent_link: related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, '_meta'): # Things without _meta aren't functional models, so they're # uninteresting parents. 
inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. for field in parent_fields: if field.name in field_names: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = '%s_ptr' % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if (field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True) cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False) # Defer creating accessors on the foreign class until it has been # created and registered. 
If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields)) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower) if get_absolute_url_override: setattr(cls, 'get_absolute_url', get_absolute_url_override) if not opts.managers: if any(f.name == 'objects' for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class('objects', manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) kwargs.pop(field.name, None) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. 
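        # A hedged sketch of the three cases the loop below handles (Person
        # and its 'employer' ForeignKey are hypothetical):
        #
        #   Person(name='Ada')           # field absent -> field.get_default()
        #   Person(employer=acme)        # instance -> set via field.name
        #   Person(employer_id=acme.pk)  # raw value -> set via field.attname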
for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names for prop in tuple(kwargs): try: # Any remaining kwargs must correspond to properties or # virtual fields. if prop in property_names or opts.get_field(prop): if kwargs[prop] is not _DEFERRED: _setattr(self, prop, kwargs[prop]) del kwargs[prop] except (AttributeError, FieldDoesNotExist): pass for kwarg in kwargs: raise TypeError("%s() got an unexpected keyword argument '%s'" % (cls.__name__, kwarg)) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def __str__(self): return '%s object (%s)' % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return NotImplemented if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = get_version() class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" return self.__dict__ def __setstate__(self, state): msg = None pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: current_version = get_version() if current_version != pickled_version: msg = ( "Pickled model instance's Django version %s does not match " "the current version %s." % (pickled_version, current_version) ) else: msg = "Pickled model instance's Django version is not specified." 
if msg: warnings.warn(msg, RuntimeWarning, stacklevel=2) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): for parent_link in self._meta.parents.values(): if parent_link and parent_link != self._meta.pk: setattr(self, parent_link.target_field.attname, value) return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, '_prefetched_objects_cache', ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' 'are not allowed in fields.' % LOOKUP_SEP) hints = {'instance': self} db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Save the current instance. Override this in a subclass if you want to control the saving process. 
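
        For example (a sketch; ``Poll`` is a hypothetical model)::

            poll = Poll(question='What next?')
            poll.save()                            # INSERT; no primary key yet
            poll.question = 'What now?'
            poll.save(update_fields=['question'])  # UPDATE of a single column
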
The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey or OneToOneField on this model. If the field is # nullable, allowing the save() would result in silent data loss. for field in self._meta.concrete_fields: # If the related field isn't cached, then an instance hasn't # been assigned and there's no need to worry about this check. if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "save() prohibited to prevent data loss due to " "unsaved related object '%s'." % field.name ) elif getattr(self, field.attname) is None: # Use pk from related object if it has been saved after # an assignment. setattr(self, field.attname, obj.pk) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr(self, field.attname): field.delete_cached_value(self) using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = set() for field in self._meta.fields: if not field.primary_key: field_names.add(field.name) if field.name != field.attname: field_names.add(field.attname) non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError("The following fields do not exist in this " "model or are m2m fields: %s" % ', '.join(non_model_fields)) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, 'through'): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base(using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields) save.alters_data = True def save_base(self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. 
""" using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if (field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents(cls=parent, using=using, update_fields=update_fields) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table(self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # Skip an UPDATE when adding an instance and primary key has a default. if ( not raw and not force_insert and self._state.adding and self._meta.pk.default and self._meta.pk.default is not NOT_PROVIDED ): force_insert = True # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False))) for f in non_pks] forced_update = update_fields or force_update updated = self._do_update(base_qs, using, pk_val, values, update_fields, forced_update) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) self._order = cls._base_manager.using(using).filter(**filter_args).aggregate( _order__max=Coalesce( ExpressionWrapper(Max('_order') + Value(1), output_field=IntegerField()), Value(0), ), )['_order__max'] fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] returning_fields = meta.db_returning_fields results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw) for result, field in zip(results, returning_fields): setattr(self, field.attname, result) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, returning_fields, raw): """ Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model. """ return manager._insert( [self], fields=fields, returning_fields=returning_fields, using=using, raw=raw, ) def delete(self, using=None, keep_parents=False): using = using or router.db_for_write(self.__class__, instance=self) assert self.pk is not None, ( "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname) ) collector = Collector(using=using) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) choices_dict = dict(make_hashable(field.flatchoices)) # force_str() to coerce lazy strings. 
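        # e.g. given a hypothetical field declared as
        #   year = models.CharField(choices=[('FR', 'Freshman')], max_length=2)
        # an instance with year='FR' returns 'Freshman' from
        # instance.get_year_display(), and unknown values fall through as-is.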
return force_str(choices_dict.get(make_hashable(value), value), strings_only=True) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = 'gt' if is_next else 'lt' order = '' if is_next else '-' param = getattr(self, field.attname) q = Q(**{'%s__%s' % (field.name, op): param}) q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk}) qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by( '%s%s' % (order, field.name), '%spk' % order ) try: return qs[0] except IndexError: raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = 'gt' if is_next else 'lt' order = '_order' if is_next else '-_order' order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = self.__class__._default_manager.filter(**filter_args).filter(**{ '_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{ self._meta.pk.name: self.pk }) }).order_by(order)[:1].get() setattr(self, cachename, obj) return getattr(self, cachename) def prepare_database_save(self, field): if self.pk is None: raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = [] unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [(self.__class__, self._meta.constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append((parent_class, parent_class._meta.unique_together)) if parent_class._meta.constraints: constraints.append((parent_class, parent_class._meta.constraints)) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. unique_checks.append((model_class, tuple(check))) for model_class, model_constraints in constraints: for constraint in model_constraints: if (isinstance(constraint, UniqueConstraint) and # Partial unique constraints can't be validated. 
                        constraint.condition is None and
                        not any(name in exclude for name in constraint.fields)):
                    unique_checks.append((model_class, constraint.fields))

        # These are checks for the unique_for_<date/year/month>.
        date_checks = []

        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks.

        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.get_parent_list():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))

        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, 'date', name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, 'year', name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, 'month', name, f.unique_for_month))
        return unique_checks, date_checks

    def _perform_unique_checks(self, unique_checks):
        errors = {}

        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                # TODO: Handle multiple backends with different feature flags.
                if (lookup_value is None or
                        (lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)):
                    # No value, so skip the lookup.
                    continue
                if f.primary_key and not self._state.adding:
                    # No need to check for a unique primary key when editing.
                    continue
                lookup_kwargs[str(field_name)] = lookup_value

            # Some fields were skipped, so there is no reason to do the check.
            if len(unique_check) != len(lookup_kwargs):
                continue

            qs = model_class._default_manager.filter(**lookup_kwargs)

            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one).
            # Note that we need to use the pk as defined by model_class, not
            # self.pk. These can be different fields because model inheritance
            # allows a single model to have effectively multiple primary keys.
            # Refs #17615.
            model_class_pk = self._get_pk_val(model_class._meta)
            if not self._state.adding and model_class_pk is not None:
                qs = qs.exclude(pk=model_class_pk)
            if qs.exists():
                if len(unique_check) == 1:
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))

        return errors

    def _perform_date_checks(self, date_checks):
        errors = {}
        for model_class, lookup_type, field, unique_for in date_checks:
            lookup_kwargs = {}
            # There's a ticket to add a date lookup; this special case can be
            # removed if that makes its way in.
            date = getattr(self, unique_for)
            if date is None:
                continue
            if lookup_type == 'date':
                lookup_kwargs['%s__day' % unique_for] = date.day
                lookup_kwargs['%s__month' % unique_for] = date.month
                lookup_kwargs['%s__year' % unique_for] = date.year
            else:
                lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
            lookup_kwargs[field] = getattr(self, field)

            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one).
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)

            if qs.exists():
                errors.setdefault(field, []).append(
                    self.date_error_message(lookup_type, field, unique_for)
                )
        return errors

    def date_error_message(self, lookup_type, field_name, unique_for):
        opts = self._meta
        field = opts.get_field(field_name)
        return ValidationError(
            message=field.error_messages['unique_for_date'],
            code='unique_for_date',
            params={
                'model': self,
                'model_name': capfirst(opts.verbose_name),
                'lookup_type': lookup_type,
                'field': field_name,
                'field_label': capfirst(field.verbose_name),
                'date_field': unique_for,
                'date_field_label': capfirst(opts.get_field(unique_for).verbose_name),
            }
        )

    def unique_error_message(self, model_class, unique_check):
        opts = model_class._meta

        params = {
            'model': self,
            'model_class': model_class,
            'model_name': capfirst(opts.verbose_name),
            'unique_check': unique_check,
        }

        # A unique field.
        if len(unique_check) == 1:
            field = opts.get_field(unique_check[0])
            params['field_label'] = capfirst(field.verbose_name)
            return ValidationError(
                message=field.error_messages['unique'],
                code='unique',
                params=params,
            )

        # unique_together
        else:
            field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
            params['field_labels'] = get_text_list(field_labels, _('and'))
            return ValidationError(
                message=_("%(model_name)s with this %(field_labels)s already exists."),
                code='unique_together',
                params=params,
            )

    def full_clean(self, exclude=None, validate_unique=True):
        """
        Call clean_fields(), clean(), and validate_unique() on the model.
        Raise a ValidationError for any errors that occur.
        """
        errors = {}
        if exclude is None:
            exclude = []
        else:
            exclude = list(exclude)

        try:
            self.clean_fields(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)

        # Form.clean() is run even if other validation fails, so do the
        # same with Model.clean() for consistency.
        try:
            self.clean()
        except ValidationError as e:
            errors = e.update_error_dict(errors)

        # Run unique checks, but only for fields that passed validation.
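        # e.g. if clean_fields() already rejected 'email', that name is added
        # to exclude below so validate_unique() doesn't also run a query on a
        # value known to be invalid (an illustration; 'email' is hypothetical).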
if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.append(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = [] errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [*cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs)] if not cls._meta.swapped: errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(), *cls._check_ordering(), *cls._check_constraints(), ] return errors @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id='models.E001', ) ) except LookupError: app_label, model_name = cls._meta.swapped.split('.') errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % ( cls._meta.swappable, app_label, model_name ), id='models.E002', ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id='models.E017', ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """ Check if no relationship model is used by more than one m2m field. """ errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. 
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = (f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id='models.E003', ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == 'id': return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id='models.E004', ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % ( clash.name, clash.model._meta, f.name, f.model._meta ), obj=cls, id='models.E005', ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % ( f.name, clash.name, clash.model._meta ), obj=f, id='models.E006', ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id='models.E007' ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith('_') or model_name.endswith('_'): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." 
% model_name, obj=cls, id='models.E023' ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id='models.E024' ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id='models.E025', ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id='models.E026', ) ) return errors @classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id='models.E008', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id='models.E009', ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id='models.E010', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id='models.E011', ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls): """Check the fields and names of indexes.""" errors = [] for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == '_' or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id='models.E033', ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." 
% (index.name, index.max_name_length), obj=cls, id='models.E034', ), ) fields = [field for index in cls._meta.indexes for field, _ in index.fields_orders] errors.extend(cls._check_local_fields(fields, 'indexes')) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, 'attname'): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id='models.E012', ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id='models.E013', ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model '%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id='models.E016', ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id='models.E021', ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by only one field).", obj=cls, id='models.E014', ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != '?') # Convert "-field" to "field". fields = ((f[1:] if f.startswith('-') else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: # pk is an alias that won't be found by opts.get_field. if part == 'pk': fld = _cls._meta.pk else: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.get_path_info()[-1].to_opts.model else: _cls = None except (FieldDoesNotExist, AttributeError): if fld is None or fld.get_transform(part) is None: errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id='models.E015', ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != 'pk'} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
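        # e.g. for a hypothetical Meta.ordering = ['-created', 'typo_field'],
        # '-created' was normalized to 'created' above and passes, while
        # 'typo_field' is reported with models.E015 below.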
opts = cls._meta valid_fields = set(chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) )) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id='models.E015', ) ) return errors @classmethod def _check_long_column_names(cls): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in settings.DATABASES: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if f.db_column is None and column_name is not None and len(column_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id='models.E018', ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for M2M field ' '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id='models.E019', ) ) return errors @classmethod def _check_constraints(cls): errors = [] for db in settings.DATABASES: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if ( connection.features.supports_table_check_constraints or 'supports_table_check_constraints' in cls._meta.required_db_features ): continue if any(isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints): errors.append( checks.Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id='models.W027', ) ) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): if using is None: using = DEFAULT_DB_ALIAS order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update([ ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list) ], ['_order']) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, 'get_%s_order' % model.__name__.lower(), partialmethod(method_get_order, model) ) setattr( related_model, 'set_%s_order' % model.__name__.lower(), partialmethod(method_set_order, model) ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
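

# A hedged usage sketch of the accessors wired up above (Question and Answer
# are hypothetical models, with Answer declaring
# "order_with_respect_to = 'question'" in its Meta):
#
#     question.get_answer_order()           # -> queryset of Answer pks, in order
#     question.set_answer_order([3, 1, 2])  # bulk-updates Answer._order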
import copy import datetime import inspect from decimal import Decimal from django.core.exceptions import EmptyResultSet, FieldError from django.db import NotSupportedError, connection from django.db.models import fields from django.db.models.query_utils import Q from django.utils.deconstruct import deconstructible from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == 'DecimalField': sql = 'CAST(%s AS NUMERIC)' % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = '+' SUB = '-' MUL = '*' DIV = '/' POW = '^' # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = '%%' # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = '&' BITOR = '|' BITLEFTSHIFT = '<<' BITRIGHTSHIFT = '>>' def _combine(self, other, connector, reversed): if not hasattr(other, 'resolve_expression'): # everything must be resolvable to an expression if isinstance(other, datetime.timedelta): other = DurationValue(other, output_field=fields.DurationField()) else: other = Value(other) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): if getattr(self, 'conditional', False) and getattr(other, 'conditional', False): return Q(self) & Q(other) raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def __or__(self, other): if getattr(self, 'conditional', False) and getattr(other, 'conditional', False): return Q(self) | Q(other) raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." 
        )

    def bitor(self, other):
        return self._combine(other, self.BITOR, False)

    def __radd__(self, other):
        return self._combine(other, self.ADD, True)

    def __rsub__(self, other):
        return self._combine(other, self.SUB, True)

    def __rmul__(self, other):
        return self._combine(other, self.MUL, True)

    def __rtruediv__(self, other):
        return self._combine(other, self.DIV, True)

    def __rmod__(self, other):
        return self._combine(other, self.MOD, True)

    def __rpow__(self, other):
        return self._combine(other, self.POW, True)

    def __rand__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def __ror__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )


@deconstructible
class BaseExpression:
    """Base class for all query expressions."""

    # Aggregate-specific fields.
    is_summary = False
    _output_field_resolved_to_none = False
    # Can the expression be used in a WHERE clause?
    filterable = True
    # Can the expression be used as a source expression in Window?
    window_compatible = False

    def __init__(self, output_field=None):
        if output_field is not None:
            self.output_field = output_field

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('convert_value', None)
        return state

    def get_db_converters(self, connection):
        return (
            []
            if self.convert_value is self._convert_value_noop else
            [self.convert_value]
        ) + self.output_field.get_db_converters(connection)

    def get_source_expressions(self):
        return []

    def set_source_expressions(self, exprs):
        assert not exprs

    def _parse_expressions(self, *expressions):
        return [
            arg if hasattr(arg, 'resolve_expression') else (
                F(arg) if isinstance(arg, str) else Value(arg)
            ) for arg in expressions
        ]

    def as_sql(self, compiler, connection):
        """
        Responsible for returning a (sql, [params]) tuple to be included
        in the current query.

        Different backends can provide their own implementation by
        providing an `as_{vendor}` method and patching the Expression:

        ```
        def override_as_sql(self, compiler, connection):
            # custom logic
            return super().as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
        ```

        Arguments:
         * compiler: the query compiler responsible for generating the query.
           Must have a compile method, returning a (sql, [params]) tuple.
           Calling compiler(value) will return a quoted `value`.

         * connection: the database connection used for the current query.

        Return: (sql, params)
          Where `sql` is a string containing ordered sql parameters to be
          replaced with the elements of the list `params`.
        """
        raise NotImplementedError("Subclasses must implement as_sql()")

    @cached_property
    def contains_aggregate(self):
        return any(expr and expr.contains_aggregate for expr in self.get_source_expressions())

    @cached_property
    def contains_over_clause(self):
        return any(expr and expr.contains_over_clause for expr in self.get_source_expressions())

    @cached_property
    def contains_column_references(self):
        return any(expr and expr.contains_column_references for expr in self.get_source_expressions())

    def resolve_expression(self, query=None, allow_joins=True, reuse=None,
                           summarize=False, for_save=False):
        """
        Provide the chance to do any preprocessing or validation before being
        added to the query.
        Arguments:
         * query: the backend query implementation
         * allow_joins: boolean allowing or denying use of joins
           in this query
         * reuse: a set of reusable joins for multijoins
         * summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a
           save or update

        Return: an Expression to be added to the query.
        """
        c = self.copy()
        c.is_summary = summarize
        c.set_source_expressions([
            expr.resolve_expression(query, allow_joins, reuse, summarize)
            if expr else None
            for expr in c.get_source_expressions()
        ])
        return c

    @property
    def conditional(self):
        return isinstance(self.output_field, fields.BooleanField)

    @property
    def field(self):
        return self.output_field

    @cached_property
    def output_field(self):
        """Return the output type of this expression."""
        output_field = self._resolve_output_field()
        if output_field is None:
            self._output_field_resolved_to_none = True
            raise FieldError('Cannot resolve expression type, unknown output_field')
        return output_field

    @cached_property
    def _output_field_or_none(self):
        """
        Return the output field of this expression, or None if
        _resolve_output_field() didn't return an output type.
        """
        try:
            return self.output_field
        except FieldError:
            if not self._output_field_resolved_to_none:
                raise

    def _resolve_output_field(self):
        """
        Attempt to infer the output type of the expression. If the output
        fields of all source fields match, then simply infer the same type
        here. This isn't always correct, but it makes sense most of the time.

        Consider the difference between `2 + 2` and `2 / 3`. Inferring
        the type here is a convenience for the common case. The user should
        supply their own output_field with more complex computations.

        If a source's output field resolves to None, exclude it from this
        check. If all sources are None, then an error is raised higher up the
        stack in the output_field property.
        """
        sources_iter = (source for source in self.get_source_fields() if source is not None)
        for output_field in sources_iter:
            for source in sources_iter:
                if not isinstance(output_field, source.__class__):
                    raise FieldError(
                        'Expression contains mixed types: %s, %s. You must '
                        'set output_field.' % (
                            output_field.__class__.__name__,
                            source.__class__.__name__,
                        )
                    )
            return output_field

    @staticmethod
    def _convert_value_noop(value, expression, connection):
        return value

    @cached_property
    def convert_value(self):
        """
        Expressions provide their own converters because users have the
        option of manually specifying the output_field which may be a
        different type from the one the database returns.
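
        For example (a hedged note): a DecimalField output_field on a backend
        without a native decimal type may receive a float or string from the
        driver, and the converters below coerce any non-None value back to
        the Python type the declared field implies.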
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == 'FloatField': return lambda value, expression, connection: None if value is None else float(value) elif internal_type.endswith('IntegerField'): return lambda value, expression, connection: None if value is None else int(value) elif internal_type == 'DecimalField': return lambda value, expression, connection: None if value is None else Decimal(value) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions([ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ]) return clone def copy(self): return copy.copy(self) def get_group_by_cols(self, alias=None): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: yield from expr.flatten() def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle. """ return self.output_field.select_format(compiler, sql, params) @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): if not isinstance(other, BaseExpression): return NotImplemented return other.identity == self.identity def __hash__(self): return hash(self.identity) class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" pass class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def as_sql(self, compiler, connection): try: lhs_output = self.lhs.output_field except FieldError: lhs_output = None try: rhs_output = self.rhs.output_field except FieldError: rhs_output = None if (not connection.features.has_native_duration_field and ((lhs_output and lhs_output.get_internal_type() == 'DurationField') or (rhs_output and rhs_output.get_internal_type() == 'DurationField'))): 
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection) if (lhs_output and rhs_output and self.connector == self.SUB and lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and lhs_output.get_internal_type() == rhs_output.get_internal_type()): return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection) expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): if not isinstance(side, DurationValue): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == 'DurationField': sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return compiler.compile(side) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs) rhs = compiler.compile(self.rhs) return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs) @deconstructible class F(Combinable): """An object capable of resolving references to existing query objects.""" def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): return query.resolve_ref(self.name, allow_joins, reuse, summarize) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. """ contains_aggregate = False def as_sql(self, *args, **kwargs): raise ValueError( 'This queryset contains a reference to an outer query and may ' 'only be used in a subquery.' 
) def relabeled_clone(self, relabels): return self class OuterRef(F): def resolve_expression(self, *args, **kwargs): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = '%(function)s(%(expressions)s)' arg_joiner = ', ' arity = None # The number of arguments the function accepts. def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items())) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: arg_sql, arg_params = compiler.compile(arg) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. if function is not None: data['function'] = function else: data.setdefault('function', self.function) template = template or data.get('template', self.template) arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner) data['expressions'] = data['field'] = arg_joiner.join(sql_parts) return template % data, params def copy(self): copy = super().copy() copy.source_expressions = self.source_expressions[:] copy.extra = self.extra.copy() return copy class Value(Expression): """Represent a wrapped value as a node within an expression.""" def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). 
""" super().__init__(output_field=output_field) self.value = value def __repr__(self): return "{}({})".format(self.__class__.__name__, self.value) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) val = self.value output_field = self._output_field_or_none if output_field is not None: if self.for_save: val = output_field.get_db_prep_save(val, connection=connection) else: val = output_field.get_db_prep_value(val, connection=connection) if hasattr(output_field, 'get_placeholder'): return output_field.get_placeholder(val, compiler, connection), [val] if val is None: # cx_Oracle does not always convert None to the appropriate # NULL type (like in case expressions using numbers), so we # use a literal SQL NULL return 'NULL', [] return '%s', [val] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) c.for_save = for_save return c def get_group_by_cols(self, alias=None): return [] class DurationValue(Value): def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) return connection.ops.date_interval_sql(self.value), [] class RawSQL(Expression): def __init__(self, sql, params, output_field=None): if output_field is None: output_field = fields.Field() self.sql, self.params = sql, params super().__init__(output_field=output_field) def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) def as_sql(self, compiler, connection): return '(%s)' % self.sql, self.params def get_group_by_cols(self, alias=None): return [self] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # Resolve parents fields used in raw SQL. 
for parent in query.model._meta.get_parent_list(): for parent_field in parent._meta.local_fields: _, column_name = parent_field.get_attname_column() if column_name.lower() in self.sql.lower(): query.resolve_ref(parent_field.name, allow_joins, reuse, summarize) break return super().resolve_expression(query, allow_joins, reuse, summarize, for_save) class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return '*', [] class Random(Expression): output_field = fields.FloatField() def __repr__(self): return "Random()" def as_sql(self, compiler, connection): return connection.ops.random_function_sql(), [] class Col(Expression): contains_column_references = True def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): alias, target = self.alias, self.target identifiers = (alias, str(target)) if alias else (str(target),) return '{}({})'.format(self.__class__.__name__, ', '.join(identifiers)) def as_sql(self, compiler, connection): alias, column = self.alias, self.target.column identifiers = (alias, column) if alias else (column,) sql = '.'.join(map(compiler.quote_name_unless_alias, identifiers)) return sql, [] def relabeled_clone(self, relabels): if self.alias is None: return self return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field) def get_group_by_cols(self, alias=None): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return (self.output_field.get_db_converters(connection) + self.target.get_db_converters(connection)) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): self.source, = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self, alias=None): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like an ordering clause. """ template = '%(expressions)s' def __init__(self, *expressions, **extra): if not expressions: raise ValueError('%s requires at least one expression.' % self.__class__.__name__) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) class ExpressionWrapper(Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. 
""" def __init__(self, expression, output_field): super().__init__(output_field=output_field) self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection): return self.expression.as_sql(compiler, connection) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) class When(Expression): template = 'WHEN %(condition)s THEN %(result)s' # This isn't a complete conditional expression, must be used in Case(). conditional = False def __init__(self, condition=None, then=None, **lookups): if lookups and condition is None: condition, lookups = Q(**lookups), None if condition is None or not getattr(condition, 'conditional', False) or lookups: raise TypeError( 'When() supports a Q object, a boolean expression, or lookups ' 'as a condition.' ) if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. return [self.result._output_field_or_none] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize if hasattr(c.condition, 'resolve_expression'): c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False) c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) template_params['condition'] = condition_sql sql_params.extend(condition_params) result_sql, result_params = compiler.compile(self.result) template_params['result'] = result_sql sql_params.extend(result_params) template = template or self.template return template % template_params, sql_params def get_group_by_cols(self, alias=None): # This is not a complete expression and cannot be used in GROUP BY. 
cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols class Case(Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = 'CASE %(cases)s ELSE %(default)s END' case_joiner = ' ' def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue case_parts.append(case_sql) sql_params.extend(case_params) default_sql, default_params = compiler.compile(self.default) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params['cases'] = case_joiner.join(case_parts) template_params['default'] = default_sql sql_params.extend(default_params) template = template or template_params.get('template', self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params class Subquery(Expression): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. 
""" template = '(%(subquery)s)' contains_aggregate = False def __init__(self, queryset, output_field=None, **extra): self.query = queryset.query self.extra = extra super().__init__(output_field) def __getstate__(self): state = super().__getstate__() state.pop('_constructor_args', None) return state def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} subquery_sql, sql_params = self.query.as_sql(compiler, connection) template_params['subquery'] = subquery_sql[1:-1] template = template or template_params.get('template', self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self, alias=None): if alias: return [Ref(alias, self)] return self.query.get_external_cols() class Exists(Subquery): template = 'EXISTS(%(subquery)s)' output_field = fields.BooleanField() def __init__(self, queryset, negated=False, **kwargs): # As a performance optimization, remove ordering since EXISTS doesn't # care about it, just whether or not a row matches. queryset = queryset.order_by() self.negated = negated super().__init__(queryset, **kwargs) def __invert__(self): clone = self.copy() clone.negated = not self.negated return clone def as_sql(self, compiler, connection, template=None, **extra_context): sql, params = super().as_sql(compiler, connection, template, **extra_context) if self.negated: sql = 'NOT {}'.format(sql) return sql, params def select_format(self, compiler, sql, params): # Wrap EXISTS() with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in the SELECT list. 
        if not compiler.connection.features.supports_boolean_expr_in_select_clause:
            sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql)
        return sql, params


class OrderBy(BaseExpression):
    template = '%(expression)s %(ordering)s'
    conditional = False

    def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False):
        if nulls_first and nulls_last:
            raise ValueError('nulls_first and nulls_last are mutually exclusive')
        self.nulls_first = nulls_first
        self.nulls_last = nulls_last
        self.descending = descending
        if not hasattr(expression, 'resolve_expression'):
            raise ValueError('expression must be an expression type')
        self.expression = expression

    def __repr__(self):
        return "{}({}, descending={})".format(
            self.__class__.__name__, self.expression, self.descending)

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection, template=None, **extra_context):
        template = template or self.template
        if connection.features.supports_order_by_nulls_modifier:
            if self.nulls_last:
                template = '%s NULLS LAST' % template
            elif self.nulls_first:
                template = '%s NULLS FIRST' % template
        else:
            if self.nulls_last:
                template = '%%(expression)s IS NULL, %s' % template
            elif self.nulls_first:
                template = '%%(expression)s IS NOT NULL, %s' % template
        connection.ops.check_expression_support(self)
        expression_sql, params = compiler.compile(self.expression)
        placeholders = {
            'expression': expression_sql,
            'ordering': 'DESC' if self.descending else 'ASC',
            **extra_context,
        }
        # The fallback templates above repeat %(expression)s, so repeat the
        # compiled parameters to match.
        params *= template.count('%(expression)s')
        return (template % placeholders).rstrip(), params

    def as_oracle(self, compiler, connection):
        # Oracle doesn't allow ORDER BY EXISTS() unless it's wrapped in
        # a CASE WHEN.
        if isinstance(self.expression, Exists):
            copy = self.copy()
            copy.expression = Case(
                When(self.expression, then=True),
                default=False,
                output_field=fields.BooleanField(),
            )
            return copy.as_sql(compiler, connection)
        return self.as_sql(compiler, connection)

    def get_group_by_cols(self, alias=None):
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols

    def reverse_ordering(self):
        self.descending = not self.descending
        if self.nulls_first or self.nulls_last:
            self.nulls_first = not self.nulls_first
            self.nulls_last = not self.nulls_last
        return self

    def asc(self):
        self.descending = False

    def desc(self):
        self.descending = True


class Window(Expression):
    template = '%(expression)s OVER (%(window)s)'
    # Although the main expression may either be an aggregate or an
    # expression with an aggregate function, the GROUP BY that will
    # be introduced in the query as a result is not desired.
    contains_aggregate = False
    contains_over_clause = True
    filterable = False

    def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None):
        self.partition_by = partition_by
        self.order_by = order_by
        self.frame = frame
        if not getattr(expression, 'window_compatible', False):
            raise ValueError(
                "Expression '%s' isn't compatible with OVER clauses."
                % expression.__class__.__name__
            )
        if self.partition_by is not None:
            if not isinstance(self.partition_by, (tuple, list)):
                self.partition_by = (self.partition_by,)
            self.partition_by = ExpressionList(*self.partition_by)
        if self.order_by is not None:
            if isinstance(self.order_by, (list, tuple)):
                self.order_by = ExpressionList(*self.order_by)
            elif not isinstance(self.order_by, BaseExpression):
                raise ValueError(
                    'order_by must be either an Expression or a sequence of '
                    'expressions.'
                )
        super().__init__(output_field=output_field)
        self.source_expression = self._parse_expressions(expression)[0]

    def _resolve_output_field(self):
        return self.source_expression.output_field

    def get_source_expressions(self):
        return [self.source_expression, self.partition_by, self.order_by, self.frame]

    def set_source_expressions(self, exprs):
        self.source_expression, self.partition_by, self.order_by, self.frame = exprs

    def as_sql(self, compiler, connection, template=None):
        connection.ops.check_expression_support(self)
        if not connection.features.supports_over_clause:
            raise NotSupportedError('This backend does not support window expressions.')
        expr_sql, params = compiler.compile(self.source_expression)
        window_sql, window_params = [], []

        if self.partition_by is not None:
            sql_expr, sql_params = self.partition_by.as_sql(
                compiler=compiler, connection=connection,
                template='PARTITION BY %(expressions)s',
            )
            window_sql.extend(sql_expr)
            window_params.extend(sql_params)

        if self.order_by is not None:
            window_sql.append(' ORDER BY ')
            order_sql, order_params = compiler.compile(self.order_by)
            window_sql.extend(order_sql)
            window_params.extend(order_params)

        if self.frame:
            frame_sql, frame_params = compiler.compile(self.frame)
            window_sql.append(' ' + frame_sql)
            window_params.extend(frame_params)

        params.extend(window_params)
        template = template or self.template

        return template % {
            'expression': expr_sql,
            'window': ''.join(window_sql).strip()
        }, params

    def __str__(self):
        return '{} OVER ({}{}{})'.format(
            str(self.source_expression),
            'PARTITION BY ' + str(self.partition_by) if self.partition_by else '',
            'ORDER BY ' + str(self.order_by) if self.order_by else '',
            str(self.frame or ''),
        )

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self)

    def get_group_by_cols(self, alias=None):
        return []


class WindowFrame(Expression):
    """
    Model the frame clause in window expressions. There are two types of frame
    clauses, which are implemented as subclasses; however, all processing and
    validation (by no means intended to be complete) is done here. Thus,
    providing an end for a frame is optional (the default is UNBOUNDED
    FOLLOWING, which is the last row in the frame).
""" template = '%(frame_type)s BETWEEN %(start)s AND %(end)s' def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end(connection, self.start.value, self.end.value) return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, }, [] def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = '%d %s' % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = '%d %s' % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError('Subclasses must implement window_frame_start_end().') class RowRange(WindowFrame): frame_type = 'ROWS' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = 'RANGE' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
import operator from collections import Counter, defaultdict from functools import partial, reduce from itertools import chain from operator import attrgetter from django.db import IntegrityError, connections, transaction from django.db.models import query_utils, signals, sql class ProtectedError(IntegrityError): def __init__(self, msg, protected_objects): self.protected_objects = protected_objects super().__init__(msg, protected_objects) class RestrictedError(IntegrityError): def __init__(self, msg, restricted_objects): self.restricted_objects = restricted_objects super().__init__(msg, restricted_objects) def CASCADE(collector, field, sub_objs, using): collector.collect( sub_objs, source=field.remote_field.model, source_attr=field.name, nullable=field.null, fail_on_restricted=False, ) if field.null and not connections[using].features.can_defer_constraint_checks: collector.add_field_update(field, None, sub_objs) def PROTECT(collector, field, sub_objs, using): raise ProtectedError( "Cannot delete some instances of model '%s' because they are " "referenced through a protected foreign key: '%s.%s'" % ( field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name ), sub_objs ) def RESTRICT(collector, field, sub_objs, using): collector.add_restricted_objects(field, sub_objs) collector.add_dependency(field.remote_field.model, field.model) def SET(value): if callable(value): def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value(), sub_objs) else: def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value, sub_objs) set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {}) return set_on_delete def SET_NULL(collector, field, sub_objs, using): collector.add_field_update(field, None, sub_objs) def SET_DEFAULT(collector, field, sub_objs, using): collector.add_field_update(field, field.get_default(), sub_objs) def DO_NOTHING(collector, field, sub_objs, using): pass def get_candidate_relations_to_delete(opts): # The candidate relations are the ones that come from N-1 and 1-1 relations. # N-N (i.e., many-to-many) relations aren't candidates for deletion. return ( f for f in opts.get_fields(include_hidden=True) if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many) ) class Collector: def __init__(self, using): self.using = using # Initially, {model: {instances}}, later values become lists. self.data = defaultdict(set) # {model: {(field, value): {instances}}} self.field_updates = defaultdict(partial(defaultdict, set)) # {model: {field: {instances}}} self.restricted_objects = defaultdict(partial(defaultdict, set)) # fast_deletes is a list of queryset-likes that can be deleted without # fetching the objects into memory. self.fast_deletes = [] # Tracks deletion-order dependency for databases without transactions # or ability to defer constraint checks. Only concrete model classes # should be included, as the dependencies exist only between actual # database tables; proxy models are represented here by their concrete # parent. self.dependencies = defaultdict(set) # {model: {models}} def add(self, objs, source=None, nullable=False, reverse_dependency=False): """ Add 'objs' to the collection of objects to be deleted. If the call is the result of a cascade, 'source' should be the model that caused it, and 'nullable' should be set to True if the relation can be null. Return a list of all objects that were not already collected. 
""" if not objs: return [] new_objs = [] model = objs[0].__class__ instances = self.data[model] for obj in objs: if obj not in instances: new_objs.append(obj) instances.update(new_objs) # Nullable relationships can be ignored -- they are nulled out before # deleting, and therefore do not affect the order in which objects have # to be deleted. if source is not None and not nullable: self.add_dependency(source, model, reverse_dependency=reverse_dependency) return new_objs def add_dependency(self, model, dependency, reverse_dependency=False): if reverse_dependency: model, dependency = dependency, model self.dependencies[model._meta.concrete_model].add(dependency._meta.concrete_model) self.data.setdefault(dependency, self.data.default_factory()) def add_field_update(self, field, value, objs): """ Schedule a field update. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). """ if not objs: return model = objs[0].__class__ self.field_updates[model][field, value].update(objs) def add_restricted_objects(self, field, objs): if objs: model = objs[0].__class__ self.restricted_objects[model][field].update(objs) def clear_restricted_objects_from_set(self, model, objs): if model in self.restricted_objects: self.restricted_objects[model] = { field: items - objs for field, items in self.restricted_objects[model].items() } def clear_restricted_objects_from_queryset(self, model, qs): if model in self.restricted_objects: objs = set(qs.filter(pk__in=[ obj.pk for objs in self.restricted_objects[model].values() for obj in objs ])) self.clear_restricted_objects_from_set(model, objs) def _has_signal_listeners(self, model): return ( signals.pre_delete.has_listeners(model) or signals.post_delete.has_listeners(model) ) def can_fast_delete(self, objs, from_field=None): """ Determine if the objects in the given queryset-like or single object can be fast-deleted. This can be done if there are no cascades, no parents and no signal listeners for the object class. The 'from_field' tells where we are coming from - we need this to determine if the objects are in fact to be deleted. Allow also skipping parent -> child -> parent chain preventing fast delete of the child. """ if from_field and from_field.remote_field.on_delete is not CASCADE: return False if hasattr(objs, '_meta'): model = objs._meta.model elif hasattr(objs, 'model') and hasattr(objs, '_raw_delete'): model = objs.model else: return False if self._has_signal_listeners(model): return False # The use of from_field comes from the need to avoid cascade back to # parent when parent delete is cascading to child. opts = model._meta return ( all(link == from_field for link in opts.concrete_model._meta.parents.values()) and # Foreign keys pointing to this model. all( related.field.remote_field.on_delete is DO_NOTHING for related in get_candidate_relations_to_delete(opts) ) and ( # Something like generic foreign key. not any(hasattr(field, 'bulk_related_objects') for field in opts.private_fields) ) ) def get_del_batches(self, objs, fields): """ Return the objs in suitably sized batches for the used connection. 
""" field_names = [field.name for field in fields] conn_batch_size = max( connections[self.using].ops.bulk_batch_size(field_names, objs), 1) if len(objs) > conn_batch_size: return [objs[i:i + conn_batch_size] for i in range(0, len(objs), conn_batch_size)] else: return [objs] def collect(self, objs, source=None, nullable=False, collect_related=True, source_attr=None, reverse_dependency=False, keep_parents=False, fail_on_restricted=True): """ Add 'objs' to the collection of objects to be deleted as well as all parent instances. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). If 'collect_related' is True, related objects will be handled by their respective on_delete handler. If the call is the result of a cascade, 'source' should be the model that caused it and 'nullable' should be set to True, if the relation can be null. If 'reverse_dependency' is True, 'source' will be deleted before the current model, rather than after. (Needed for cascading to parent models, the one case in which the cascade follows the forwards direction of an FK rather than the reverse direction.) If 'keep_parents' is True, data of parent model's will be not deleted. If 'fail_on_restricted' is False, error won't be raised even if it's prohibited to delete such objects due to RESTRICT, that defers restricted object checking in recursive calls where the top-level call may need to collect more objects to determine whether restricted ones can be deleted. """ if self.can_fast_delete(objs): self.fast_deletes.append(objs) return new_objs = self.add(objs, source, nullable, reverse_dependency=reverse_dependency) if not new_objs: return model = new_objs[0].__class__ if not keep_parents: # Recursively collect concrete model's parent models, but not their # related objects. These will be found by meta.get_fields() concrete_model = model._meta.concrete_model for ptr in concrete_model._meta.parents.values(): if ptr: parent_objs = [getattr(obj, ptr.name) for obj in new_objs] self.collect(parent_objs, source=model, source_attr=ptr.remote_field.related_name, collect_related=False, reverse_dependency=True, fail_on_restricted=False) if not collect_related: return if keep_parents: parents = set(model._meta.get_parent_list()) model_fast_deletes = defaultdict(list) protected_objects = defaultdict(list) for related in get_candidate_relations_to_delete(model._meta): # Preserve parent reverse relationships if keep_parents=True. if keep_parents and related.model in parents: continue field = related.field if field.remote_field.on_delete == DO_NOTHING: continue related_model = related.related_model if self.can_fast_delete(related_model, from_field=field): model_fast_deletes[related_model].append(field) continue batches = self.get_del_batches(new_objs, [field]) for batch in batches: sub_objs = self.related_objects(related_model, [field], batch) # Non-referenced fields can be deferred if no signal receivers # are connected for the related model as they'll never be # exposed to the user. Skip field deferring when some # relationships are select_related as interactions between both # features are hard to get right. This should only happen in # the rare cases where .related_objects is overridden anyway. 
if not (sub_objs.query.select_related or self._has_signal_listeners(related_model)): referenced_fields = set(chain.from_iterable( (rf.attname for rf in rel.field.foreign_related_fields) for rel in get_candidate_relations_to_delete(related_model._meta) )) sub_objs = sub_objs.only(*tuple(referenced_fields)) if sub_objs: try: field.remote_field.on_delete(self, field, sub_objs, self.using) except ProtectedError as error: key = "'%s.%s'" % (field.model.__name__, field.name) protected_objects[key] += error.protected_objects if protected_objects: raise ProtectedError( 'Cannot delete some instances of model %r because they are ' 'referenced through protected foreign keys: %s.' % ( model.__name__, ', '.join(protected_objects), ), chain.from_iterable(protected_objects.values()), ) for related_model, related_fields in model_fast_deletes.items(): batches = self.get_del_batches(new_objs, related_fields) for batch in batches: sub_objs = self.related_objects(related_model, related_fields, batch) self.fast_deletes.append(sub_objs) for field in model._meta.private_fields: if hasattr(field, 'bulk_related_objects'): # It's something like generic foreign key. sub_objs = field.bulk_related_objects(new_objs, self.using) self.collect(sub_objs, source=model, nullable=True, fail_on_restricted=False) if fail_on_restricted: # Raise an error if collected restricted objects (RESTRICT) aren't # candidates for deletion also collected via CASCADE. for related_model, instances in self.data.items(): self.clear_restricted_objects_from_set(related_model, instances) for qs in self.fast_deletes: self.clear_restricted_objects_from_queryset(qs.model, qs) if self.restricted_objects.values(): restricted_objects = defaultdict(list) for related_model, fields in self.restricted_objects.items(): for field, objs in fields.items(): if objs: key = "'%s.%s'" % (related_model.__name__, field.name) restricted_objects[key] += objs if restricted_objects: raise RestrictedError( 'Cannot delete some instances of model %r because ' 'they are referenced through restricted foreign keys: ' '%s.' % ( model.__name__, ', '.join(restricted_objects), ), chain.from_iterable(restricted_objects.values()), ) def related_objects(self, related_model, related_fields, objs): """ Get a QuerySet of the related model to objs via related fields. """ predicate = reduce(operator.or_, ( query_utils.Q(**{'%s__in' % related_field.name: objs}) for related_field in related_fields )) return related_model._base_manager.using(self.using).filter(predicate) def instances_with_model(self): for model, instances in self.data.items(): for obj in instances: yield model, obj def sort(self): sorted_models = [] concrete_models = set() models = list(self.data) while len(sorted_models) < len(models): found = False for model in models: if model in sorted_models: continue dependencies = self.dependencies.get(model._meta.concrete_model) if not (dependencies and dependencies.difference(concrete_models)): sorted_models.append(model) concrete_models.add(model._meta.concrete_model) found = True if not found: return self.data = {model: self.data[model] for model in sorted_models} def delete(self): # sort instance collections for model, instances in self.data.items(): self.data[model] = sorted(instances, key=attrgetter("pk")) # if possible, bring the models in an order suitable for databases that # don't support transactions or cannot defer constraint checks until the # end of a transaction. 
self.sort() # number of objects deleted for each model label deleted_counter = Counter() # Optimize for the case with a single obj and no dependencies if len(self.data) == 1 and len(instances) == 1: instance = list(instances)[0] if self.can_fast_delete(instance): with transaction.mark_for_rollback_on_error(): count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using) setattr(instance, model._meta.pk.attname, None) return count, {model._meta.label: count} with transaction.atomic(using=self.using, savepoint=False): # send pre_delete signals for model, obj in self.instances_with_model(): if not model._meta.auto_created: signals.pre_delete.send( sender=model, instance=obj, using=self.using ) # fast deletes for qs in self.fast_deletes: count = qs._raw_delete(using=self.using) deleted_counter[qs.model._meta.label] += count # update fields for model, instances_for_fieldvalues in self.field_updates.items(): for (field, value), instances in instances_for_fieldvalues.items(): query = sql.UpdateQuery(model) query.update_batch([obj.pk for obj in instances], {field.name: value}, self.using) # reverse instance collections for instances in self.data.values(): instances.reverse() # delete instances for model, instances in self.data.items(): query = sql.DeleteQuery(model) pk_list = [obj.pk for obj in instances] count = query.delete_batch(pk_list, self.using) deleted_counter[model._meta.label] += count if not model._meta.auto_created: for obj in instances: signals.post_delete.send( sender=model, instance=obj, using=self.using ) # update collected instances for instances_for_fieldvalues in self.field_updates.values(): for (field, value), instances in instances_for_fieldvalues.items(): for obj in instances: setattr(obj, field.attname, value) for model, instances in self.data.items(): for instance in instances: setattr(instance, model._meta.pk.attname, None) return sum(deleted_counter.values()), dict(deleted_counter)
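
# Illustrative sketch (not part of this module): Model.delete() and
# QuerySet.delete() drive a Collector in roughly this way; ``book`` and the
# 'default' alias are hypothetical.
#
#   from django.db.models.deletion import Collector
#
#   collector = Collector(using='default')
#   collector.collect([book])      # cascades via each field's on_delete
#   total, per_model = collector.delete()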
import datetime import decimal import functools import hashlib import logging import time from contextlib import contextmanager from django.db import NotSupportedError logger = logging.getLogger('django.db.backends') class CursorWrapper: def __init__(self, cursor, db): self.cursor = cursor self.db = db WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset']) def __getattr__(self, attr): cursor_attr = getattr(self.cursor, attr) if attr in CursorWrapper.WRAP_ERROR_ATTRS: return self.db.wrap_database_errors(cursor_attr) else: return cursor_attr def __iter__(self): with self.db.wrap_database_errors: yield from self.cursor def __enter__(self): return self def __exit__(self, type, value, traceback): # Close instead of passing through to avoid backend-specific behavior # (#17671). Catch errors liberally because errors in cleanup code # aren't useful. try: self.close() except self.db.Database.Error: pass # The following methods cannot be implemented in __getattr__, because the # code must run when the method is invoked, not just when it is accessed. def callproc(self, procname, params=None, kparams=None): # Keyword parameters for callproc aren't supported in PEP 249, but the # database driver may support them (e.g. cx_Oracle). if kparams is not None and not self.db.features.supports_callproc_kwargs: raise NotSupportedError( 'Keyword parameters for callproc are not supported on this ' 'database backend.' ) self.db.validate_no_broken_transaction() with self.db.wrap_database_errors: if params is None and kparams is None: return self.cursor.callproc(procname) elif kparams is None: return self.cursor.callproc(procname, params) else: params = params or () return self.cursor.callproc(procname, params, kparams) def execute(self, sql, params=None): return self._execute_with_wrappers(sql, params, many=False, executor=self._execute) def executemany(self, sql, param_list): return self._execute_with_wrappers(sql, param_list, many=True, executor=self._executemany) def _execute_with_wrappers(self, sql, params, many, executor): context = {'connection': self.db, 'cursor': self} for wrapper in reversed(self.db.execute_wrappers): executor = functools.partial(wrapper, executor) return executor(sql, params, many, context) def _execute(self, sql, params, *ignored_wrapper_args): self.db.validate_no_broken_transaction() with self.db.wrap_database_errors: if params is None: # params default might be backend specific. return self.cursor.execute(sql) else: return self.cursor.execute(sql, params) def _executemany(self, sql, param_list, *ignored_wrapper_args): self.db.validate_no_broken_transaction() with self.db.wrap_database_errors: return self.cursor.executemany(sql, param_list) class CursorDebugWrapper(CursorWrapper): # XXX callproc isn't instrumented at this time. def execute(self, sql, params=None): with self.debug_sql(sql, params, use_last_executed_query=True): return super().execute(sql, params) def executemany(self, sql, param_list): with self.debug_sql(sql, param_list, many=True): return super().executemany(sql, param_list) @contextmanager def debug_sql(self, sql=None, params=None, use_last_executed_query=False, many=False): start = time.monotonic() try: yield finally: stop = time.monotonic() duration = stop - start if use_last_executed_query: sql = self.db.ops.last_executed_query(self.cursor, sql, params) try: times = len(params) if many else '' except TypeError: # params could be an iterator. times = '?' 
            self.db.queries_log.append({
                'sql': '%s times: %s' % (times, sql) if many else sql,
                'time': '%.3f' % duration,
            })
            logger.debug(
                '(%.3f) %s; args=%s', duration, sql, params,
                extra={'duration': duration, 'sql': sql, 'params': params},
            )


###############################################
# Converters from database (string) to Python #
###############################################

def typecast_date(s):
    return datetime.date(*map(int, s.split('-'))) if s else None  # return None if s is null


def typecast_time(s):  # does NOT store time zone information
    if not s:
        return None
    hour, minutes, seconds = s.split(':')
    if '.' in seconds:  # check whether seconds have a fractional part
        seconds, microseconds = seconds.split('.')
    else:
        microseconds = '0'
    return datetime.time(int(hour), int(minutes), int(seconds), int((microseconds + '000000')[:6]))


def typecast_timestamp(s):  # does NOT store time zone information
    # "2005-07-29 15:48:00.590358-05"
    # "2005-07-29 09:56:00-05"
    if not s:
        return None
    if ' ' not in s:
        return typecast_date(s)
    d, t = s.split()
    # Remove timezone information.
    if '-' in t:
        t, _ = t.split('-', 1)
    elif '+' in t:
        t, _ = t.split('+', 1)
    dates = d.split('-')
    times = t.split(':')
    seconds = times[2]
    if '.' in seconds:  # check whether seconds have a fractional part
        seconds, microseconds = seconds.split('.')
    else:
        microseconds = '0'
    return datetime.datetime(
        int(dates[0]), int(dates[1]), int(dates[2]),
        int(times[0]), int(times[1]), int(seconds), int((microseconds + '000000')[:6])
    )


###############################################
# Converters from Python to database (string) #
###############################################

def split_identifier(identifier):
    """
    Split a SQL identifier into a two-element tuple of (namespace, name).

    The identifier could be a table, column, or sequence name that might be
    prefixed by a namespace.
    """
    try:
        namespace, name = identifier.split('"."')
    except ValueError:
        namespace, name = '', identifier
    return namespace.strip('"'), name.strip('"')


def truncate_name(identifier, length=None, hash_len=4):
    """
    Shorten a SQL identifier to a repeatable mangled version with the given
    length.

    If a quote-stripped name contains a namespace, e.g. USERNAME"."TABLE,
    truncate the table portion only.
    """
    namespace, name = split_identifier(identifier)

    if length is None or len(name) <= length:
        return identifier

    digest = names_digest(name, length=hash_len)
    return '%s%s%s' % ('%s"."' % namespace if namespace else '', name[:length - hash_len], digest)


def names_digest(*args, length):
    """
    Generate a digest of a set of arguments, truncated to `length` hex
    characters, that can be used to shorten identifying names.
    """
    h = hashlib.md5()
    for arg in args:
        h.update(arg.encode())
    return h.hexdigest()[:length]


def format_number(value, max_digits, decimal_places):
    """
    Format a number into a string with the requisite number of digits and
    decimal places.
    """
    if value is None:
        return None
    context = decimal.getcontext().copy()
    if max_digits is not None:
        context.prec = max_digits
    if decimal_places is not None:
        value = value.quantize(decimal.Decimal(1).scaleb(-decimal_places), context=context)
    else:
        context.traps[decimal.Rounded] = 1
        value = context.create_decimal(value)
    return "{:f}".format(value)


def strip_quotes(table_name):
    """
    Strip quotes off of quoted table names to make them safe for use in index
    names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
    scheme) becomes 'USER"."TABLE'.
    """
    has_quotes = table_name.startswith('"') and table_name.endswith('"')
    return table_name[1:-1] if has_quotes else table_name
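
# Illustrative examples (not part of this module) for the helpers above; the
# truncated digest shown is a placeholder, not a computed value.
#
#   >>> split_identifier('"USER"."TABLE"')
#   ('USER', 'TABLE')
#   >>> truncate_name('some_very_long_index_name', length=10)
#   'some_vXXXX'   # name[:6] plus a 4-hex-char digest from names_digest()
#   >>> format_number(decimal.Decimal('1234.5678'), max_digits=8, decimal_places=2)
#   '1234.57'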
from django.core.exceptions import FieldDoesNotExist from django.db.models import NOT_PROVIDED from django.utils.functional import cached_property from .base import Operation from .utils import ( ModelTuple, field_references_model, is_referenced_by_foreign_key, ) class FieldOperation(Operation): def __init__(self, model_name, name, field=None): self.model_name = model_name self.name = name self.field = field @cached_property def model_name_lower(self): return self.model_name.lower() @cached_property def name_lower(self): return self.name.lower() def is_same_model_operation(self, operation): return self.model_name_lower == operation.model_name_lower def is_same_field_operation(self, operation): return self.is_same_model_operation(operation) and self.name_lower == operation.name_lower def references_model(self, name, app_label=None): name_lower = name.lower() if name_lower == self.model_name_lower: return True if self.field: return field_references_model(self.field, ModelTuple(app_label, name_lower)) return False def references_field(self, model_name, name, app_label=None): model_name_lower = model_name.lower() # Check if this operation locally references the field. if model_name_lower == self.model_name_lower: if name == self.name: return True elif self.field and hasattr(self.field, 'from_fields') and name in self.field.from_fields: return True # Check if this operation remotely references the field. if self.field: model_tuple = ModelTuple(app_label, model_name_lower) remote_field = self.field.remote_field if remote_field: if (ModelTuple.from_model(remote_field.model) == model_tuple and (not hasattr(self.field, 'to_fields') or name in self.field.to_fields or None in self.field.to_fields)): return True through = getattr(remote_field, 'through', None) if (through and ModelTuple.from_model(through) == model_tuple and (getattr(remote_field, 'through_fields', None) is None or name in remote_field.through_fields)): return True return False def reduce(self, operation, app_label=None): return ( super().reduce(operation, app_label=app_label) or not operation.references_field(self.model_name, self.name, app_label) ) class AddField(FieldOperation): """Add a field to a model.""" def __init__(self, model_name, name, field, preserve_default=True): self.preserve_default = preserve_default super().__init__(model_name, name, field) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'name': self.name, 'field': self.field, } if self.preserve_default is not True: kwargs['preserve_default'] = self.preserve_default return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): # If preserve default is off, don't use the default for future state if not self.preserve_default: field = self.field.clone() field.default = NOT_PROVIDED else: field = self.field state.models[app_label, self.model_name_lower].fields.append((self.name, field)) # Delay rendering of relationships if it's not a relational field delay = not field.is_relation state.reload_model(app_label, self.model_name_lower, delay=delay) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) field = to_model._meta.get_field(self.name) if not self.preserve_default: field.default = self.field.default schema_editor.add_field( from_model, field, ) if not self.preserve_default: field.default = 
NOT_PROVIDED def database_backwards(self, app_label, schema_editor, from_state, to_state): from_model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, from_model): schema_editor.remove_field(from_model, from_model._meta.get_field(self.name)) def describe(self): return "Add field %s to %s" % (self.name, self.model_name) def reduce(self, operation, app_label=None): if isinstance(operation, FieldOperation) and self.is_same_field_operation(operation): if isinstance(operation, AlterField): return [ AddField( model_name=self.model_name, name=operation.name, field=operation.field, ), ] elif isinstance(operation, RemoveField): return [] elif isinstance(operation, RenameField): return [ AddField( model_name=self.model_name, name=operation.new_name, field=self.field, ), ] return super().reduce(operation, app_label=app_label) class RemoveField(FieldOperation): """Remove a field from a model.""" def deconstruct(self): kwargs = { 'model_name': self.model_name, 'name': self.name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): new_fields = [] old_field = None for name, instance in state.models[app_label, self.model_name_lower].fields: if name != self.name: new_fields.append((name, instance)) else: old_field = instance state.models[app_label, self.model_name_lower].fields = new_fields # Delay rendering of relationships if it's not a relational field delay = not old_field.is_relation state.reload_model(app_label, self.model_name_lower, delay=delay) def database_forwards(self, app_label, schema_editor, from_state, to_state): from_model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, from_model): schema_editor.remove_field(from_model, from_model._meta.get_field(self.name)) def database_backwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) schema_editor.add_field(from_model, to_model._meta.get_field(self.name)) def describe(self): return "Remove field %s from %s" % (self.name, self.model_name) def reduce(self, operation, app_label=None): from .models import DeleteModel if isinstance(operation, DeleteModel) and operation.name_lower == self.model_name_lower: return [operation] return super().reduce(operation, app_label=app_label) class AlterField(FieldOperation): """ Alter a field's database column (e.g. null, max_length) to the provided new field. """ def __init__(self, model_name, name, field, preserve_default=True): self.preserve_default = preserve_default super().__init__(model_name, name, field) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'name': self.name, 'field': self.field, } if self.preserve_default is not True: kwargs['preserve_default'] = self.preserve_default return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): if not self.preserve_default: field = self.field.clone() field.default = NOT_PROVIDED else: field = self.field state.models[app_label, self.model_name_lower].fields = [ (n, field if n == self.name else f) for n, f in state.models[app_label, self.model_name_lower].fields ] # TODO: investigate if old relational fields must be reloaded or if it's # sufficient if the new field is (#27737). 
# Delay rendering of relationships if it's not a relational field and # not referenced by a foreign key. delay = ( not field.is_relation and not is_referenced_by_foreign_key(state, self.model_name_lower, self.field, self.name) ) state.reload_model(app_label, self.model_name_lower, delay=delay) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) from_field = from_model._meta.get_field(self.name) to_field = to_model._meta.get_field(self.name) if not self.preserve_default: to_field.default = self.field.default schema_editor.alter_field(from_model, from_field, to_field) if not self.preserve_default: to_field.default = NOT_PROVIDED def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def describe(self): return "Alter field %s on %s" % (self.name, self.model_name) def reduce(self, operation, app_label=None): if isinstance(operation, RemoveField) and self.is_same_field_operation(operation): return [operation] elif isinstance(operation, RenameField) and self.is_same_field_operation(operation): return [ operation, AlterField( model_name=self.model_name, name=operation.new_name, field=self.field, ), ] return super().reduce(operation, app_label=app_label) class RenameField(FieldOperation): """Rename a field on the model. Might affect db_column too.""" def __init__(self, model_name, old_name, new_name): self.old_name = old_name self.new_name = new_name super().__init__(model_name, old_name) @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { 'model_name': self.model_name, 'old_name': self.old_name, 'new_name': self.new_name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.model_name_lower] # Rename the field fields = model_state.fields found = False delay = True for index, (name, field) in enumerate(fields): if not found and name == self.old_name: fields[index] = (self.new_name, field) found = True # Fix from_fields to refer to the new field. from_fields = getattr(field, 'from_fields', None) if from_fields: field.from_fields = tuple([ self.new_name if from_field_name == self.old_name else from_field_name for from_field_name in from_fields ]) # Delay rendering of relationships if it's not a relational # field and not referenced by a foreign key. delay = delay and ( not field.is_relation and not is_referenced_by_foreign_key(state, self.model_name_lower, field, self.name) ) if not found: raise FieldDoesNotExist( "%s.%s has no field named '%s'" % (app_label, self.model_name, self.old_name) ) # Fix index/unique_together to refer to the new field options = model_state.options for option in ('index_together', 'unique_together'): if option in options: options[option] = [ [self.new_name if n == self.old_name else n for n in together] for together in options[option] ] # Fix to_fields to refer to the new field. 
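        # e.g. another model's hypothetical
        # ForeignKey('app.Model', to_field='old_name') must now reference
        # the renamed field.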
model_tuple = app_label, self.model_name_lower for (model_app_label, model_name), model_state in state.models.items(): for index, (name, field) in enumerate(model_state.fields): remote_field = field.remote_field if remote_field: remote_model_tuple = self._get_model_tuple( remote_field.model, model_app_label, model_name ) if remote_model_tuple == model_tuple: if getattr(remote_field, 'field_name', None) == self.old_name: remote_field.field_name = self.new_name to_fields = getattr(field, 'to_fields', None) if to_fields: field.to_fields = tuple([ self.new_name if to_field_name == self.old_name else to_field_name for to_field_name in to_fields ]) state.reload_model(app_label, self.model_name_lower, delay=delay) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) schema_editor.alter_field( from_model, from_model._meta.get_field(self.old_name), to_model._meta.get_field(self.new_name), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) schema_editor.alter_field( from_model, from_model._meta.get_field(self.new_name), to_model._meta.get_field(self.old_name), ) def describe(self): return "Rename field %s on %s to %s" % (self.old_name, self.model_name, self.new_name) def references_field(self, model_name, name, app_label=None): return self.references_model(model_name) and ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def reduce(self, operation, app_label=None): if (isinstance(operation, RenameField) and self.is_same_model_operation(operation) and self.new_name_lower == operation.old_name_lower): return [ RenameField( self.model_name, self.old_name, operation.new_name, ), ] # Skip `FieldOperation.reduce` as we want to run `references_field` # against self.new_name. return ( super(FieldOperation, self).reduce(operation, app_label=app_label) or not operation.references_field(self.model_name, self.new_name, app_label) )
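# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: how the reduce() hooks
# defined on these operations let the migration optimizer collapse a sequence
# of field operations. The model name 'Pony', field name 'weight', and app
# label 'app' are hypothetical; the function is defined but never called at
# import time.

def _demo_field_operation_reduction():
    from django.db import models
    from django.db.migrations.optimizer import MigrationOptimizer

    operations = [
        AddField('Pony', 'weight', models.FloatField(null=True)),
        AlterField('Pony', 'weight', models.FloatField(default=0.0)),
    ]
    # AddField.reduce() matches the AlterField on the same field and returns
    # a single AddField that carries the altered field definition.
    optimized = MigrationOptimizer().optimize(operations, app_label='app')
    assert len(optimized) == 1 and isinstance(optimized[0], AddField)
    return optimized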
import functools import inspect from functools import partial from django import forms from django.apps import apps from django.conf import SettingsReference from django.core import checks, exceptions from django.db import connection, router from django.db.backends import utils from django.db.models import Q from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL from django.db.models.query_utils import PathInfo from django.db.models.utils import make_model_tuple from django.utils.functional import cached_property from django.utils.translation import gettext_lazy as _ from . import Field from .mixins import FieldCacheMixin from .related_descriptors import ( ForeignKeyDeferredAttribute, ForwardManyToOneDescriptor, ForwardOneToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from .related_lookups import ( RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn, RelatedIsNull, RelatedLessThan, RelatedLessThanOrEqual, ) from .reverse_related import ( ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel, ) RECURSIVE_RELATIONSHIP_CONSTANT = 'self' def resolve_relation(scope_model, relation): """ Transform relation into a model or fully-qualified model string of the form "app_label.ModelName", relative to scope_model. The relation argument can be: * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case the model argument will be returned. * A bare model name without an app_label, in which case scope_model's app_label will be prepended. * An "app_label.ModelName" string. * A model class, which will be returned unchanged. """ # Check for recursive relations if relation == RECURSIVE_RELATIONSHIP_CONSTANT: relation = scope_model # Look for an "app.Model" relation if isinstance(relation, str): if "." not in relation: relation = "%s.%s" % (scope_model._meta.app_label, relation) return relation def lazy_related_operation(function, model, *related_models, **kwargs): """ Schedule `function` to be called once `model` and all `related_models` have been imported and registered with the app registry. `function` will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The `model` argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see `resolve_relation()` for the various forms these may take. Any relative references will be resolved relative to `model`. This is a convenience wrapper for `Apps.lazy_model_operation` - the app registry model used is the one found in `model._meta.apps`. """ models = [model] + [resolve_relation(model, rel) for rel in related_models] model_keys = (make_model_tuple(m) for m in models) apps = model._meta.apps return apps.lazy_model_operation(partial(function, **kwargs), *model_keys) class RelatedField(FieldCacheMixin, Field): """Base class that all relational fields inherit from.""" # Field flags one_to_many = False one_to_one = False many_to_many = False many_to_one = False @cached_property def related_model(self): # Can't cache this property until all the models are loaded. 
apps.check_models_ready() return self.remote_field.model def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_related_name_is_valid(), *self._check_related_query_name_is_valid(), *self._check_relation_model_exists(), *self._check_referencing_to_swapped_model(), *self._check_clashes(), ] def _check_related_name_is_valid(self): import keyword related_name = self.remote_field.related_name if related_name is None: return [] is_valid_id = not keyword.iskeyword(related_name) and related_name.isidentifier() if not (is_valid_id or related_name.endswith('+')): return [ checks.Error( "The name '%s' is invalid related_name for field %s.%s" % (self.remote_field.related_name, self.model._meta.object_name, self.name), hint="Related name must be a valid Python identifier or end with a '+'", obj=self, id='fields.E306', ) ] return [] def _check_related_query_name_is_valid(self): if self.remote_field.is_hidden(): return [] rel_query_name = self.related_query_name() errors = [] if rel_query_name.endswith('_'): errors.append( checks.Error( "Reverse query name '%s' must not end with an underscore." % (rel_query_name,), hint=("Add or change a related_name or related_query_name " "argument for this field."), obj=self, id='fields.E308', ) ) if LOOKUP_SEP in rel_query_name: errors.append( checks.Error( "Reverse query name '%s' must not contain '%s'." % (rel_query_name, LOOKUP_SEP), hint=("Add or change a related_name or related_query_name " "argument for this field."), obj=self, id='fields.E309', ) ) return errors def _check_relation_model_exists(self): rel_is_missing = self.remote_field.model not in self.opts.apps.get_models() rel_is_string = isinstance(self.remote_field.model, str) model_name = self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name if rel_is_missing and (rel_is_string or not self.remote_field.model._meta.swapped): return [ checks.Error( "Field defines a relation with model '%s', which is either " "not installed, or is abstract." % model_name, obj=self, id='fields.E300', ) ] return [] def _check_referencing_to_swapped_model(self): if (self.remote_field.model not in self.opts.apps.get_models() and not isinstance(self.remote_field.model, str) and self.remote_field.model._meta.swapped): model = "%s.%s" % ( self.remote_field.model._meta.app_label, self.remote_field.model._meta.object_name ) return [ checks.Error( "Field defines a relation with the model '%s', which has " "been swapped out." % model, hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable, obj=self, id='fields.E301', ) ] return [] def _check_clashes(self): """Check accessor and reverse query name clashes.""" from django.db.models.base import ModelBase errors = [] opts = self.model._meta # `f.remote_field.model` may be a string instead of a model. Skip if model name is # not resolved. if not isinstance(self.remote_field.model, ModelBase): return [] # Consider that we are checking field `Model.foreign` and the models # are: # # class Target(models.Model): # model = models.IntegerField() # model_set = models.IntegerField() # # class Model(models.Model): # foreign = models.ForeignKey(Target) # m2m = models.ManyToManyField(Target) # rel_opts.object_name == "Target" rel_opts = self.remote_field.model._meta # If the field doesn't install a backward relation on the target model # (so `is_hidden` returns True), then there are no clashes to check # and we can skip these fields. 
rel_is_hidden = self.remote_field.is_hidden() rel_name = self.remote_field.get_accessor_name() # i. e. "model_set" rel_query_name = self.related_query_name() # i. e. "model" field_name = "%s.%s" % (opts.object_name, self.name) # i. e. "Model.field" # Check clashes between accessor or reverse query name of `field` # and any other field name -- i.e. accessor for Model.foreign is # model_set and it clashes with Target.model_set. potential_clashes = rel_opts.fields + rel_opts.many_to_many for clash_field in potential_clashes: clash_name = "%s.%s" % (rel_opts.object_name, clash_field.name) # i.e. "Target.model_set" if not rel_is_hidden and clash_field.name == rel_name: errors.append( checks.Error( "Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=("Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'.") % (clash_name, field_name), obj=self, id='fields.E302', ) ) if clash_field.name == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=("Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'.") % (clash_name, field_name), obj=self, id='fields.E303', ) ) # Check clashes between accessors/reverse query names of `field` and # any other field accessor -- i. e. Model.foreign accessor clashes with # Model.m2m accessor. potential_clashes = (r for r in rel_opts.related_objects if r.field is not self) for clash_field in potential_clashes: clash_name = "%s.%s" % ( # i. e. "Model.m2m" clash_field.related_model._meta.object_name, clash_field.field.name) if not rel_is_hidden and clash_field.get_accessor_name() == rel_name: errors.append( checks.Error( "Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name), hint=("Add or change a related_name argument " "to the definition for '%s' or '%s'.") % (field_name, clash_name), obj=self, id='fields.E304', ) ) if clash_field.get_accessor_name() == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with reverse query name for '%s'." % (field_name, clash_name), hint=("Add or change a related_name argument " "to the definition for '%s' or '%s'.") % (field_name, clash_name), obj=self, id='fields.E305', ) ) return errors def db_type(self, connection): # By default related field will not have a column as it relates to # columns from another table. 
return None def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) self.opts = cls._meta if not cls._meta.abstract: if self.remote_field.related_name: related_name = self.remote_field.related_name else: related_name = self.opts.default_related_name if related_name: related_name = related_name % { 'class': cls.__name__.lower(), 'model_name': cls._meta.model_name.lower(), 'app_label': cls._meta.app_label.lower() } self.remote_field.related_name = related_name if self.remote_field.related_query_name: related_query_name = self.remote_field.related_query_name % { 'class': cls.__name__.lower(), 'app_label': cls._meta.app_label.lower(), } self.remote_field.related_query_name = related_query_name def resolve_related_class(model, related, field): field.remote_field.model = related field.do_related_class(related, model) lazy_related_operation(resolve_related_class, cls, self.remote_field.model, field=self) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.remote_field.limit_choices_to: kwargs['limit_choices_to'] = self.remote_field.limit_choices_to if self.remote_field.related_name is not None: kwargs['related_name'] = self.remote_field.related_name if self.remote_field.related_query_name is not None: kwargs['related_query_name'] = self.remote_field.related_query_name return name, path, args, kwargs def get_forward_related_filter(self, obj): """ Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model. """ return { '%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname) for _, rh_field in self.related_fields } def get_reverse_related_filter(self, obj): """ Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. obj is an instance of self.model. """ base_filter = { rh_field.attname: getattr(obj, lh_field.attname) for lh_field, rh_field in self.related_fields } descriptor_filter = self.get_extra_descriptor_filter(obj) base_q = Q(**base_filter) if isinstance(descriptor_filter, dict): return base_q & Q(**descriptor_filter) elif descriptor_filter: return base_q & descriptor_filter return base_q @property def swappable_setting(self): """ Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False. """ if self.swappable: # Work out string form of "to" if isinstance(self.remote_field.model, str): to_string = self.remote_field.model else: to_string = self.remote_field.model._meta.label return apps.get_swappable_settings_name(to_string) return None def set_attributes_from_rel(self): self.name = ( self.name or (self.remote_field.model._meta.model_name + '_' + self.remote_field.model._meta.pk.name) ) if self.verbose_name is None: self.verbose_name = self.remote_field.model._meta.verbose_name self.remote_field.set_field_name() def do_related_class(self, other, cls): self.set_attributes_from_rel() self.contribute_to_related_class(other, self.remote_field) def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this model field. If it is a callable, it will be invoked and the result will be returned. 
""" if callable(self.remote_field.limit_choices_to): return self.remote_field.limit_choices_to() return self.remote_field.limit_choices_to def formfield(self, **kwargs): """ Pass ``limit_choices_to`` to the field being constructed. Only passes it if there is a type that supports related fields. This is a similar strategy used to pass the ``queryset`` to the field being constructed. """ defaults = {} if hasattr(self.remote_field, 'get_related_field'): # If this is a callable, do not invoke it here. Just pass # it in the defaults for when the form class will later be # instantiated. limit_choices_to = self.remote_field.limit_choices_to defaults.update({ 'limit_choices_to': limit_choices_to, }) defaults.update(kwargs) return super().formfield(**defaults) def related_query_name(self): """ Define the name that can be used to identify this related object in a table-spanning query. """ return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name @property def target_field(self): """ When filtering against this relation, return the field on the remote model against which the filtering should happen. """ target_fields = self.get_path_info()[-1].target_fields if len(target_fields) > 1: raise exceptions.FieldError( "The relation has multiple target fields, but only single target field was asked for") return target_fields[0] def get_cache_name(self): return self.name class ForeignObject(RelatedField): """ Abstraction of the ForeignKey relation to support multi-column relations. """ # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False requires_unique_target = True related_accessor_class = ReverseManyToOneDescriptor forward_related_accessor_class = ForwardManyToOneDescriptor rel_class = ForeignObjectRel def __init__(self, to, on_delete, from_fields, to_fields, rel=None, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, swappable=True, **kwargs): if rel is None: rel = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) super().__init__(rel=rel, **kwargs) self.from_fields = from_fields self.to_fields = to_fields self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_to_fields_exist(), *self._check_unique_target(), ] def _check_to_fields_exist(self): # Skip nonexistent models. if isinstance(self.remote_field.model, str): return [] errors = [] for to_field in self.to_fields: if to_field: try: self.remote_field.model._meta.get_field(to_field) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The to_field '%s' doesn't exist on the related " "model '%s'." 
% (to_field, self.remote_field.model._meta.label), obj=self, id='fields.E312', ) ) return errors def _check_unique_target(self): rel_is_string = isinstance(self.remote_field.model, str) if rel_is_string or not self.requires_unique_target: return [] try: self.foreign_related_fields except exceptions.FieldDoesNotExist: return [] if not self.foreign_related_fields: return [] unique_foreign_fields = { frozenset([f.name]) for f in self.remote_field.model._meta.get_fields() if getattr(f, 'unique', False) } unique_foreign_fields.update({ frozenset(ut) for ut in self.remote_field.model._meta.unique_together }) foreign_fields = {f.name for f in self.foreign_related_fields} has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields) if not has_unique_constraint and len(self.foreign_related_fields) > 1: field_combination = ', '.join( "'%s'" % rel_field.name for rel_field in self.foreign_related_fields ) model_name = self.remote_field.model.__name__ return [ checks.Error( "No subset of the fields %s on model '%s' is unique." % (field_combination, model_name), hint=( "Add unique=True on any of those fields or add at " "least a subset of them to a unique_together constraint." ), obj=self, id='fields.E310', ) ] elif not has_unique_constraint: field_name = self.foreign_related_fields[0].name model_name = self.remote_field.model.__name__ return [ checks.Error( "'%s.%s' must set unique=True because it is referenced by " "a foreign key." % (model_name, field_name), obj=self, id='fields.E311', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() kwargs['on_delete'] = self.remote_field.on_delete kwargs['from_fields'] = self.from_fields kwargs['to_fields'] = self.to_fields if self.remote_field.parent_link: kwargs['parent_link'] = self.remote_field.parent_link # Work out string form of "to" if isinstance(self.remote_field.model, str): kwargs['to'] = self.remote_field.model else: kwargs['to'] = "%s.%s" % ( self.remote_field.model._meta.app_label, self.remote_field.model._meta.object_name, ) # If swappable is True, then see if we're actually pointing to the target # of a swap. 
swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error if hasattr(kwargs['to'], "setting_name"): if kwargs['to'].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ForeignKey pointing to a model " "that is swapped in place of more than one model (%s and %s)" % (kwargs['to'].setting_name, swappable_setting) ) # Set it kwargs['to'] = SettingsReference( kwargs['to'], swappable_setting, ) return name, path, args, kwargs def resolve_related_fields(self): if not self.from_fields or len(self.from_fields) != len(self.to_fields): raise ValueError('Foreign Object from and to fields must be the same non-zero length') if isinstance(self.remote_field.model, str): raise ValueError('Related model %r cannot be resolved' % self.remote_field.model) related_fields = [] for index in range(len(self.from_fields)): from_field_name = self.from_fields[index] to_field_name = self.to_fields[index] from_field = ( self if from_field_name == RECURSIVE_RELATIONSHIP_CONSTANT else self.opts.get_field(from_field_name) ) to_field = (self.remote_field.model._meta.pk if to_field_name is None else self.remote_field.model._meta.get_field(to_field_name)) related_fields.append((from_field, to_field)) return related_fields @cached_property def related_fields(self): return self.resolve_related_fields() @cached_property def reverse_related_fields(self): return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields] @cached_property def local_related_fields(self): return tuple(lhs_field for lhs_field, rhs_field in self.related_fields) @cached_property def foreign_related_fields(self): return tuple(rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field) def get_local_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.local_related_fields) def get_foreign_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.foreign_related_fields) @staticmethod def get_instance_value_for_fields(instance, fields): ret = [] opts = instance._meta for field in fields: # Gotcha: in some cases (like fixture loading) a model can have # different values in parent_ptr_id and parent's id. So, use # instance.pk (that is, parent_ptr_id) when asked for instance.id. if field.primary_key: possible_parent_link = opts.get_ancestor_link(field.model) if (not possible_parent_link or possible_parent_link.primary_key or possible_parent_link.model._meta.abstract): ret.append(instance.pk) continue ret.append(getattr(instance, field.attname)) return tuple(ret) def get_attname_column(self): attname, column = super().get_attname_column() return attname, None def get_joining_columns(self, reverse_join=False): source = self.reverse_related_fields if reverse_join else self.related_fields return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source) def get_reverse_joining_columns(self): return self.get_joining_columns(reverse_join=True) def get_extra_descriptor_filter(self, instance): """ Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions. 
""" return {} def get_extra_restriction(self, where_class, alias, related_alias): """ Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching. """ return None def get_path_info(self, filtered_relation=None): """Get path from this field to the related model.""" opts = self.remote_field.model._meta from_opts = self.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=self.foreign_related_fields, join_field=self, m2m=False, direct=True, filtered_relation=filtered_relation, )] def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, )] @classmethod @functools.lru_cache(maxsize=None) def get_lookups(cls): bases = inspect.getmro(cls) bases = bases[:bases.index(ForeignObject) + 1] class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in bases] return cls.merge_dicts(class_lookups) def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) setattr(cls, self.name, self.forward_related_accessor_class(self)) def contribute_to_related_class(self, cls, related): # Internal FK's - i.e., those with a related name ending with '+' - # and swapped models don't get a related descriptor. if not self.remote_field.is_hidden() and not related.related_model._meta.swapped: setattr(cls._meta.concrete_model, related.get_accessor_name(), self.related_accessor_class(related)) # While 'limit_choices_to' might be a callable, simply pass # it along for later - this is too early because it's still # model load time. if self.remote_field.limit_choices_to: cls._meta.related_fkey_lookups.append(self.remote_field.limit_choices_to) ForeignObject.register_lookup(RelatedIn) ForeignObject.register_lookup(RelatedExact) ForeignObject.register_lookup(RelatedLessThan) ForeignObject.register_lookup(RelatedGreaterThan) ForeignObject.register_lookup(RelatedGreaterThanOrEqual) ForeignObject.register_lookup(RelatedLessThanOrEqual) ForeignObject.register_lookup(RelatedIsNull) class ForeignKey(ForeignObject): """ Provide a many-to-one relation by adding a column to the local model to hold the remote value. By default ForeignKey will target the pk of the remote model but this behavior can be changed by using the ``to_field`` argument. """ descriptor_class = ForeignKeyDeferredAttribute # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False rel_class = ManyToOneRel empty_strings_allowed = False default_error_messages = { 'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.') } description = _("Foreign Key (type determined by related field)") def __init__(self, to, on_delete, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, to_field=None, db_constraint=True, **kwargs): try: to._meta.model_name except AttributeError: assert isinstance(to, str), ( "%s(%r) is invalid. 
First parameter to ForeignKey must be " "either a model, a model name, or the string %r" % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) else: # For backwards compatibility purposes, we need to *try* and set # the to_field during FK construction. It won't be guaranteed to # be correct until contribute_to_class is called. Refs #12190. to_field = to_field or (to._meta.pk and to._meta.pk.name) if not callable(on_delete): raise TypeError('on_delete must be callable.') kwargs['rel'] = self.rel_class( self, to, to_field, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) kwargs.setdefault('db_index', True) super().__init__( to, on_delete, from_fields=[RECURSIVE_RELATIONSHIP_CONSTANT], to_fields=[to_field], **kwargs, ) self.db_constraint = db_constraint def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_on_delete(), *self._check_unique(), ] def _check_on_delete(self): on_delete = getattr(self.remote_field, 'on_delete', None) if on_delete == SET_NULL and not self.null: return [ checks.Error( 'Field specifies on_delete=SET_NULL, but cannot be null.', hint='Set null=True argument on the field, or change the on_delete rule.', obj=self, id='fields.E320', ) ] elif on_delete == SET_DEFAULT and not self.has_default(): return [ checks.Error( 'Field specifies on_delete=SET_DEFAULT, but has no default value.', hint='Set a default value, or change the on_delete rule.', obj=self, id='fields.E321', ) ] else: return [] def _check_unique(self, **kwargs): return [ checks.Warning( 'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.', hint='ForeignKey(unique=True) is usually better served by a OneToOneField.', obj=self, id='fields.W342', ) ] if self.unique else [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['to_fields'] del kwargs['from_fields'] # Handle the simpler arguments if self.db_index: del kwargs['db_index'] else: kwargs['db_index'] = False if self.db_constraint is not True: kwargs['db_constraint'] = self.db_constraint # Rel needs more work. 
to_meta = getattr(self.remote_field.model, "_meta", None) if self.remote_field.field_name and ( not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)): kwargs['to_field'] = self.remote_field.field_name return name, path, args, kwargs def to_python(self, value): return self.target_field.to_python(value) @property def target_field(self): return self.foreign_related_fields[0] def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, )] def validate(self, value, model_instance): if self.remote_field.parent_link: return super().validate(value, model_instance) if value is None: return using = router.db_for_read(self.remote_field.model, instance=model_instance) qs = self.remote_field.model._default_manager.using(using).filter( **{self.remote_field.field_name: value} ) qs = qs.complex_filter(self.get_limit_choices_to()) if not qs.exists(): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={ 'model': self.remote_field.model._meta.verbose_name, 'pk': value, 'field': self.remote_field.field_name, 'value': value, }, # 'pk' is included for backwards compatibility ) def resolve_related_fields(self): related_fields = super().resolve_related_fields() for from_field, to_field in related_fields: if to_field and to_field.model != self.remote_field.model._meta.concrete_model: raise exceptions.FieldError( "'%s.%s' refers to field '%s' which is not local to model " "'%s'." % ( self.model._meta.label, self.name, to_field.name, self.remote_field.model._meta.concrete_model._meta.label, ) ) return related_fields def get_attname(self): return '%s_id' % self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_default(self): """Return the to_field if the default value is an object.""" field_default = super().get_default() if isinstance(field_default, self.remote_field.model): return getattr(field_default, self.target_field.attname) return field_default def get_db_prep_save(self, value, connection): if value is None or (value == '' and (not self.target_field.empty_strings_allowed or connection.features.interprets_empty_strings_as_nulls)): return None else: return self.target_field.get_db_prep_save(value, connection=connection) def get_db_prep_value(self, value, connection, prepared=False): return self.target_field.get_db_prep_value(value, connection, prepared) def get_prep_value(self, value): return self.target_field.get_prep_value(value) def contribute_to_related_class(self, cls, related): super().contribute_to_related_class(cls, related) if self.remote_field.field_name is None: self.remote_field.field_name = cls._meta.pk.name def formfield(self, *, using=None, **kwargs): if isinstance(self.remote_field.model, str): raise ValueError("Cannot create form field for %r yet, because " "its related model %r has not been loaded yet" % (self.name, self.remote_field.model)) return super().formfield(**{ 'form_class': forms.ModelChoiceField, 'queryset': self.remote_field.model._default_manager.using(using), 'to_field_name': self.remote_field.field_name, **kwargs, }) def db_check(self, connection): return [] def db_type(self, connection): return 
self.target_field.rel_db_type(connection=connection) def db_parameters(self, connection): return {"type": self.db_type(connection), "check": self.db_check(connection)} def convert_empty_strings(self, value, expression, connection): if (not value) and isinstance(value, str): return None return value def get_db_converters(self, connection): converters = super().get_db_converters(connection) if connection.features.interprets_empty_strings_as_nulls: converters += [self.convert_empty_strings] return converters def get_col(self, alias, output_field=None): if output_field is None: output_field = self.target_field while isinstance(output_field, ForeignKey): output_field = output_field.target_field if output_field is self: raise ValueError('Cannot resolve output_field.') return super().get_col(alias, output_field) class OneToOneField(ForeignKey): """ A OneToOneField is essentially the same as a ForeignKey, with the exception that it always carries a "unique" constraint with it and the reverse relation always returns the object pointed to (since there will only ever be one), rather than returning a list. """ # Field flags many_to_many = False many_to_one = False one_to_many = False one_to_one = True related_accessor_class = ReverseOneToOneDescriptor forward_related_accessor_class = ForwardOneToOneDescriptor rel_class = OneToOneRel description = _("One-to-one relationship") def __init__(self, to, on_delete, to_field=None, **kwargs): kwargs['unique'] = True super().__init__(to, on_delete, to_field=to_field, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if "unique" in kwargs: del kwargs['unique'] return name, path, args, kwargs def formfield(self, **kwargs): if self.remote_field.parent_link: return None return super().formfield(**kwargs) def save_form_data(self, instance, data): if isinstance(data, self.remote_field.model): setattr(instance, self.name, data) else: setattr(instance, self.attname, data) # Remote field object must be cleared otherwise Model.save() # will reassign attname using the related object pk. if data is None: setattr(instance, self.name, data) def _check_unique(self, **kwargs): # Override ForeignKey since check isn't applicable here. return [] def create_many_to_many_intermediary_model(field, klass): from django.db import models def set_managed(model, related, through): through._meta.managed = model._meta.managed or related._meta.managed to_model = resolve_relation(klass, field.remote_field.model) name = '%s_%s' % (klass._meta.object_name, field.name) lazy_related_operation(set_managed, klass, to_model, name) to = make_model_tuple(to_model)[1] from_ = klass._meta.model_name if to == from_: to = 'to_%s' % to from_ = 'from_%s' % from_ meta = type('Meta', (), { 'db_table': field._get_m2m_db_table(klass._meta), 'auto_created': klass, 'app_label': klass._meta.app_label, 'db_tablespace': klass._meta.db_tablespace, 'unique_together': (from_, to), 'verbose_name': _('%(from)s-%(to)s relationship') % {'from': from_, 'to': to}, 'verbose_name_plural': _('%(from)s-%(to)s relationships') % {'from': from_, 'to': to}, 'apps': field.model._meta.apps, }) # Construct and return the new class. 
return type(name, (models.Model,), { 'Meta': meta, '__module__': klass.__module__, from_: models.ForeignKey( klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ), to: models.ForeignKey( to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ) }) class ManyToManyField(RelatedField): """ Provide a many-to-many relation by using an intermediary model that holds two ForeignKey fields pointed at the two sides of the relation. Unless a ``through`` model was provided, ManyToManyField will use the create_many_to_many_intermediary_model factory to automatically generate the intermediary model. """ # Field flags many_to_many = True many_to_one = False one_to_many = False one_to_one = False rel_class = ManyToManyRel description = _("Many-to-many relationship") def __init__(self, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=None, through=None, through_fields=None, db_constraint=True, db_table=None, swappable=True, **kwargs): try: to._meta except AttributeError: assert isinstance(to, str), ( "%s(%r) is invalid. First parameter to ManyToManyField must be " "either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT) ) if symmetrical is None: symmetrical = (to == RECURSIVE_RELATIONSHIP_CONSTANT) if through is not None: assert db_table is None, ( "Cannot specify a db_table if an intermediary model is used." ) kwargs['rel'] = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, symmetrical=symmetrical, through=through, through_fields=through_fields, db_constraint=db_constraint, ) self.has_null_arg = 'null' in kwargs super().__init__(**kwargs) self.db_table = db_table self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_unique(**kwargs), *self._check_relationship_model(**kwargs), *self._check_ignored_options(**kwargs), *self._check_table_uniqueness(**kwargs), ] def _check_unique(self, **kwargs): if self.unique: return [ checks.Error( 'ManyToManyFields cannot be unique.', obj=self, id='fields.E330', ) ] return [] def _check_ignored_options(self, **kwargs): warnings = [] if self.has_null_arg: warnings.append( checks.Warning( 'null has no effect on ManyToManyField.', obj=self, id='fields.W340', ) ) if self._validators: warnings.append( checks.Warning( 'ManyToManyField does not support validators.', obj=self, id='fields.W341', ) ) if (self.remote_field.limit_choices_to and self.remote_field.through and not self.remote_field.through._meta.auto_created): warnings.append( checks.Warning( 'limit_choices_to has no effect on ManyToManyField ' 'with a through model.', obj=self, id='fields.W343', ) ) return warnings def _check_relationship_model(self, from_model=None, **kwargs): if hasattr(self.remote_field.through, '_meta'): qualified_model_name = "%s.%s" % ( self.remote_field.through._meta.app_label, self.remote_field.through.__name__) else: qualified_model_name = self.remote_field.through errors = [] if self.remote_field.through not in self.opts.apps.get_models(include_auto_created=True): # The relationship model is not installed. errors.append( checks.Error( "Field specifies a many-to-many relation through model " "'%s', which has not been installed." 
% qualified_model_name,
                    obj=self,
                    id='fields.E331',
                )
            )
        else:
            assert from_model is not None, (
                "ManyToManyField with intermediate "
                "tables cannot be checked if you don't pass the model "
                "where the field is attached to."
            )
            # Set some useful local variables
            to_model = resolve_relation(from_model, self.remote_field.model)
            from_model_name = from_model._meta.object_name
            if isinstance(to_model, str):
                to_model_name = to_model
            else:
                to_model_name = to_model._meta.object_name
            relationship_model_name = self.remote_field.through._meta.object_name
            self_referential = from_model == to_model
            # Count foreign keys in intermediate model
            if self_referential:
                seen_self = sum(
                    from_model == getattr(field.remote_field, 'model', None)
                    for field in self.remote_field.through._meta.fields
                )
                if seen_self > 2 and not self.remote_field.through_fields:
                    errors.append(
                        checks.Error(
                            "The model is used as an intermediate model by "
                            "'%s', but it has more than two foreign keys "
                            "to '%s', which is ambiguous. You must specify "
                            "which two foreign keys Django should use via the "
                            "through_fields keyword argument." % (self, from_model_name),
                            hint="Use through_fields to specify which two foreign keys Django should use.",
                            obj=self.remote_field.through,
                            id='fields.E333',
                        )
                    )
            else:
                # Count foreign keys in relationship model
                seen_from = sum(
                    from_model == getattr(field.remote_field, 'model', None)
                    for field in self.remote_field.through._meta.fields
                )
                seen_to = sum(
                    to_model == getattr(field.remote_field, 'model', None)
                    for field in self.remote_field.through._meta.fields
                )
                if seen_from > 1 and not self.remote_field.through_fields:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it has more than one foreign key "
                             "from '%s', which is ambiguous. You must specify "
                             "which foreign key Django should use via the "
                             "through_fields keyword argument.") % (self, from_model_name),
                            # ForeignKey takes neither a symmetrical nor a
                            # through argument; the recursive-relationship
                            # hint must name ManyToManyField.
                            hint=(
                                'If you want to create a recursive relationship, '
                                'use ManyToManyField("%s", through="%s").'
                            ) % (
                                RECURSIVE_RELATIONSHIP_CONSTANT,
                                relationship_model_name,
                            ),
                            obj=self,
                            id='fields.E334',
                        )
                    )
                if seen_to > 1 and not self.remote_field.through_fields:
                    errors.append(
                        checks.Error(
                            "The model is used as an intermediate model by "
                            "'%s', but it has more than one foreign key "
                            "to '%s', which is ambiguous. You must specify "
                            "which foreign key Django should use via the "
                            "through_fields keyword argument." % (self, to_model_name),
                            hint=(
                                'If you want to create a recursive relationship, '
                                'use ManyToManyField("%s", through="%s").'
                            ) % (
                                RECURSIVE_RELATIONSHIP_CONSTANT,
                                relationship_model_name,
                            ),
                            obj=self,
                            id='fields.E335',
                        )
                    )
                if seen_from == 0 or seen_to == 0:
                    errors.append(
                        checks.Error(
                            "The model is used as an intermediate model by "
                            "'%s', but it does not have a foreign key to '%s' or '%s'." % (
                                self, from_model_name, to_model_name
                            ),
                            obj=self.remote_field.through,
                            id='fields.E336',
                        )
                    )
        # Validate `through_fields`.
        if self.remote_field.through_fields is not None:
            # Validate that we're given an iterable of at least two items
            # and that none of them is "falsy".
            if not (len(self.remote_field.through_fields) >= 2 and
                    self.remote_field.through_fields[0] and self.remote_field.through_fields[1]):
                errors.append(
                    checks.Error(
                        "Field specifies 'through_fields' but does not provide "
                        "the names of the two link fields that should be used "
                        "for the relation through model '%s'."
% qualified_model_name, hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')", obj=self, id='fields.E337', ) ) # Validate the given through fields -- they should be actual # fields on the through model, and also be foreign keys to the # expected models. else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) source, through, target = from_model, self.remote_field.through, self.remote_field.model source_field_name, target_field_name = self.remote_field.through_fields[:2] for field_name, related_model in ((source_field_name, source), (target_field_name, target)): possible_field_names = [] for f in through._meta.fields: if hasattr(f, 'remote_field') and getattr(f.remote_field, 'model', None) == related_model: possible_field_names.append(f.name) if possible_field_names: hint = "Did you mean one of the following foreign keys to '%s': %s?" % ( related_model._meta.object_name, ', '.join(possible_field_names), ) else: hint = None try: field = through._meta.get_field(field_name) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The intermediary model '%s' has no field '%s'." % (qualified_model_name, field_name), hint=hint, obj=self, id='fields.E338', ) ) else: if not (hasattr(field, 'remote_field') and getattr(field.remote_field, 'model', None) == related_model): errors.append( checks.Error( "'%s.%s' is not a foreign key to '%s'." % ( through._meta.object_name, field_name, related_model._meta.object_name, ), hint=hint, obj=self, id='fields.E339', ) ) return errors def _check_table_uniqueness(self, **kwargs): if isinstance(self.remote_field.through, str) or not self.remote_field.through._meta.managed: return [] registered_tables = { model._meta.db_table: model for model in self.opts.apps.get_models(include_auto_created=True) if model != self.remote_field.through and model._meta.managed } m2m_db_table = self.m2m_db_table() model = registered_tables.get(m2m_db_table) # The second condition allows multiple m2m relations on a model if # some point to a through model that proxies another through model. if model and model._meta.concrete_model != self.remote_field.through._meta.concrete_model: if model._meta.auto_created: def _get_field_name(model): for field in model._meta.auto_created._meta.many_to_many: if field.remote_field.through is model: return field.name opts = model._meta.auto_created._meta clashing_obj = '%s.%s' % (opts.label, _get_field_name(model)) else: clashing_obj = model._meta.label return [ checks.Error( "The field's intermediary table '%s' clashes with the " "table name of '%s'." % (m2m_db_table, clashing_obj), obj=self, id='fields.E340', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() # Handle the simpler arguments. if self.db_table is not None: kwargs['db_table'] = self.db_table if self.remote_field.db_constraint is not True: kwargs['db_constraint'] = self.remote_field.db_constraint # Rel needs more work. 
if isinstance(self.remote_field.model, str): kwargs['to'] = self.remote_field.model else: kwargs['to'] = "%s.%s" % ( self.remote_field.model._meta.app_label, self.remote_field.model._meta.object_name, ) if getattr(self.remote_field, 'through', None) is not None: if isinstance(self.remote_field.through, str): kwargs['through'] = self.remote_field.through elif not self.remote_field.through._meta.auto_created: kwargs['through'] = "%s.%s" % ( self.remote_field.through._meta.app_label, self.remote_field.through._meta.object_name, ) # If swappable is True, then see if we're actually pointing to the target # of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error. if hasattr(kwargs['to'], "setting_name"): if kwargs['to'].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ManyToManyField pointing to a " "model that is swapped in place of more than one model " "(%s and %s)" % (kwargs['to'].setting_name, swappable_setting) ) kwargs['to'] = SettingsReference( kwargs['to'], swappable_setting, ) return name, path, args, kwargs def _get_path_info(self, direct=False, filtered_relation=None): """Called by both direct and indirect m2m traversal.""" int_model = self.remote_field.through linkfield1 = int_model._meta.get_field(self.m2m_field_name()) linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = linkfield1.get_reverse_path_info() join2infos = linkfield2.get_path_info(filtered_relation) else: join1infos = linkfield2.get_reverse_path_info() join2infos = linkfield1.get_path_info(filtered_relation) # Get join infos between the last model of join 1 and the first model # of join 2. Assume the only reason these may differ is due to model # inheritance. join1_final = join1infos[-1].to_opts join2_initial = join2infos[0].from_opts if join1_final is join2_initial: intermediate_infos = [] elif issubclass(join1_final.model, join2_initial.model): intermediate_infos = join1_final.get_path_to_parent(join2_initial.model) else: intermediate_infos = join2_initial.get_path_from_parent(join1_final.model) return [*join1infos, *intermediate_infos, *join2infos] def get_path_info(self, filtered_relation=None): return self._get_path_info(direct=True, filtered_relation=filtered_relation) def get_reverse_path_info(self, filtered_relation=None): return self._get_path_info(direct=False, filtered_relation=filtered_relation) def _get_m2m_db_table(self, opts): """ Function that can be curried to provide the m2m table name for this relation. """ if self.remote_field.through is not None: return self.remote_field.through._meta.db_table elif self.db_table: return self.db_table else: m2m_table_name = '%s_%s' % (utils.strip_quotes(opts.db_table), self.name) return utils.truncate_name(m2m_table_name, connection.ops.max_name_length()) def _get_m2m_attr(self, related, attr): """ Function that can be curried to provide the source accessor or DB column name for the m2m table. 
""" cache_attr = '_m2m_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[0] else: link_field_name = None for f in self.remote_field.through._meta.fields: if (f.is_relation and f.remote_field.model == related.related_model and (link_field_name is None or link_field_name == f.name)): setattr(self, cache_attr, getattr(f, attr)) return getattr(self, cache_attr) def _get_m2m_reverse_attr(self, related, attr): """ Function that can be curried to provide the related accessor or DB column name for the m2m table. """ cache_attr = '_m2m_reverse_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) found = False if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[1] else: link_field_name = None for f in self.remote_field.through._meta.fields: if f.is_relation and f.remote_field.model == related.model: if link_field_name is None and related.related_model == related.model: # If this is an m2m-intermediate to self, # the first foreign key you find will be # the source column. Keep searching for # the second foreign key. if found: setattr(self, cache_attr, getattr(f, attr)) break else: found = True elif link_field_name is None or link_field_name == f.name: setattr(self, cache_attr, getattr(f, attr)) break return getattr(self, cache_attr) def contribute_to_class(self, cls, name, **kwargs): # To support multiple relations to self, it's useful to have a non-None # related name on symmetrical relations for internal reasons. The # concept doesn't make a lot of sense externally ("you want me to # specify *what* on my non-reversible relation?!"), so we set it up # automatically. The funky name reduces the chance of an accidental # clash. if self.remote_field.symmetrical and ( self.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT or self.remote_field.model == cls._meta.object_name ): self.remote_field.related_name = "%s_rel_+" % name elif self.remote_field.is_hidden(): # If the backwards relation is disabled, replace the original # related_name with one generated from the m2m field name. Django # still uses backwards relations internally and we need to avoid # clashes between multiple m2m fields with related_name == '+'. self.remote_field.related_name = "_%s_%s_+" % (cls.__name__.lower(), name) super().contribute_to_class(cls, name, **kwargs) # The intermediate m2m model is not auto created if: # 1) There is a manually specified intermediate, or # 2) The class owning the m2m field is abstract. # 3) The class owning the m2m field has been swapped out. if not cls._meta.abstract: if self.remote_field.through: def resolve_through_model(_, model, field): field.remote_field.through = model lazy_related_operation(resolve_through_model, cls, self.remote_field.through, field=self) elif not cls._meta.swapped: self.remote_field.through = create_many_to_many_intermediary_model(self, cls) # Add the descriptor for the m2m relation. setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False)) # Set up the accessor for the m2m table name for the relation. self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta) def contribute_to_related_class(self, cls, related): # Internal M2Ms (i.e., those with a related name ending with '+') # and swapped models don't get a related descriptor. 
if not self.remote_field.is_hidden() and not related.related_model._meta.swapped: setattr(cls, related.get_accessor_name(), ManyToManyDescriptor(self.remote_field, reverse=True)) # Set up the accessors for the column names on the m2m table. self.m2m_column_name = partial(self._get_m2m_attr, related, 'column') self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, 'column') self.m2m_field_name = partial(self._get_m2m_attr, related, 'name') self.m2m_reverse_field_name = partial(self._get_m2m_reverse_attr, related, 'name') get_m2m_rel = partial(self._get_m2m_attr, related, 'remote_field') self.m2m_target_field_name = lambda: get_m2m_rel().field_name get_m2m_reverse_rel = partial(self._get_m2m_reverse_attr, related, 'remote_field') self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name def set_attributes_from_rel(self): pass def value_from_object(self, obj): return [] if obj.pk is None else list(getattr(obj, self.attname).all()) def save_form_data(self, instance, data): getattr(instance, self.attname).set(data) def formfield(self, *, using=None, **kwargs): defaults = { 'form_class': forms.ModelMultipleChoiceField, 'queryset': self.remote_field.model._default_manager.using(using), **kwargs, } # If initial is passed in, it's a list of related objects, but the # MultipleChoiceField takes a list of IDs. if defaults.get('initial') is not None: initial = defaults['initial'] if callable(initial): initial = initial() defaults['initial'] = [i.pk for i in initial] return super().formfield(**defaults) def db_check(self, connection): return None def db_type(self, connection): # A ManyToManyField is not represented by a single column, # so return None. return None def db_parameters(self, connection): return {"type": None, "check": None}
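# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: the implicit through
# model that create_many_to_many_intermediary_model() generates, written out
# as an explicit, hypothetical model. For a bare `ManyToManyField(Person)`
# declared on `Group`, the auto-generated intermediary is roughly equivalent
# to GroupMembers below. The model names and the 'demo' app label are made
# up; db_table, verbose_name, and the exact '+'-suffixed related names the
# factory assigns are simplified, and actually calling this function
# requires a configured Django environment.

def _demo_equivalent_through_model():
    from django.db import models

    class Person(models.Model):
        class Meta:
            app_label = 'demo'

    class Group(models.Model):
        members = models.ManyToManyField(Person, through='GroupMembers')

        class Meta:
            app_label = 'demo'

    class GroupMembers(models.Model):
        # Mirrors the two ForeignKeys the factory adds: one back to the
        # declaring model and one to the target, both CASCADE and with
        # hidden ('+') reverse relations.
        group = models.ForeignKey(Group, models.CASCADE, related_name='+')
        person = models.ForeignKey(Person, models.CASCADE, related_name='+')

        class Meta:
            app_label = 'demo'
            unique_together = ('group', 'person')

    return Group, Person, GroupMembers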
import collections
import re
from functools import partial
from itertools import chain

from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref, Value
from django.db.models.functions import Cast
from django.db.models.query_utils import Q, select_related_descend
from django.db.models.sql.constants import (
    CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable


class SQLCompiler:
    def __init__(self, query, connection, using):
        self.query = query
        self.connection = connection
        self.using = using
        self.quote_cache = {'*': '*'}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); they are set as a side effect of executing the
        # query. Note that we separately calculate a list of extra select
        # columns needed for grammatical correctness of the query, but those
        # columns are not included in self.select.
        self.select = None
        self.annotation_col_map = None
        self.klass_info = None
        # A multiline ordering SQL clause may appear from RawSQL.
        self.ordering_parts = re.compile(r'^(.*)\s(ASC|DESC)(.*)', re.MULTILINE | re.DOTALL)
        self._meta_ordering = None

    def setup_query(self):
        if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
            self.query.get_initial_alias()
        self.select, self.klass_info, self.annotation_col_map = self.get_select()
        self.col_count = len(self.select)

    def pre_sql_setup(self):
        """
        Do any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        """
        self.setup_query()
        order_by = self.get_order_by()
        self.where, self.having = self.query.where.split_having()
        extra_select = self.get_extra_select(order_by, self.select)
        self.has_extra_select = bool(extra_select)
        group_by = self.get_group_by(self.select + extra_select, order_by)
        return extra_select, order_by, group_by

    def get_group_by(self, select, order_by):
        """
        Return a list of 2-tuples of form (sql, params).

        The logic of what exactly the GROUP BY clause contains is hard to
        describe in any way other than "if it passes the test suite, then it
        is correct".
        """
        # Some examples:
        #     SomeModel.objects.annotate(Count('somecol'))
        #     GROUP BY: all fields of the model
        #
        #    SomeModel.objects.values('name').annotate(Count('somecol'))
        #    GROUP BY: name
        #
        #    SomeModel.objects.annotate(Count('somecol')).values('name')
        #    GROUP BY: all cols of the model
        #
        #    SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
        #    GROUP BY: name, pk
        #
        #    SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
        #    GROUP BY: name, pk
        #
        # In fact, the self.query.group_by is the minimal set to GROUP BY. It
        # can never be restricted to a smaller set, but additional columns in
        # HAVING, ORDER BY, and SELECT clauses are added to it.
Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, 'as_sql'): expressions.append(self.query.resolve_ref(expr)) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. ref_sources = { expr.source for expr in expressions if isinstance(expr, Ref) } for expr, _, _ in select: # Skip members of the select clause that are already included # by reference. if expr in ref_sources: continue cols = expr.get_group_by_cols() for col in cols: expressions.append(col) for expr, (sql, params, is_ref) in order_by: # Skip References to the select clause, as all expressions in the # select clause are already part of the group by. if not is_ref: expressions.extend(expr.get_group_by_cols()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: sql, params = self.compile(expr) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. if self.connection.features.allows_group_by_pk: # Determine if the main model's primary key is in the query. pk = None for expr in expressions: # Is this a reference to query's base table primary key? If the # expression isn't a Col-like, then skip the expression. if (getattr(expr, 'target', None) == self.query.model._meta.pk and getattr(expr, 'alias', None) == self.query.base_table): pk = expr break # If the main model's primary key is in the query, group by that # field, HAVING expressions, and expressions associated with tables # that don't have a primary key included in the grouped columns. if pk: pk_aliases = { expr.alias for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key } expressions = [pk] + [ expr for expr in expressions if expr in having or ( getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases ) ] elif self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. 
This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. # Unmanaged models are excluded because they could be representing # database views on which the optimization might not be allowed. pks = { expr for expr in expressions if ( hasattr(expr, 'target') and expr.target.primary_key and self.connection.features.allows_group_by_selected_pks_on_model(expr.target.model) ) } aliases = {expr.alias for expr in pks} expressions = [ expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases ] return expressions def get_select(self): """ Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - The base model of the query. - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. """ select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: cols = self.get_default_columns() else: # self.query.select is a special case. These columns never go to # any model. cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { 'model': self.query.model, 'select_fields': select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select) klass_info['related_klass_infos'] = related_klass_infos def get_select_from_parent(klass_info): for ki in klass_info['related_klass_infos']: if ki['from_parent']: ki['select_fields'] = (klass_info['select_fields'] + ki['select_fields']) get_select_from_parent(ki) get_select_from_parent(klass_info) ret = [] for col, alias in select: try: sql, params = self.compile(col) except EmptyResultSet: # Select a predicate that's always False. sql, params = '0', () else: sql, params = col.select_format(self, sql, params) ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def get_order_by(self): """ Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). """ if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif self.query.get_meta().ordering: ordering = self.query.get_meta().ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: asc, desc = ORDER_DIR['ASC'] else: asc, desc = ORDER_DIR['DESC'] order_by = [] for field in ordering: if hasattr(field, 'resolve_expression'): if isinstance(field, Value): # output_field must be resolved for constants. 
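# e.g. ordering by Value(1) would otherwise reach the compiler without a
# database type; wrapping it in Cast below pins the constant to its
# output_field so the backend can render it.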
field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() order_by.append((field, False)) continue if field == '?': # random order_by.append((OrderBy(Random()), False)) continue col, order = get_order_dir(field, asc) descending = order == 'DESC' if col in self.query.annotation_select: # Reference to expression in SELECT clause order_by.append(( OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending), True)) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) order_by.append((OrderBy(expr, descending=descending), False)) continue if '.' in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split('.', 1) order_by.append(( OrderBy( RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []), descending=descending ), False)) continue if not self.query.extra or col not in self.query.extra: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. order_by.extend(self.find_ordering_name( field, self.query.get_meta(), default_order=asc)) else: if col not in self.query.extra_select: order_by.append(( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False)) else: order_by.append(( OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True)) result = [] seen = set() for expr, is_ref in order_by: resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if self.query.combinator: src = resolved.get_source_expressions()[0] # Relabel order by columns to raw numbers if this is a combined # query; necessary since the columns can't be referenced by the # fully qualified name and the simple column names may collide. for idx, (sel_expr, _, col_alias) in enumerate(self.select): if is_ref and col_alias == src.refs: src = src.source elif col_alias: continue if src == sel_expr: resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())]) break else: if col_alias: raise DatabaseError('ORDER BY term does not match any column in the result set.') # Add column used in ORDER BY clause without an alias to # the selected columns. self.query.add_select_col(src) resolved.set_source_expressions([RawSQL('%d' % len(self.query.select), ())]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. 
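# e.g. '"author"."name" ASC' and '"author"."name" DESC' both reduce to
# '"author"."name"' below, so a second occurrence that differs only in
# direction is skipped instead of emitted twice.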
without_ordering = self.ordering_parts.search(sql).group(1) params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql).group(1) if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ((name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( self.query.external_aliases.get(name) and name not in self.query.table_map)): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node): vendor_impl = getattr(node, 'as_' + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection) for query in self.query.combined_queries if not query.is_empty() ] if not features.supports_slicing_ordering_in_compound: for query, compiler in zip(self.query.combined_queries, compilers): if query.low_mark or query.high_mark: raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.') if compiler.get_order_by(): raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.') parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values(( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, )) part_sql, part_args = compiler.as_sql() if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = 'SELECT * FROM ({})'.format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. elif not features.supports_slicing_ordering_in_compound: part_sql = '({})'.format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. 
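# e.g. qs1.union(qs2.none()) compiles to just qs1's SQL: the empty arm is
# always dropped for UNION, and for DIFFERENCE only once a nonempty first
# arm exists to subtract from.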
if combinator == 'union' or (combinator == 'difference' and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == 'union': combinator_sql += ' ALL' braces = '({})' if features.supports_slicing_ordering_in_compound else '{}' sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts)) result = [' {} '.format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup() for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark) combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, 'supports_select_{}'.format(combinator)): raise NotSupportedError('{} is not supported on this database backend.'.format(combinator)) result, params = self.get_combinator_sql(combinator, self.query.combinator_all) else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() where, w_params = self.compile(self.where) if self.where is not None else ("", []) having, h_params = self.compile(self.having) if self.having is not None else ("", []) result = ['SELECT'] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] col_idx = 1 for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)) elif with_col_aliases: s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx) col_idx += 1 params.extend(s_params) out_cols.append(s_sql) result += [', '.join(out_cols), 'FROM', *from_] params.extend(f_params) if self.query.select_for_update and self.connection.features.has_select_for_update: if self.connection.get_autocommit(): raise TransactionManagementError('select_for_update cannot be used outside of a transaction.') if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit: raise NotSupportedError( 'LIMIT/OFFSET is not supported with ' 'select_for_update on this database backend.' ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of # If it's a NOWAIT/SKIP LOCKED/OF query but the backend # doesn't support it, raise NotSupportedError to prevent a # possible deadlock. 
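# e.g. select_for_update(nowait=True) on a backend without native NOWAIT
# raises here instead of silently degrading to a blocking FOR UPDATE.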
if nowait and not self.connection.features.has_select_for_update_nowait: raise NotSupportedError('NOWAIT is not supported on this database backend.') elif skip_locked and not self.connection.features.has_select_for_update_skip_locked: raise NotSupportedError('SKIP LOCKED is not supported on this database backend.') elif of and not self.connection.features.has_select_for_update_of: raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.') for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), ) if for_update_part and self.connection.features.for_update_after_from: result.append(for_update_part) if where: result.append('WHERE %s' % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError('annotate() + distinct(fields) is not implemented.') order_by = order_by or self.connection.ops.force_no_ordering() result.append('GROUP BY %s' % ', '.join(grouping)) if self._meta_ordering: order_by = None if having: result.append('HAVING %s' % having) params.extend(h_params) if self.query.explain_query: result.insert(0, self.connection.ops.explain_query_prefix( self.query.explain_format, **self.query.explain_options )) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) result.append('ORDER BY %s' % ', '.join(ordering)) if with_limit_offset: result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark)) if for_update_part and not self.connection.features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering expressions # to be selected as well. Wrap the query in another subquery # to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): if not alias and with_col_aliases: alias = 'col%d' % index if alias: sub_selects.append("%s.%s" % ( self.connection.ops.quote_name('subquery'), self.connection.ops.quote_name(alias), )) else: select_clone = select.relabeled_clone({select.alias: 'subquery'}) subselect, subparams = select_clone.as_sql(self, self.connection) sub_selects.append(subselect) sub_params.extend(subparams) return 'SELECT %s FROM (%s) subquery' % ( ', '.join(sub_selects), ' '.join(result), ), tuple(sub_params + params) return ' '.join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def get_default_columns(self, start_alias=None, opts=None, from_parent=None): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Return a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, return a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). 
""" result = [] if opts is None: opts = self.query.get_meta() only_load = self.deferred_to_columns() start_alias = start_alias or self.query.get_initial_alias() # The 'seen_models' is used to optimize checking the needed parent # alias for a given field. This also includes None -> start_alias to # be used by local fields. seen_models = {None: start_alias} for field in opts.concrete_fields: model = field.model._meta.concrete_model # A proxy model will have a different model and concrete_model. We # will assign None if the field belongs to this model. if model == opts.model: model = None if from_parent and model is not None and issubclass( from_parent._meta.concrete_model, model._meta.concrete_model): # Avoid loading data for already loaded parents. # We end up here in the case select_related() resolution # proceeds from parent model to child model. In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if field.model in only_load and field.attname not in only_load[field.model]: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause(). """ result = [] params = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(name) else: r, p = self.compile(transform_function(target, alias)) result.append(r) params.append(p) return result, params def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = order == 'DESC' pieces = name.split(LOOKUP_SEP) field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless the attribute name # of the field is specified. if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name: # Firstly, avoid infinite loops. already_seen = already_seen or set() join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins) if join_tuple in already_seen: raise FieldError('Infinite loop caused by ordering.') already_seen.add(join_tuple) results = [] for item in opts.ordering: if hasattr(item, 'resolve_expression') and not isinstance(item, OrderBy): item = item.desc() if descending else item.asc() if isinstance(item, OrderBy): results.append((item, False)) continue results.extend(self.find_ordering_name(item, opts, alias, order, already_seen)) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). 
get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. """ alias = alias or self.query.get_initial_alias() field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias) alias = joins[-1] return field, targets, alias, joins, path, opts, transform_function def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] for alias in tuple(self.query.alias_map): if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: result.append(', %s' % self.quote_name_unless_alias(alias)) return result, params def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain(direct_choices, reverse_choices, self.query._filtered_relations) related_klass_infos = [] if not restricted and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() only_load = self.query.get_loaded_field_names() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info['related_klass_infos'] = related_klass_infos for f in opts.fields: field_model = f.model._meta.concrete_model fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. 
" "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or '(none)', ) ) else: next = False if not select_related_descend(f, restricted, requested, only_load.get(field_model)): continue klass_info = { 'model': f.remote_field.model, 'field': f, 'reverse': False, 'local_setter': f.set_cached_value, 'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None, 'from_parent': False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins( [f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_klass_infos = self.get_related_selections( select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: if not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins([related_field_name], opts, root_alias) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': f.remote_field.set_cached_value, 'remote_setter': f.set_cached_value, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) def local_setter(obj, from_obj): # Set a reverse fk object when relation is non-empty. if from_obj: f.remote_field.set_cached_value(from_obj, obj) def remote_setter(name, obj, from_obj): setattr(from_obj, name, obj) for name in list(requested): # Filtered relations work only on the topmost level. 
if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias) model = join_opts.model alias = joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': local_setter, 'remote_setter': partial(remote_setter, name), 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( 'Invalid field name(s) given in select_related: %s. ' 'Choices are: %s' % ( ', '.join(invalid_fields), ', '.join(_get_field_choices()) or '(none)', ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_parent_klass_info(klass_info): return ( { 'model': parent_model, 'field': parent_link, 'reverse': False, 'select_fields': [ select_index for select_index in klass_info['select_fields'] if self.select[select_index][0].target.model == parent_model ], } for parent_model, parent_link in klass_info['model']._meta.parents.items() ) def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield 'self' else: field = klass_info['field'] if klass_info['reverse']: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in _get_parent_klass_info(klass_info) ) queue.extend( (path, klass_info) for klass_info in klass_info.get('related_klass_infos', []) ) result = [] invalid_names = [] for name in self.query.select_for_update_of: klass_info = self.klass_info if name == 'self': # Find the first selected column from a base model. If it # doesn't exist, don't lock a base model. 
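# e.g. select_for_update(of=('self',)) locks only the queryset's own model;
# other names such as 'author' are resolved below by walking the
# select_related() tree.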
for select_index in klass_info['select_fields']: if self.select[select_index][0].target.model == klass_info['model']: col = self.select[select_index][0] break else: col = None else: for part in name.split(LOOKUP_SEP): klass_infos = ( *klass_info.get('related_klass_infos', []), *_get_parent_klass_info(klass_info), ) for related_klass_info in klass_infos: field = related_klass_info['field'] if related_klass_info['reverse']: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue select_index = klass_info['select_fields'][0] col = self.select[select_index][0] if col is not None: if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( 'Invalid field name(s) given in select_for_update(of=(...)): %s. ' 'Only relational fields followed in the query are allowed. ' 'Choices are: %s.' % ( ', '.join(invalid_names), ', '.join(_get_field_choices()), ) ) return result def deferred_to_columns(self): """ Convert the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Return the dictionary. """ columns = {} self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb) return columns def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size) fields = [s[0] for s in self.select[0:self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ # This is always executed on a query clone, so we can modify self.query self.query.add_extra({'a': 1}, None, None, None, None, None) self.query.set_extra_mask(['a']) return bool(self.execute_sql(SINGLE)) def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """ Run the query against the database and return the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). 
It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ result_type = result_type or NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == CURSOR: # Give the caller the cursor to process and close. return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0:self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count if self.has_extra_select else None, chunk_size, ) if not chunked_fetch or not self.connection.features.can_use_chunked_reads: try: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested, # unless the database doesn't support it. return list(result) finally: # done with the cursor cursor.close() return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = '%s.%s' % (qn(alias), qn2(columns[index])) self.query.where.add( RawSQL('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND') sql, params = self.as_sql() return 'EXISTS (%s)' % sql, params def explain_query(self): result = list(self.execute_sql()) # Some backends return 1 item tuples with strings, and others return # tuples with integers and strings. Flatten them out into strings. for row in result[0]: if not isinstance(row, str): yield ' '.join(str(c) for c in row) else: yield row class SQLInsertCompiler(SQLCompiler): returning_fields = None returning_params = tuple() def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, 'as_sql'): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, 'get_placeholder'): # Some fields (e.g. geo fields) need special munging before # they can be inserted. sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = '%s', [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. 
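# e.g. with an Oracle Spatial 'NULL' placeholder the params list shrinks
# from [None] to [] here; the placeholder itself already spells the NULL.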
params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, 'resolve_expression'): value = value.resolve_expression(self.query, allow_joins=False, for_save=True) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' 'can only be used to update, not to insert.' % (value, field) ) if value.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, value) ) if value.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query (%s=%r).' % (field.name, value) ) else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts) result = ['%s %s' % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append('(%s)' % ', '.join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields] for obj in self.query.objs ] else: # An empty object. value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. 
Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = (not self.returning_fields and self.connection.features.has_bulk_insert) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql( ignore_conflicts=self.query.ignore_conflicts ) if self.returning_fields and self.connection.features.can_return_columns_from_insert: if self.connection.features.can_return_rows_from_bulk_insert: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) # Skip empty r_sql to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. r_sql, self.returning_params = self.connection.ops.return_insert_columns(self.returning_fields) if r_sql: result.append(r_sql) params += [self.returning_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, returning_fields=None): assert not ( returning_fields and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) self.returning_fields = returning_fields with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not self.returning_fields: return [] if self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1: return self.connection.ops.fetch_returned_insert_rows(cursor) if self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 return self.connection.ops.fetch_returned_insert_columns(cursor, self.returning_params) return [self.connection.ops.last_insert_id( cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column )] class SQLDeleteCompiler(SQLCompiler): @cached_property def single_alias(self): return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1 def _as_sql(self, query): result = [ 'DELETE FROM %s' % self.quote_name_unless_alias(query.base_table) ] where, params = self.compile(query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(params) def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ if self.single_alias: return self._as_sql(self.query) innerq = self.query.clone() innerq.__class__ = Query innerq.clear_select_clause() pk = self.query.model._meta.pk innerq.select = [ pk.get_col(self.query.get_initial_alias()) ] outerq = Query(self.query.model) outerq.where = self.query.where_class() outerq.add_q(Q(pk__in=innerq)) return self._as_sql(outerq) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. 
""" self.pre_sql_setup() if not self.query.values: return '', () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, 'resolve_expression'): val = val.resolve_expression(self.query, allow_joins=False, for_save=True) if val.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) if val.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) elif hasattr(val, 'prepare_database_save'): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = '%s' name = field.column if hasattr(val, 'as_sql'): sql, params = self.compile(val) values.append('%s = %s' % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append('%s = %s' % (qn(name), placeholder)) update_params.append(val) else: values.append('%s = NULL' % qn(name)) table = self.query.base_table result = [ 'UPDATE %s SET' % qn(table), ', '.join(values), ] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(True) query.extra = {} query.select = [] query.add_fields([query.get_meta().pk.name]) super().pre_sql_setup() must_pre_select = count > 1 and not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.where = self.query.where_class() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). 
idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) self.query.add_filter(('pk__in', idents)) self.query.related_ids = idents else: # The fast path. Filters and updates in one query. self.query.add_filter(('pk__in', query)) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation) ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ', '.join(sql) params = tuple(params) sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery) params = params + self.query.sub_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
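# A minimal runnable sketch (not part of Django) of cursor_iter()'s contract:
# rows arrive in fetchmany()-sized blocks until the backend's "empty"
# sentinel is returned, trailing extra-select columns are trimmed to
# col_count, and the cursor is closed even on early exit. _FakeCursor and
# the [] sentinel (the usual empty_fetchmany_value) are assumptions made
# only for this demonstration.
class _FakeCursor:
    def __init__(self, rows):
        self._rows = rows
        self.closed = False

    def fetchmany(self, size):
        chunk, self._rows = self._rows[:size], self._rows[size:]
        return chunk

    def close(self):
        self.closed = True


def _demo_cursor_iter():
    cursor = _FakeCursor([(1, 'a', 'x'), (2, 'b', 'x'), (3, 'c', 'x')])
    # col_count=2 drops the extra-select column; itersize=2 sets block size.
    chunks = list(cursor_iter(cursor, sentinel=[], col_count=2, itersize=2))
    assert chunks == [[(1, 'a'), (2, 'b')], [(3, 'c')]]
    assert cursor.closed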
from django.db import InterfaceError from django.db.backends.base.features import BaseDatabaseFeatures class DatabaseFeatures(BaseDatabaseFeatures): interprets_empty_strings_as_nulls = True has_select_for_update = True has_select_for_update_nowait = True has_select_for_update_skip_locked = True has_select_for_update_of = True select_for_update_of_column = True can_return_columns_from_insert = True can_introspect_autofield = True supports_subqueries_in_group_by = False supports_transactions = True supports_timezones = False has_native_duration_field = True can_defer_constraint_checks = True supports_partially_nullable_unique_constraints = False truncates_names = True supports_tablespaces = True supports_sequence_reset = False can_introspect_materialized_views = True can_introspect_time_field = False atomic_transactions = False supports_combined_alters = False nulls_order_largest = True requires_literal_defaults = True closed_cursor_error_class = InterfaceError bare_select_suffix = " FROM DUAL" # select for update with limit can be achieved on Oracle, but not with the current backend. supports_select_for_update_with_limit = False supports_temporal_subtraction = True # Oracle doesn't ignore quoted identifiers case but the current backend # does by uppercasing all identifiers. ignores_table_name_case = True supports_index_on_text_field = False has_case_insensitive_like = False create_test_procedure_without_params_sql = """ CREATE PROCEDURE "TEST_PROCEDURE" AS V_I INTEGER; BEGIN V_I := 1; END; """ create_test_procedure_with_int_param_sql = """ CREATE PROCEDURE "TEST_PROCEDURE" (P_I INTEGER) AS V_I INTEGER; BEGIN V_I := P_I; END; """ supports_callproc_kwargs = True supports_over_clause = True supports_frame_range_fixed_distance = True supports_ignore_conflicts = False max_query_params = 2**16 - 1 supports_partial_indexes = False supports_slicing_ordering_in_compound = True allows_multiple_constraints_on_same_fields = False supports_boolean_expr_in_select_clause = False
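# A minimal sketch (not part of Django) of how one of these flags is read.
# Oracle cannot execute a bare "SELECT 1", so code that builds a SELECT with
# no FROM clause of its own appends bare_select_suffix. _demo_bare_select is
# a hypothetical helper used for illustration only.
def _demo_bare_select(expression_sql):
    # 'SELECT 1 FROM DUAL' when expression_sql == '1'
    return 'SELECT %s%s' % (expression_sql, DatabaseFeatures.bare_select_suffix)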
""" Oracle database backend for Django. Requires cx_Oracle: https://oracle.github.io/python-cx_Oracle/ """ import datetime import decimal import os import platform from contextlib import contextmanager from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.asyncio import async_unsafe from django.utils.encoding import force_bytes, force_str from django.utils.functional import cached_property def _setup_environment(environ): # Cygwin requires some special voodoo to set the environment variables # properly so that Oracle will see them. if platform.system().upper().startswith('CYGWIN'): try: import ctypes except ImportError as e: raise ImproperlyConfigured("Error loading ctypes: %s; " "the Oracle backend requires ctypes to " "operate correctly under Cygwin." % e) kernel32 = ctypes.CDLL('kernel32') for name, value in environ: kernel32.SetEnvironmentVariableA(name, value) else: os.environ.update(environ) _setup_environment([ # Oracle takes client-side character set encoding from the environment. ('NLS_LANG', '.AL32UTF8'), # This prevents unicode from getting mangled by getting encoded into the # potentially non-unicode database character set. ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'), ]) try: import cx_Oracle as Database except ImportError as e: raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e) # Some of these import cx_Oracle, so import them after checking if it's installed. from .client import DatabaseClient # NOQA isort:skip from .creation import DatabaseCreation # NOQA isort:skip from .features import DatabaseFeatures # NOQA isort:skip from .introspection import DatabaseIntrospection # NOQA isort:skip from .operations import DatabaseOperations # NOQA isort:skip from .schema import DatabaseSchemaEditor # NOQA isort:skip from .utils import Oracle_datetime # NOQA isort:skip from .validation import DatabaseValidation # NOQA isort:skip @contextmanager def wrap_oracle_errors(): try: yield except Database.DatabaseError as e: # cx_Oracle raises a cx_Oracle.DatabaseError exception with the # following attributes and values: # code = 2091 # message = 'ORA-02091: transaction rolled back # 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS # _C00102056) violated - parent key not found' # Convert that case to Django's IntegrityError exception. x = e.args[0] if hasattr(x, 'code') and hasattr(x, 'message') and x.code == 2091 and 'ORA-02291' in x.message: raise IntegrityError(*tuple(e.args)) raise class _UninitializedOperatorsDescriptor: def __get__(self, instance, cls=None): # If connection.operators is looked up before a connection has been # created, transparently initialize connection.operators to avert an # AttributeError. if instance is None: raise AttributeError("operators not available as class attribute") # Creating a cursor will initialize the operators. instance.cursor().close() return instance.__dict__['operators'] class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'oracle' display_name = 'Oracle' # This dictionary maps Field objects to their associated Oracle column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. 
# # Any format strings starting with "qn_" are quoted before being used in the # output (the "qn_" prefix is stripped before the lookup is performed. data_types = { 'AutoField': 'NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY', 'BigAutoField': 'NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY', 'BinaryField': 'BLOB', 'BooleanField': 'NUMBER(1)', 'CharField': 'NVARCHAR2(%(max_length)s)', 'DateField': 'DATE', 'DateTimeField': 'TIMESTAMP', 'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)', 'DurationField': 'INTERVAL DAY(9) TO SECOND(6)', 'FileField': 'NVARCHAR2(%(max_length)s)', 'FilePathField': 'NVARCHAR2(%(max_length)s)', 'FloatField': 'DOUBLE PRECISION', 'IntegerField': 'NUMBER(11)', 'BigIntegerField': 'NUMBER(19)', 'IPAddressField': 'VARCHAR2(15)', 'GenericIPAddressField': 'VARCHAR2(39)', 'NullBooleanField': 'NUMBER(1)', 'OneToOneField': 'NUMBER(11)', 'PositiveBigIntegerField': 'NUMBER(19)', 'PositiveIntegerField': 'NUMBER(11)', 'PositiveSmallIntegerField': 'NUMBER(11)', 'SlugField': 'NVARCHAR2(%(max_length)s)', 'SmallAutoField': 'NUMBER(5) GENERATED BY DEFAULT ON NULL AS IDENTITY', 'SmallIntegerField': 'NUMBER(11)', 'TextField': 'NCLOB', 'TimeField': 'TIMESTAMP', 'URLField': 'VARCHAR2(%(max_length)s)', 'UUIDField': 'VARCHAR2(32)', } data_type_check_constraints = { 'BooleanField': '%(qn_column)s IN (0,1)', 'NullBooleanField': '%(qn_column)s IN (0,1)', 'PositiveBigIntegerField': '%(qn_column)s >= 0', 'PositiveIntegerField': '%(qn_column)s >= 0', 'PositiveSmallIntegerField': '%(qn_column)s >= 0', } # Oracle doesn't support a database index on these columns. _limited_data_types = ('clob', 'nclob', 'blob') operators = _UninitializedOperatorsDescriptor() _standard_operators = { 'exact': '= %s', 'iexact': '= UPPER(%s)', 'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", } _likec_operators = { **_standard_operators, 'contains': "LIKEC %s ESCAPE '\\'", 'icontains': "LIKEC UPPER(%s) ESCAPE '\\'", 'startswith': "LIKEC %s ESCAPE '\\'", 'endswith': "LIKEC %s ESCAPE '\\'", 'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'", 'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, %, _) # should be escaped on the database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. 
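# e.g. _pattern_ops['startswith'].format('col') yields "col || '%%'"; the
# maps below then wrap it in a LIKE ... ESCAPE clause, and the doubled %%
# survives later %-style parameter interpolation as a single % wildcard.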
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" _pattern_ops = { 'contains': "'%%' || {} || '%%'", 'icontains': "'%%' || UPPER({}) || '%%'", 'startswith': "{} || '%%'", 'istartswith': "UPPER({}) || '%%'", 'endswith': "'%%' || {}", 'iendswith': "'%%' || UPPER({})", } _standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)" " ESCAPE TRANSLATE('\\' USING NCHAR_CS)" for k, v in _pattern_ops.items()} _likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'" for k, v in _pattern_ops.items()} Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations validation_class = DatabaseValidation def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True) self.features.can_return_columns_from_insert = use_returning_into def _dsn(self): settings_dict = self.settings_dict if not settings_dict['HOST'].strip(): settings_dict['HOST'] = 'localhost' if settings_dict['PORT']: return Database.makedsn(settings_dict['HOST'], int(settings_dict['PORT']), settings_dict['NAME']) return settings_dict['NAME'] def _connect_string(self): return '%s/"%s"@%s' % (self.settings_dict['USER'], self.settings_dict['PASSWORD'], self._dsn()) def get_connection_params(self): conn_params = self.settings_dict['OPTIONS'].copy() if 'use_returning_into' in conn_params: del conn_params['use_returning_into'] return conn_params @async_unsafe def get_new_connection(self, conn_params): return Database.connect( user=self.settings_dict['USER'], password=self.settings_dict['PASSWORD'], dsn=self._dsn(), **conn_params, ) def init_connection_state(self): cursor = self.create_cursor() # Set the territory first. The territory overrides NLS_DATE_FORMAT # and NLS_TIMESTAMP_FORMAT to the territory default. When all of # these are set in single statement it isn't clear what is supposed # to happen. cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'") # Set Oracle date to ANSI date format. This only needs to execute # once when we create a new connection. We also set the Territory # to 'AMERICA' which forces Sunday to evaluate to a '1' in # TO_CHAR(). cursor.execute( "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'" " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" + (" TIME_ZONE = 'UTC'" if settings.USE_TZ else '') ) cursor.close() if 'operators' not in self.__dict__: # Ticket #14149: Check whether our LIKE implementation will # work for this connection or we need to fall back on LIKEC. # This check is performed only once per DatabaseWrapper # instance per thread, since subsequent connections will use # the same settings. cursor = self.create_cursor() try: cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s" % self._standard_operators['contains'], ['X']) except Database.DatabaseError: self.operators = self._likec_operators self.pattern_ops = self._likec_pattern_ops else: self.operators = self._standard_operators self.pattern_ops = self._standard_pattern_ops cursor.close() self.connection.stmtcachesize = 20 # Ensure all changes are preserved even when AUTOCOMMIT is False. 
if not self.get_autocommit(): self.commit() @async_unsafe def create_cursor(self, name=None): return FormatStylePlaceholderCursor(self.connection) def _commit(self): if self.connection is not None: with wrap_oracle_errors(): return self.connection.commit() # Oracle doesn't support releasing savepoints. But we fake them when query # logging is enabled to keep query counts consistent with other backends. def _savepoint_commit(self, sid): if self.queries_logged: self.queries_log.append({ 'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid), 'time': '0.000', }) def _set_autocommit(self, autocommit): with self.wrap_database_errors: self.connection.autocommit = autocommit def check_constraints(self, table_names=None): """ Check constraints by setting them to immediate. Return them to deferred afterward. """ self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE') self.cursor().execute('SET CONSTRAINTS ALL DEFERRED') def is_usable(self): try: self.connection.ping() except Database.Error: return False else: return True @cached_property def oracle_version(self): with self.temporary_connection(): return tuple(int(x) for x in self.connection.version.split('.')) class OracleParam: """ Wrapper object for formatting parameters for Oracle. If the string representation of the value is large enough (greater than 4000 characters) the input size needs to be set as CLOB. Alternatively, if the parameter has an `input_size` attribute, then the value of the `input_size` attribute will be used instead. Otherwise, no input size will be set for the parameter when executing the query. """ def __init__(self, param, cursor, strings_only=False): # With raw SQL queries, datetimes can reach this function # without being converted by DateTimeField.get_db_prep_value. if settings.USE_TZ and (isinstance(param, datetime.datetime) and not isinstance(param, Oracle_datetime)): param = Oracle_datetime.from_datetime(param) string_size = 0 # Oracle doesn't recognize True and False correctly. if param is True: param = 1 elif param is False: param = 0 if hasattr(param, 'bind_parameter'): self.force_bytes = param.bind_parameter(cursor) elif isinstance(param, (Database.Binary, datetime.timedelta)): self.force_bytes = param else: # To transmit to the database, we need Unicode if supported # To get size right, we must consider bytes. self.force_bytes = force_str(param, cursor.charset, strings_only) if isinstance(self.force_bytes, str): # We could optimize by only converting up to 4000 bytes here string_size = len(force_bytes(param, cursor.charset, strings_only)) if hasattr(param, 'input_size'): # If parameter has `input_size` attribute, use that. self.input_size = param.input_size elif string_size > 4000: # Mark any string param greater than 4000 characters as a CLOB. self.input_size = Database.CLOB elif isinstance(param, datetime.datetime): self.input_size = Database.TIMESTAMP else: self.input_size = None class VariableWrapper: """ An adapter class for cursor variables that prevents the wrapped object from being converted into a string when used to instantiate an OracleParam. This can be used generally for any other object that should be passed into Cursor.execute as-is. """ def __init__(self, var): self.var = var def bind_parameter(self, cursor): return self.var def __getattr__(self, key): return getattr(self.var, key) def __setattr__(self, key, value): if key == 'var': self.__dict__[key] = value else: setattr(self.var, key, value) class FormatStylePlaceholderCursor: """ Django uses "format" (e.g. 
'%s') style placeholders, but Oracle uses ":var" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". """ charset = 'utf-8' def __init__(self, connection): self.cursor = connection.cursor() self.cursor.outputtypehandler = self._output_type_handler @staticmethod def _output_number_converter(value): return decimal.Decimal(value) if '.' in value else int(value) @staticmethod def _get_decimal_converter(precision, scale): if scale == 0: return int context = decimal.Context(prec=precision) quantize_value = decimal.Decimal(1).scaleb(-scale) return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context) @staticmethod def _output_type_handler(cursor, name, defaultType, length, precision, scale): """ Called for each db column fetched from cursors. Return numbers as the appropriate Python type. """ if defaultType == Database.NUMBER: if scale == -127: if precision == 0: # NUMBER column: decimal-precision floating point. # This will normally be an integer from a sequence, # but it could be a decimal value. outconverter = FormatStylePlaceholderCursor._output_number_converter else: # FLOAT column: binary-precision floating point. # This comes from FloatField columns. outconverter = float elif precision > 0: # NUMBER(p,s) column: decimal-precision fixed point. # This comes from IntegerField and DecimalField columns. outconverter = FormatStylePlaceholderCursor._get_decimal_converter(precision, scale) else: # No type information. This normally comes from a # mathematical expression in the SELECT list. Guess int # or Decimal based on whether it has a decimal point. outconverter = FormatStylePlaceholderCursor._output_number_converter return cursor.var( Database.STRING, size=255, arraysize=cursor.arraysize, outconverter=outconverter, ) def _format_params(self, params): try: return {k: OracleParam(v, self, True) for k, v in params.items()} except AttributeError: return tuple(OracleParam(p, self, True) for p in params) def _guess_input_sizes(self, params_list): # Try dict handling; if that fails, treat as sequence if hasattr(params_list[0], 'keys'): sizes = {} for params in params_list: for k, value in params.items(): if value.input_size: sizes[k] = value.input_size if sizes: self.setinputsizes(**sizes) else: # It's not a list of dicts; it's a list of sequences sizes = [None] * len(params_list[0]) for params in params_list: for i, value in enumerate(params): if value.input_size: sizes[i] = value.input_size if sizes: self.setinputsizes(*sizes) def _param_generator(self, params): # Try dict handling; if that fails, treat as sequence if hasattr(params, 'items'): return {k: v.force_bytes for k, v in params.items()} else: return [p.force_bytes for p in params] def _fix_for_params(self, query, params, unify_by_values=False): # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. if query.endswith(';') or query.endswith('/'): query = query[:-1] if params is None: params = [] elif hasattr(params, 'keys'): # Handle params as dict args = {k: ":%s" % k for k in params} query = query % args elif unify_by_values and params: # Handle params as a dict with unified query parameters by their # values. It can be used only in single query execute() because # executemany() shares the formatted query with each of the params # list. e.g.
for input params = [0.75, 2, 0.75, 'sth', 0.75] # params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'} # args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0'] # params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'} params_dict = {param: ':arg%d' % i for i, param in enumerate(set(params))} args = [params_dict[param] for param in params] params = {value: key for key, value in params_dict.items()} query = query % tuple(args) else: # Handle params as sequence args = [(':arg%d' % i) for i in range(len(params))] query = query % tuple(args) return query, self._format_params(params) def execute(self, query, params=None): query, params = self._fix_for_params(query, params, unify_by_values=True) self._guess_input_sizes([params]) with wrap_oracle_errors(): return self.cursor.execute(query, self._param_generator(params)) def executemany(self, query, params=None): if not params: # No params given, nothing to do return None # uniform treatment for sequences and iterables params_iter = iter(params) query, firstparams = self._fix_for_params(query, next(params_iter)) # we build a list of formatted params; as we're going to traverse it # more than once, we can't make it lazy by using a generator formatted = [firstparams] + [self._format_params(p) for p in params_iter] self._guess_input_sizes(formatted) with wrap_oracle_errors(): return self.cursor.executemany(query, [self._param_generator(p) for p in formatted]) def close(self): try: self.cursor.close() except Database.InterfaceError: # already closed pass def var(self, *args): return VariableWrapper(self.cursor.var(*args)) def arrayvar(self, *args): return VariableWrapper(self.cursor.arrayvar(*args)) def __getattr__(self, attr): return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor)
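# Illustrative, standalone sketch (not part of this module): the core of the
# placeholder rewrite that _fix_for_params() above performs for sequence
# params. Django-style '%s' markers become Oracle ':argN' bind names; the
# helper name below is hypothetical.

def _rewrite_placeholders_example(query, params):
    # One ':argN' bind name per positional parameter, as in the cursor above.
    args = [':arg%d' % i for i in range(len(params))]
    return query % tuple(args)

# _rewrite_placeholders_example('SELECT 1 FROM t WHERE a = %s AND b = %s', [1, 'x'])
# returns 'SELECT 1 FROM t WHERE a = :arg0 AND b = :arg1'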
import datetime import uuid from functools import lru_cache from django.conf import settings from django.db import DatabaseError from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.utils import strip_quotes, truncate_name from django.db.models import AutoField, Exists, ExpressionWrapper from django.db.models.expressions import RawSQL from django.db.models.sql.where import WhereNode from django.utils import timezone from django.utils.encoding import force_bytes, force_str from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile from .base import Database from .utils import BulkInsertMapper, InsertVar, Oracle_datetime class DatabaseOperations(BaseDatabaseOperations): # Oracle uses NUMBER(5), NUMBER(11), and NUMBER(19) for integer fields. # SmallIntegerField uses NUMBER(11) instead of NUMBER(5), which is used by # SmallAutoField, to preserve backward compatibility. integer_field_ranges = { 'SmallIntegerField': (-99999999999, 99999999999), 'IntegerField': (-99999999999, 99999999999), 'BigIntegerField': (-9999999999999999999, 9999999999999999999), 'PositiveBigIntegerField': (0, 9999999999999999999), 'PositiveSmallIntegerField': (0, 99999999999), 'PositiveIntegerField': (0, 99999999999), 'SmallAutoField': (-99999, 99999), 'AutoField': (-99999999999, 99999999999), 'BigAutoField': (-9999999999999999999, 9999999999999999999), } set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'} # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. _sequence_reset_sql = """ DECLARE table_value integer; seq_value integer; seq_name user_tab_identity_cols.sequence_name%%TYPE; BEGIN BEGIN SELECT sequence_name INTO seq_name FROM user_tab_identity_cols WHERE table_name = '%(table_name)s' AND column_name = '%(column_name)s'; EXCEPTION WHEN NO_DATA_FOUND THEN seq_name := '%(no_autofield_sequence_name)s'; END; SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences WHERE sequence_name = seq_name; WHILE table_value > seq_value LOOP EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL' INTO seq_value; END LOOP; END; /""" # Oracle doesn't support string without precision; use the max string size. cast_char_field_without_max_length = 'NVARCHAR2(2000)' cast_data_types = { 'AutoField': 'NUMBER(11)', 'BigAutoField': 'NUMBER(19)', 'SmallAutoField': 'NUMBER(5)', 'TextField': cast_char_field_without_max_length, } def cache_key_culling_sql(self): return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY' def date_extract_sql(self, lookup_type, field_name): if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. 
return "TO_CHAR(%s, 'D')" % field_name elif lookup_type == 'iso_week_day': return "TO_CHAR(%s - 1, 'D')" % field_name elif lookup_type == 'week': # IW = ISO week number return "TO_CHAR(%s, 'IW')" % field_name elif lookup_type == 'quarter': return "TO_CHAR(%s, 'Q')" % field_name elif lookup_type == 'iso_year': return "TO_CHAR(%s, 'IYYY')" % field_name else: # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/EXTRACT-datetime.html return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': return "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': return "TRUNC(%s, 'IW')" % field_name else: return "TRUNC(%s)" % field_name # Oracle crashes with "ORA-03113: end-of-file on communication channel" # if the time zone name is passed as a parameter. Use interpolation instead. # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ # This regexp matches all time zone names from the zoneinfo database. _tzname_re = _lazy_re_compile(r'^[\w/:+-]+$') def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if not settings.USE_TZ: return field_name if not self._tzname_re.match(tzname): raise ValueError("Invalid time zone name: %s" % tzname) # Convert from connection timezone to the local time, returning # TIMESTAMP WITH TIME ZONE and cast it back to TIMESTAMP to strip the # TIME ZONE details. if self.connection.timezone_name != tzname: return "CAST((FROM_TZ(%s, '%s') AT TIME ZONE '%s') AS TIMESTAMP)" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return 'TRUNC(%s)' % field_name def datetime_cast_time_sql(self, field_name, tzname): # Since `TimeField` values are stored as TIMESTAMP where only the date # part is ignored, convert the field to the specified timezone. return self._convert_field_to_tz(field_name, tzname) def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': sql = "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': sql = "TRUNC(%s, 'IW')" % field_name elif lookup_type == 'day': sql = "TRUNC(%s)" % field_name elif lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name else: sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def time_trunc_sql(self, lookup_type, field_name): # The implementation is similar to `datetime_trunc_sql` as both # `DateTimeField` and `TimeField` are stored as TIMESTAMP where # the date part of the latter is ignored.
if lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name elif lookup_type == 'second': sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'TextField': converters.append(self.convert_textfield_value) elif internal_type == 'BinaryField': converters.append(self.convert_binaryfield_value) elif internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) # Oracle stores empty strings as null. If the field accepts the empty # string, undo this to adhere to the Django convention of using # the empty string instead of null. if expression.field.empty_strings_allowed: converters.append( self.convert_empty_bytes if internal_type == 'BinaryField' else self.convert_empty_string ) return converters def convert_textfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = value.read() return value def convert_binaryfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = force_bytes(value.read()) return value def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value # cx_Oracle always returns datetime.datetime objects for # DATE and TIMESTAMP columns, but Django wants to see a # python datetime.date, .time, or .datetime. def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.date() return value def convert_timefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.time() return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value @staticmethod def convert_empty_string(value, expression, connection): return '' if value is None else value @staticmethod def convert_empty_bytes(value, expression, connection): return b'' if value is None else value def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_columns(self, cursor, returning_params): for param in returning_params: value = param.get_value() if value is None or value == []: # cx_Oracle < 6.3 returns None, >= 6.3 returns empty list. raise DatabaseError( 'The database did not return a new row id. Probably ' '"ORA-1403: no data found" was raised internally but was ' 'hidden by the Oracle OCI library (see ' 'https://code.djangoproject.com/ticket/28859).' ) # cx_Oracle < 7 returns value, >= 7 returns list with single value. 
yield value[0] if isinstance(value, list) else value def field_cast_sql(self, db_type, internal_type): if db_type and db_type.endswith('LOB'): return "DBMS_LOB.SUBSTR(%s)" else: return "%s" def no_limit_value(self): return None def limit_offset_sql(self, low_mark, high_mark): fetch, offset = self._get_limit_offset_params(low_mark, high_mark) return ' '.join(sql for sql in ( ('OFFSET %d ROWS' % offset) if offset else None, ('FETCH FIRST %d ROWS ONLY' % fetch) if fetch else None, ) if sql) def last_executed_query(self, cursor, sql, params): # https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement # The DB API definition does not define this attribute. statement = cursor.statement # Unlike Psycopg's `query` and MySQLdb's `_executed`, cx_Oracle's # `statement` doesn't contain the query parameters. Substitute # parameters manually. if isinstance(params, (tuple, list)): for i, param in enumerate(params): statement = statement.replace(':arg%d' % i, force_str(param, errors='replace')) elif isinstance(params, dict): for key, param in params.items(): statement = statement.replace(':%s' % key, force_str(param, errors='replace')) return statement def last_insert_id(self, cursor, table_name, pk_name): sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name) cursor.execute('SELECT "%s".currval FROM dual' % sq_name) return cursor.fetchone()[0] def lookup_cast(self, lookup_type, internal_type=None): if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): return "UPPER(%s)" return "%s" def max_in_list_size(self): return 1000 def max_name_length(self): return 30 def pk_default_value(self): return "NULL" def prep_for_iexact_query(self, x): return x def process_clob(self, value): if value is None: return '' return value.read() def quote_name(self, name): # SQL92 requires delimited (quoted) names to be case-sensitive. When # not quoted, Oracle has case-insensitive behavior for identifiers, but # always defaults to uppercase. # We simplify things by making Oracle identifiers always uppercase. if not name.startswith('"') and not name.endswith('"'): name = '"%s"' % truncate_name(name.upper(), self.max_name_length()) # Oracle puts the query text into a (query % args) construct, so % signs # in names need to be escaped. The '%%' will be collapsed back to '%' at # that stage so we aren't really making the name longer here.
name = name.replace('%', '%%') return name.upper() def random_function_sql(self): return "DBMS_RANDOM.RANDOM" def regex_lookup(self, lookup_type): if lookup_type == 'regex': match_option = "'c'" else: match_option = "'i'" return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option def return_insert_columns(self, fields): if not fields: return '', () field_names = [] params = [] for field in fields: field_names.append('%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), )) params.append(InsertVar(field)) return 'RETURNING %s INTO %s' % ( ', '.join(field_names), ', '.join(['%s'] * len(params)), ), tuple(params) def __foreign_key_constraints(self, table_name, recursive): with self.connection.cursor() as cursor: if recursive: cursor.execute(""" SELECT user_tables.table_name, rcons.constraint_name FROM user_tables JOIN user_constraints cons ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U')) LEFT JOIN user_constraints rcons ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R') START WITH user_tables.table_name = UPPER(%s) CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name GROUP BY user_tables.table_name, rcons.constraint_name HAVING user_tables.table_name != UPPER(%s) ORDER BY MAX(level) DESC """, (table_name, table_name)) else: cursor.execute(""" SELECT cons.table_name, cons.constraint_name FROM user_constraints cons WHERE cons.constraint_type = 'R' AND cons.table_name = UPPER(%s) """, (table_name,)) return cursor.fetchall() @cached_property def _foreign_key_constraints(self): # 512 is large enough to fit the ~330 tables (as of this writing) in # Django's test suite. return lru_cache(maxsize=512)(self.__foreign_key_constraints) def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables: truncated_tables = {table.upper() for table in tables} constraints = set() # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE # foreign keys which Django doesn't define. Emulate the # PostgreSQL behavior which truncates all dependent tables by # manually retrieving all foreign key constraints and resolving # dependencies. for table in tables: for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade): if allow_cascade: truncated_tables.add(foreign_table) constraints.add((foreign_table, constraint)) sql = [ "%s %s %s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('DISABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), style.SQL_KEYWORD('KEEP'), style.SQL_KEYWORD('INDEX'), ) for table, constraint in constraints ] + [ "%s %s %s;" % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), ) for table in truncated_tables ] + [ "%s %s %s %s %s %s;" % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('ENABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), ) for table, constraint in constraints ] # Since we've just deleted all the rows, running our sequence # ALTER code will reset the sequence to 0. 
sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): sql = [] for sequence_info in sequences: no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table']) table = self.quote_name(sequence_info['table']) column = self.quote_name(sequence_info['column'] or 'id') query = self._sequence_reset_sql % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), } sql.append(query) return sql def sequence_reset_sql(self, style, model_list): output = [] query = self._sequence_reset_sql for model in model_list: for f in model._meta.local_fields: if isinstance(f, AutoField): no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table) table = self.quote_name(model._meta.db_table) column = self.quote_name(f.column) output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), }) # Only one AutoField is allowed per model, so don't # continue to loop break for f in model._meta.many_to_many: if not f.remote_field.through: no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table()) table = self.quote_name(f.m2m_db_table()) column = self.quote_name('id') output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': 'ID', }) return output def start_transaction_sql(self): return '' def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def adapt_datefield_value(self, value): """ Transform a date value to an object compatible with what is expected by the backend driver for date columns. The default implementation transforms the date to text, but that is not necessary for Oracle. """ return value def adapt_datetimefield_value(self, value): """ Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime is timezone aware. """ if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # cx_Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.") return Oracle_datetime.from_datetime(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. 
if hasattr(value, 'resolve_expression'): return value if isinstance(value, str): return datetime.datetime.strptime(value, '%H:%M:%S') # Oracle doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("Oracle backend does not support timezone-aware times.") return Oracle_datetime(1900, 1, 1, value.hour, value.minute, value.second, value.microsecond) def combine_expression(self, connector, sub_expressions): lhs, rhs = sub_expressions if connector == '%%': return 'MOD(%s)' % ','.join(sub_expressions) elif connector == '&': return 'BITAND(%s)' % ','.join(sub_expressions) elif connector == '|': return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs} elif connector == '<<': return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '>>': return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '^': return 'POWER(%s)' % ','.join(sub_expressions) return super().combine_expression(connector, sub_expressions) def _get_no_autofield_sequence_name(self, table): """ Manually created sequence name to keep backward compatibility for AutoFields that aren't Oracle identity columns. """ name_length = self.max_name_length() - 3 return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper() def _get_sequence_name(self, cursor, table, pk_name): cursor.execute(""" SELECT sequence_name FROM user_tab_identity_cols WHERE table_name = UPPER(%s) AND column_name = UPPER(%s)""", [table, pk_name]) row = cursor.fetchone() return self._get_no_autofield_sequence_name(table) if row is None else row[0] def bulk_insert_sql(self, fields, placeholder_rows): query = [] for row in placeholder_rows: select = [] for i, placeholder in enumerate(row): # A model without any fields has fields=[None]. if fields[i]: internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type() placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder # Add columns aliases to the first select to avoid "ORA-00918: # column ambiguously defined" when two or more columns in the # first select have the same value. if not query: placeholder = '%s col_%s' % (placeholder, i) select.append(placeholder) query.append('SELECT %s FROM DUAL' % ', '.join(select)) # Bulk insert to tables with Oracle identity columns causes Oracle to # add sequence.nextval to it. Sequence.nextval cannot be used with the # UNION operator. To prevent incorrect SQL, move UNION to a subquery. return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query) def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs) def bulk_batch_size(self, fields, objs): """Oracle restricts the number of parameters in a query.""" if fields: return self.connection.features.max_query_params // len(fields) return len(objs) def conditional_expression_supported_in_where_clause(self, expression): """ Oracle supports only EXISTS(...) or filters in the WHERE clause, others must be compared with True. """ if isinstance(expression, (Exists, WhereNode)): return True if isinstance(expression, ExpressionWrapper) and expression.conditional: return self.conditional_expression_supported_in_where_clause(expression.expression) if isinstance(expression, RawSQL) and expression.conditional: return True return False
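# Standalone sanity check (an assumed example, not backend code) for the
# bitwise-OR emulation in combine_expression() above, where '|' is computed
# as BITAND(-lhs-1, rhs) + lhs. In two's complement, -x - 1 == ~x, and since
# (~lhs & rhs) has no set bits in common with lhs, the addition equals
# lhs | rhs for non-negative integers.

def _emulated_or(lhs, rhs):
    return ((-lhs - 1) & rhs) + lhs  # same shape as the generated SQL

assert all(_emulated_or(a, b) == (a | b) for a in range(64) for b in range(64))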
import sys from django.conf import settings from django.db import DatabaseError from django.db.backends.base.creation import BaseDatabaseCreation from django.utils.crypto import get_random_string from django.utils.functional import cached_property TEST_DATABASE_PREFIX = 'test_' class DatabaseCreation(BaseDatabaseCreation): @cached_property def _maindb_connection(self): """ This is analogous to other backends' `_nodb_connection` property, which allows access to an "administrative" connection which can be used to manage the test databases. For Oracle, the only connection that can be used for that purpose is the main (non-test) connection. """ settings_dict = settings.DATABASES[self.connection.alias] user = settings_dict.get('SAVED_USER') or settings_dict['USER'] password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD'] settings_dict = {**settings_dict, 'USER': user, 'PASSWORD': password} DatabaseWrapper = type(self.connection) return DatabaseWrapper(settings_dict, alias=self.connection.alias) def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False): parameters = self._get_test_db_params() with self._maindb_connection.cursor() as cursor: if self._test_database_create(): try: self._execute_test_db_creation(cursor, parameters, verbosity, keepdb) except Exception as e: if 'ORA-01543' not in str(e): # All errors except "tablespace already exists" cancel tests self.log('Got an error creating the test database: %s' % e) sys.exit(2) if not autoclobber: confirm = input( "It appears the test database, %s, already exists. " "Type 'yes' to delete it, or 'no' to cancel: " % parameters['user']) if autoclobber or confirm == 'yes': if verbosity >= 1: self.log("Destroying old test database for alias '%s'..." % self.connection.alias) try: self._execute_test_db_destruction(cursor, parameters, verbosity) except DatabaseError as e: if 'ORA-29857' in str(e): self._handle_objects_preventing_db_destruction(cursor, parameters, verbosity, autoclobber) else: # Ran into a database error that isn't about leftover objects in the tablespace self.log('Got an error destroying the old test database: %s' % e) sys.exit(2) except Exception as e: self.log('Got an error destroying the old test database: %s' % e) sys.exit(2) try: self._execute_test_db_creation(cursor, parameters, verbosity, keepdb) except Exception as e: self.log('Got an error recreating the test database: %s' % e) sys.exit(2) else: self.log('Tests cancelled.') sys.exit(1) if self._test_user_create(): if verbosity >= 1: self.log('Creating test user...') try: self._create_test_user(cursor, parameters, verbosity, keepdb) except Exception as e: if 'ORA-01920' not in str(e): # All errors except "user already exists" cancel tests self.log('Got an error creating the test user: %s' % e) sys.exit(2) if not autoclobber: confirm = input( "It appears the test user, %s, already exists. 
Type " "'yes' to delete it, or 'no' to cancel: " % parameters['user']) if autoclobber or confirm == 'yes': try: if verbosity >= 1: self.log('Destroying old test user...') self._destroy_test_user(cursor, parameters, verbosity) if verbosity >= 1: self.log('Creating test user...') self._create_test_user(cursor, parameters, verbosity, keepdb) except Exception as e: self.log('Got an error recreating the test user: %s' % e) sys.exit(2) else: self.log('Tests cancelled.') sys.exit(1) self._maindb_connection.close() # done with main user -- test user and tablespaces created self._switch_to_test_user(parameters) return self.connection.settings_dict['NAME'] def _switch_to_test_user(self, parameters): """ Switch to the user that's used for creating the test database. Oracle doesn't have the concept of separate databases under the same user, so a separate user is used; see _create_test_db(). The main user is also needed for cleanup when testing is completed, so save its credentials in the SAVED_USER/SAVED_PASSWORD key in the settings dict. """ real_settings = settings.DATABASES[self.connection.alias] real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \ self.connection.settings_dict['USER'] real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \ self.connection.settings_dict['PASSWORD'] real_test_settings = real_settings['TEST'] test_settings = self.connection.settings_dict['TEST'] real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \ self.connection.settings_dict['USER'] = parameters['user'] real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password'] def set_as_test_mirror(self, primary_settings_dict): """ Set this database up to be used in testing as a mirror of a primary database whose settings are given. """ self.connection.settings_dict['USER'] = primary_settings_dict['USER'] self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD'] def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber): # There are objects in the test tablespace which prevent dropping it # The easy fix is to drop the test user -- but are we allowed to do so? self.log( 'There are objects in the old test database which prevent its destruction.\n' 'If they belong to the test user, deleting the user will allow the test ' 'database to be recreated.\n' 'Otherwise, you will need to find and remove each of these objects, ' 'or use a different tablespace.\n' ) if self._test_user_create(): if not autoclobber: confirm = input("Type 'yes' to delete user %s: " % parameters['user']) if autoclobber or confirm == 'yes': try: if verbosity >= 1: self.log('Destroying old test user...') self._destroy_test_user(cursor, parameters, verbosity) except Exception as e: self.log('Got an error destroying the test user: %s' % e) sys.exit(2) try: if verbosity >= 1: self.log("Destroying old test database for alias '%s'..." % self.connection.alias) self._execute_test_db_destruction(cursor, parameters, verbosity) except Exception as e: self.log('Got an error destroying the test database: %s' % e) sys.exit(2) else: self.log('Tests cancelled -- test database cannot be recreated.') sys.exit(1) else: self.log("Django is configured to use pre-existing test user '%s'," " and will not attempt to delete it." 
% parameters['user']) self.log('Tests cancelled -- test database cannot be recreated.') sys.exit(1) def _destroy_test_db(self, test_database_name, verbosity=1): """ Destroy a test database, prompting the user for confirmation if the database already exists. Return the name of the test database created. """ self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER'] self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] self.connection.close() parameters = self._get_test_db_params() with self._maindb_connection.cursor() as cursor: if self._test_user_create(): if verbosity >= 1: self.log('Destroying test user...') self._destroy_test_user(cursor, parameters, verbosity) if self._test_database_create(): if verbosity >= 1: self.log('Destroying test database tables...') self._execute_test_db_destruction(cursor, parameters, verbosity) self._maindb_connection.close() def _execute_test_db_creation(self, cursor, parameters, verbosity, keepdb=False): if verbosity >= 2: self.log('_create_test_db(): dbname = %s' % parameters['user']) if self._test_database_oracle_managed_files(): statements = [ """ CREATE TABLESPACE %(tblspace)s DATAFILE SIZE %(size)s AUTOEXTEND ON NEXT %(extsize)s MAXSIZE %(maxsize)s """, """ CREATE TEMPORARY TABLESPACE %(tblspace_temp)s TEMPFILE SIZE %(size_tmp)s AUTOEXTEND ON NEXT %(extsize_tmp)s MAXSIZE %(maxsize_tmp)s """, ] else: statements = [ """ CREATE TABLESPACE %(tblspace)s DATAFILE '%(datafile)s' SIZE %(size)s REUSE AUTOEXTEND ON NEXT %(extsize)s MAXSIZE %(maxsize)s """, """ CREATE TEMPORARY TABLESPACE %(tblspace_temp)s TEMPFILE '%(datafile_tmp)s' SIZE %(size_tmp)s REUSE AUTOEXTEND ON NEXT %(extsize_tmp)s MAXSIZE %(maxsize_tmp)s """, ] # Ignore "tablespace already exists" error when keepdb is on. acceptable_ora_err = 'ORA-01543' if keepdb else None self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err) def _create_test_user(self, cursor, parameters, verbosity, keepdb=False): if verbosity >= 2: self.log('_create_test_user(): username = %s' % parameters['user']) statements = [ """CREATE USER %(user)s IDENTIFIED BY "%(password)s" DEFAULT TABLESPACE %(tblspace)s TEMPORARY TABLESPACE %(tblspace_temp)s QUOTA UNLIMITED ON %(tblspace)s """, """GRANT CREATE SESSION, CREATE TABLE, CREATE SEQUENCE, CREATE PROCEDURE, CREATE TRIGGER TO %(user)s""", ] # Ignore "user already exists" error when keepdb is on acceptable_ora_err = 'ORA-01920' if keepdb else None success = self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err) # If the password was randomly generated, change the user accordingly. if not success and self._test_settings_get('PASSWORD') is None: set_password = 'ALTER USER %(user)s IDENTIFIED BY "%(password)s"' self._execute_statements(cursor, [set_password], parameters, verbosity) # Most test suites can be run without "create view" and # "create materialized view" privileges. But some need it. for object_type in ('VIEW', 'MATERIALIZED VIEW'): extra = 'GRANT CREATE %(object_type)s TO %(user)s' parameters['object_type'] = object_type success = self._execute_allow_fail_statements(cursor, [extra], parameters, verbosity, 'ORA-01031') if not success and verbosity >= 2: self.log('Failed to grant CREATE %s permission to test user. This may be ok.' 
% object_type) def _execute_test_db_destruction(self, cursor, parameters, verbosity): if verbosity >= 2: self.log('_execute_test_db_destruction(): dbname=%s' % parameters['user']) statements = [ 'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS', 'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS', ] self._execute_statements(cursor, statements, parameters, verbosity) def _destroy_test_user(self, cursor, parameters, verbosity): if verbosity >= 2: self.log('_destroy_test_user(): user=%s' % parameters['user']) self.log('Be patient. This can take some time...') statements = [ 'DROP USER %(user)s CASCADE', ] self._execute_statements(cursor, statements, parameters, verbosity) def _execute_statements(self, cursor, statements, parameters, verbosity, allow_quiet_fail=False): for template in statements: stmt = template % parameters if verbosity >= 2: print(stmt) try: cursor.execute(stmt) except Exception as err: if (not allow_quiet_fail) or verbosity >= 2: self.log('Failed (%s)' % (err)) raise def _execute_allow_fail_statements(self, cursor, statements, parameters, verbosity, acceptable_ora_err): """ Execute statements which are allowed to fail silently if the Oracle error code given by `acceptable_ora_err` is raised. Return True if the statements execute without an exception, or False otherwise. """ try: # Statement can fail when acceptable_ora_err is not None allow_quiet_fail = acceptable_ora_err is not None and len(acceptable_ora_err) > 0 self._execute_statements(cursor, statements, parameters, verbosity, allow_quiet_fail=allow_quiet_fail) return True except DatabaseError as err: description = str(err) if acceptable_ora_err is None or acceptable_ora_err not in description: raise return False def _get_test_db_params(self): return { 'dbname': self._test_database_name(), 'user': self._test_database_user(), 'password': self._test_database_passwd(), 'tblspace': self._test_database_tblspace(), 'tblspace_temp': self._test_database_tblspace_tmp(), 'datafile': self._test_database_tblspace_datafile(), 'datafile_tmp': self._test_database_tblspace_tmp_datafile(), 'maxsize': self._test_database_tblspace_maxsize(), 'maxsize_tmp': self._test_database_tblspace_tmp_maxsize(), 'size': self._test_database_tblspace_size(), 'size_tmp': self._test_database_tblspace_tmp_size(), 'extsize': self._test_database_tblspace_extsize(), 'extsize_tmp': self._test_database_tblspace_tmp_extsize(), } def _test_settings_get(self, key, default=None, prefixed=None): """ Return a value from the test settings dict, or a given default, or a prefixed entry from the main settings dict. """ settings_dict = self.connection.settings_dict val = settings_dict['TEST'].get(key, default) if val is None and prefixed: val = TEST_DATABASE_PREFIX + settings_dict[prefixed] return val def _test_database_name(self): return self._test_settings_get('NAME', prefixed='NAME') def _test_database_create(self): return self._test_settings_get('CREATE_DB', default=True) def _test_user_create(self): return self._test_settings_get('CREATE_USER', default=True) def _test_database_user(self): return self._test_settings_get('USER', prefixed='USER') def _test_database_passwd(self): password = self._test_settings_get('PASSWORD') if password is None and self._test_user_create(): # Oracle passwords are limited to 30 chars and can't contain symbols. 
password = get_random_string(length=30) return password def _test_database_tblspace(self): return self._test_settings_get('TBLSPACE', prefixed='USER') def _test_database_tblspace_tmp(self): settings_dict = self.connection.settings_dict return settings_dict['TEST'].get('TBLSPACE_TMP', TEST_DATABASE_PREFIX + settings_dict['USER'] + '_temp') def _test_database_tblspace_datafile(self): tblspace = '%s.dbf' % self._test_database_tblspace() return self._test_settings_get('DATAFILE', default=tblspace) def _test_database_tblspace_tmp_datafile(self): tblspace = '%s.dbf' % self._test_database_tblspace_tmp() return self._test_settings_get('DATAFILE_TMP', default=tblspace) def _test_database_tblspace_maxsize(self): return self._test_settings_get('DATAFILE_MAXSIZE', default='500M') def _test_database_tblspace_tmp_maxsize(self): return self._test_settings_get('DATAFILE_TMP_MAXSIZE', default='500M') def _test_database_tblspace_size(self): return self._test_settings_get('DATAFILE_SIZE', default='50M') def _test_database_tblspace_tmp_size(self): return self._test_settings_get('DATAFILE_TMP_SIZE', default='50M') def _test_database_tblspace_extsize(self): return self._test_settings_get('DATAFILE_EXTSIZE', default='25M') def _test_database_tblspace_tmp_extsize(self): return self._test_settings_get('DATAFILE_TMP_EXTSIZE', default='25M') def _test_database_oracle_managed_files(self): return self._test_settings_get('ORACLE_MANAGED_FILES', default=False) def _get_test_db_name(self): """ Return the 'production' DB name to get the test DB creation machinery to work. This isn't a great deal in this case because DB names as handled by Django don't have real counterparts in Oracle. """ return self.connection.settings_dict['NAME'] def test_db_signature(self): settings_dict = self.connection.settings_dict return ( settings_dict['HOST'], settings_dict['PORT'], settings_dict['ENGINE'], settings_dict['NAME'], self._test_database_user(), )
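# Hedged sketch (assumed settings values, not part of the backend) of the
# fallback chain implemented by _test_settings_get() above: an explicit entry
# in the TEST dict wins; otherwise the value is derived by prepending
# TEST_DATABASE_PREFIX to the corresponding main setting.

_EXAMPLE_SETTINGS = {'USER': 'django', 'TEST': {}}  # no explicit TEST['USER']

def _resolved_test_user(settings_dict, prefix=TEST_DATABASE_PREFIX):
    # Mirrors _test_settings_get('USER', prefixed='USER').
    val = settings_dict['TEST'].get('USER')
    return prefix + settings_dict['USER'] if val is None else val

# _resolved_test_user(_EXAMPLE_SETTINGS) returns 'test_django'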
from django.db import ProgrammingError from django.utils.functional import cached_property class BaseDatabaseFeatures: gis_enabled = False allows_group_by_pk = False allows_group_by_selected_pks = False empty_fetchmany_value = [] update_can_self_select = True # Does the backend distinguish between '' and None? interprets_empty_strings_as_nulls = False # Does the backend allow inserting duplicate NULL rows in a nullable # unique field? All core backends implement this correctly, but other # databases such as SQL Server do not. supports_nullable_unique_constraints = True # Does the backend allow inserting duplicate rows when a unique_together # constraint exists and some fields are nullable but not all of them? supports_partially_nullable_unique_constraints = True can_use_chunked_reads = True can_return_columns_from_insert = False can_return_rows_from_bulk_insert = False has_bulk_insert = True uses_savepoints = True can_release_savepoints = False # If True, don't use integer foreign keys referring to, e.g., positive # integer primary keys. related_fields_match_type = False allow_sliced_subqueries_with_in = True has_select_for_update = False has_select_for_update_nowait = False has_select_for_update_skip_locked = False has_select_for_update_of = False # Does the database's SELECT FOR UPDATE OF syntax require a column rather # than a table? select_for_update_of_column = False # Does the default test database allow multiple connections? # Usually an indication that the test database is in-memory test_db_allows_multiple_connections = True # Can an object be saved without an explicit primary key? supports_unspecified_pk = False # Can a fixture contain forward references? i.e., are # FK constraints checked at the end of transaction, or # at the end of each save operation? supports_forward_references = True # Does the backend truncate names properly when they are too long? truncates_names = False # Is there a REAL datatype in addition to floats/doubles? has_real_datatype = False supports_subqueries_in_group_by = True # Is there a true datatype for uuid? has_native_uuid_field = False # Is there a true datatype for timedeltas? has_native_duration_field = False # Does the database driver supports same type temporal data subtraction # by returning the type used to store duration field? supports_temporal_subtraction = False # Does the __regex lookup support backreferencing and grouping? supports_regex_backreferencing = True # Can date/datetime lookups be performed using a string? supports_date_lookup_using_string = True # Can datetimes with timezones be used? supports_timezones = True # Does the database have a copy of the zoneinfo database? has_zoneinfo_database = True # When performing a GROUP BY, is an ORDER BY NULL required # to remove any ordering? requires_explicit_null_ordering_when_grouping = False # Does the backend order NULL values as largest or smallest? nulls_order_largest = False # Does the backend support NULLS FIRST and NULLS LAST in ORDER BY? supports_order_by_nulls_modifier = True # The database's limit on the number of query parameters. max_query_params = None # Can an object have an autoincrement primary key of 0? MySQL says No. allows_auto_pk_0 = True # Do we need to NULL a ForeignKey out, or can the constraint check be # deferred can_defer_constraint_checks = False # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas supports_mixed_date_datetime_comparisons = True # Does the backend support tablespaces? 
Default to False because it isn't # in the SQL standard. supports_tablespaces = False # Does the backend reset sequences between tests? supports_sequence_reset = True # Can the backend introspect the default value of a column? can_introspect_default = True # Confirm support for introspected foreign keys. # Every database can do this reliably, except MySQL, # which can't do it for MyISAM tables. can_introspect_foreign_keys = True # Can the backend introspect an AutoField, instead of an IntegerField? can_introspect_autofield = False # Can the backend introspect a BigIntegerField, instead of an IntegerField? can_introspect_big_integer_field = True # Can the backend introspect a BinaryField, instead of a TextField? can_introspect_binary_field = True # Can the backend introspect a DecimalField, instead of a FloatField? can_introspect_decimal_field = True # Can the backend introspect a DurationField, instead of a BigIntegerField? can_introspect_duration_field = True # Can the backend introspect an IPAddressField, instead of a CharField? can_introspect_ip_address_field = False # Can the backend introspect a PositiveIntegerField, instead of an IntegerField? can_introspect_positive_integer_field = False # Can the backend introspect a SmallIntegerField, instead of an IntegerField? can_introspect_small_integer_field = False # Can the backend introspect a TimeField, instead of a DateTimeField? can_introspect_time_field = True # Some backends may not be able to differentiate BigAutoField or # SmallAutoField from other fields such as AutoField. introspected_big_auto_field_type = 'BigAutoField' introspected_small_auto_field_type = 'SmallAutoField' # Some backends may not be able to differentiate BooleanField from other # fields such as IntegerField. introspected_boolean_field_type = 'BooleanField' # Can the backend introspect the column order (ASC/DESC) for indexes? supports_index_column_ordering = True # Does the backend support introspection of materialized views? can_introspect_materialized_views = False # Support for the DISTINCT ON clause can_distinct_on_fields = False # Does the backend prevent running SQL queries in broken transactions? atomic_transactions = True # Can we roll back DDL in a transaction? can_rollback_ddl = False # Does it support operations requiring references rename in a transaction? supports_atomic_references_rename = True # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE? supports_combined_alters = False # Does it support foreign keys? supports_foreign_keys = True # Can it create foreign key constraints inline when adding columns? can_create_inline_fk = True # Does it support CHECK constraints? supports_column_check_constraints = True supports_table_check_constraints = True # Does the backend support introspection of CHECK constraints? can_introspect_check_constraints = True # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value}) # parameter passing? Note this can be provided by the backend even if not # supported by the Python driver. supports_paramstyle_pyformat = True # Does the backend require literal defaults, rather than parameterized ones? requires_literal_defaults = False # Does the backend require a connection reset after each material schema change? connection_persists_old_columns = False # What kind of error does the backend throw when accessing a closed cursor? closed_cursor_error_class = ProgrammingError # Does 'a' LIKE 'A' match?
has_case_insensitive_like = True # Suffix for backends that don't support "SELECT xxx;" queries. bare_select_suffix = '' # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False # Does the backend support "select for update" queries with limit (and offset)? supports_select_for_update_with_limit = True # Does the backend ignore null expressions in GREATEST and LEAST queries unless # every expression is null? greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? # Defaults to False to allow third-party backends to opt-in. can_clone_databases = False # Does the backend consider table names with different casing to # be equal? ignores_table_name_case = False # Place FOR UPDATE right after FROM clause. Used on MSSQL. for_update_after_from = False # Combinatorial flags supports_select_union = True supports_select_intersection = True supports_select_difference = True supports_slicing_ordering_in_compound = False supports_parentheses_in_compound = True # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate # expressions? supports_aggregate_filter_clause = False # Does the backend support indexing a TextField? supports_index_on_text_field = True # Does the backend support window expressions (expression OVER (...))? supports_over_clause = False supports_frame_range_fixed_distance = False only_supports_unbounded_with_preceding_and_following = False # Does the backend support CAST with precision? supports_cast_with_precision = True # How many second decimals does the database return when casting a value to # a type with time? time_cast_precision = 6 # SQL to create a procedure for use by the Django test suite. The # functionality of the procedure isn't important. create_test_procedure_without_params_sql = None create_test_procedure_with_int_param_sql = None # Does the backend support keyword parameters for cursor.callproc()? supports_callproc_kwargs = False # Convert CharField results from bytes to str in database functions. db_functions_convert_bytes_to_str = False # What formats does the backend EXPLAIN syntax support? supported_explain_formats = set() # Does DatabaseOperations.explain_query_prefix() raise ValueError if # unknown kwargs are passed to QuerySet.explain()? validates_explain_options = True # Does the backend support the default parameter in lead() and lag()? supports_default_in_lead_lag = True # Does the backend support ignoring constraint or uniqueness errors during # INSERT? supports_ignore_conflicts = True # Does this backend require casting the results of CASE expressions used # in UPDATE statements to ensure the expression has the correct type? requires_casted_case_in_updates = False # Does the backend support partial indexes (CREATE INDEX ... WHERE ...)? supports_partial_indexes = True supports_functions_in_partial_indexes = True # Does the database allow more than one constraint or index on the same # field(s)? allows_multiple_constraints_on_same_fields = True # Does the backend support boolean expressions in the SELECT clause? 
supports_boolean_expr_in_select_clause = True def __init__(self, connection): self.connection = connection @cached_property def supports_explaining_query_execution(self): """Does this backend support explaining query execution?""" return self.connection.ops.explain_prefix is not None @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') self.connection.set_autocommit(False) cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)') self.connection.rollback() self.connection.set_autocommit(True) cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST') count, = cursor.fetchone() cursor.execute('DROP TABLE ROLLBACK_TEST') return count == 0 def allows_group_by_selected_pks_on_model(self, model): if not self.allows_group_by_selected_pks: return False return model._meta.managed
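
# Illustrative sketch (not part of Django): a concrete backend advertises its
# capabilities by overriding these flags on a subclass of the class defined
# above. The backend and the flag values below are hypothetical.
class ExampleDatabaseFeatures(BaseDatabaseFeatures):
    # A hypothetical engine that can't roll back DDL and lacks DISTINCT ON,
    # but does support window expressions.
    can_rollback_ddl = False
    can_distinct_on_fields = False
    supports_over_clause = True
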
import copy import threading import time import warnings from collections import deque from contextlib import contextmanager import _thread import pytz from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import DEFAULT_DB_ALIAS, DatabaseError from django.db.backends import utils from django.db.backends.base.validation import BaseDatabaseValidation from django.db.backends.signals import connection_created from django.db.transaction import TransactionManagementError from django.db.utils import DatabaseErrorWrapper from django.utils import timezone from django.utils.asyncio import async_unsafe from django.utils.functional import cached_property NO_DB_ALIAS = '__no_db__' class BaseDatabaseWrapper: """Represent a database connection.""" # Mapping of Field objects to their column types. data_types = {} # Mapping of Field objects to their SQL suffix such as AUTOINCREMENT. data_types_suffix = {} # Mapping of Field objects to their SQL for CHECK constraints. data_type_check_constraints = {} ops = None vendor = 'unknown' display_name = 'unknown' SchemaEditorClass = None # Classes instantiated in __init__(). client_class = None creation_class = None features_class = None introspection_class = None ops_class = None validation_class = BaseDatabaseValidation queries_limit = 9000 def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS): # Connection related attributes. # The underlying database connection. self.connection = None # `settings_dict` should be a dictionary containing keys such as # NAME, USER, etc. It's called `settings_dict` instead of `settings` # to disambiguate it from Django settings modules. self.settings_dict = settings_dict self.alias = alias # Query logging in debug mode or when explicitly enabled. self.queries_log = deque(maxlen=self.queries_limit) self.force_debug_cursor = False # Transaction related attributes. # Tracks if the connection is in autocommit mode. Per PEP 249, by # default, it isn't. self.autocommit = False # Tracks if the connection is in a transaction managed by 'atomic'. self.in_atomic_block = False # Increment to generate unique savepoint ids. self.savepoint_state = 0 # List of savepoints created by 'atomic'. self.savepoint_ids = [] # Tracks if the outermost 'atomic' block should commit on exit, # ie. if autocommit was active on entry. self.commit_on_exit = True # Tracks if the transaction should be rolled back to the next # available savepoint because of an exception in an inner block. self.needs_rollback = False # Connection termination related attributes. self.close_at = None self.closed_in_transaction = False self.errors_occurred = False # Thread-safety related attributes. self._thread_sharing_lock = threading.Lock() self._thread_sharing_count = 0 self._thread_ident = _thread.get_ident() # A list of no-argument functions to run when the transaction commits. # Each entry is an (sids, func) tuple, where sids is a set of the # active savepoint IDs when this function was registered. self.run_on_commit = [] # Should we run the on-commit hooks the next time set_autocommit(True) # is called? self.run_commit_hooks_on_set_autocommit_on = False # A stack of wrappers to be invoked around execute()/executemany() # calls. Each entry is a function taking five arguments: execute, sql, # params, many, and context. It's the function's responsibility to # call execute(sql, params, many, context). 
self.execute_wrappers = [] self.client = self.client_class(self) self.creation = self.creation_class(self) self.features = self.features_class(self) self.introspection = self.introspection_class(self) self.ops = self.ops_class(self) self.validation = self.validation_class(self) def ensure_timezone(self): """ Ensure the connection's timezone is set to `self.timezone_name` and return whether it changed or not. """ return False @cached_property def timezone(self): """ Return a tzinfo of the database connection time zone. This is only used when time zone support is enabled. When a datetime is read from the database, it is always returned in this time zone. When the database backend supports time zones, it doesn't matter which time zone Django uses, as long as aware datetimes are used everywhere. Other users connecting to the database can choose their own time zone. When the database backend doesn't support time zones, the time zone Django uses may be constrained by the requirements of other users of the database. """ if not settings.USE_TZ: return None elif self.settings_dict['TIME_ZONE'] is None: return timezone.utc else: return pytz.timezone(self.settings_dict['TIME_ZONE']) @cached_property def timezone_name(self): """ Name of the time zone of the database connection. """ if not settings.USE_TZ: return settings.TIME_ZONE elif self.settings_dict['TIME_ZONE'] is None: return 'UTC' else: return self.settings_dict['TIME_ZONE'] @property def queries_logged(self): return self.force_debug_cursor or settings.DEBUG @property def queries(self): if len(self.queries_log) == self.queries_log.maxlen: warnings.warn( "Limit for query logging exceeded, only the last {} queries " "will be returned.".format(self.queries_log.maxlen)) return list(self.queries_log) # ##### Backend-specific methods for creating connections and cursors ##### def get_connection_params(self): """Return a dict of parameters suitable for get_new_connection.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method') def get_new_connection(self, conn_params): """Open a connection to the database.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method') def init_connection_state(self): """Initialize the database connection settings.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method') def create_cursor(self, name=None): """Create a cursor. Assume that a connection is established.""" raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method') # ##### Backend-specific methods for creating connections ##### @async_unsafe def connect(self): """Connect to the database. Assume that the connection is closed.""" # Check for invalid configurations. 
self.check_settings() # In case the previous connection was closed while in an atomic block self.in_atomic_block = False self.savepoint_ids = [] self.needs_rollback = False # Reset parameters defining when to close the connection max_age = self.settings_dict['CONN_MAX_AGE'] self.close_at = None if max_age is None else time.monotonic() + max_age self.closed_in_transaction = False self.errors_occurred = False # Establish the connection conn_params = self.get_connection_params() self.connection = self.get_new_connection(conn_params) self.set_autocommit(self.settings_dict['AUTOCOMMIT']) self.init_connection_state() connection_created.send(sender=self.__class__, connection=self) self.run_on_commit = [] def check_settings(self): if self.settings_dict['TIME_ZONE'] is not None and not settings.USE_TZ: raise ImproperlyConfigured( "Connection '%s' cannot set TIME_ZONE because USE_TZ is False." % self.alias ) @async_unsafe def ensure_connection(self): """Guarantee that a connection to the database is established.""" if self.connection is None: with self.wrap_database_errors: self.connect() # ##### Backend-specific wrappers for PEP-249 connection methods ##### def _prepare_cursor(self, cursor): """ Validate the connection is usable and perform database cursor wrapping. """ self.validate_thread_sharing() if self.queries_logged: wrapped_cursor = self.make_debug_cursor(cursor) else: wrapped_cursor = self.make_cursor(cursor) return wrapped_cursor def _cursor(self, name=None): self.ensure_connection() with self.wrap_database_errors: return self._prepare_cursor(self.create_cursor(name)) def _commit(self): if self.connection is not None: with self.wrap_database_errors: return self.connection.commit() def _rollback(self): if self.connection is not None: with self.wrap_database_errors: return self.connection.rollback() def _close(self): if self.connection is not None: with self.wrap_database_errors: return self.connection.close() # ##### Generic wrappers for PEP-249 connection methods ##### @async_unsafe def cursor(self): """Create a cursor, opening a connection if necessary.""" return self._cursor() @async_unsafe def commit(self): """Commit a transaction and reset the dirty flag.""" self.validate_thread_sharing() self.validate_no_atomic_block() self._commit() # A successful commit means that the database connection works. self.errors_occurred = False self.run_commit_hooks_on_set_autocommit_on = True @async_unsafe def rollback(self): """Roll back a transaction and reset the dirty flag.""" self.validate_thread_sharing() self.validate_no_atomic_block() self._rollback() # A successful rollback means that the database connection works. self.errors_occurred = False self.needs_rollback = False self.run_on_commit = [] @async_unsafe def close(self): """Close the connection to the database.""" self.validate_thread_sharing() self.run_on_commit = [] # Don't call validate_no_atomic_block() to avoid making it difficult # to get rid of a connection in an invalid state. The next connect() # will reset the transaction state anyway. 
if self.closed_in_transaction or self.connection is None: return try: self._close() finally: if self.in_atomic_block: self.closed_in_transaction = True self.needs_rollback = True else: self.connection = None # ##### Backend-specific savepoint management methods ##### def _savepoint(self, sid): with self.cursor() as cursor: cursor.execute(self.ops.savepoint_create_sql(sid)) def _savepoint_rollback(self, sid): with self.cursor() as cursor: cursor.execute(self.ops.savepoint_rollback_sql(sid)) def _savepoint_commit(self, sid): with self.cursor() as cursor: cursor.execute(self.ops.savepoint_commit_sql(sid)) def _savepoint_allowed(self): # Savepoints cannot be created outside a transaction return self.features.uses_savepoints and not self.get_autocommit() # ##### Generic savepoint management methods ##### @async_unsafe def savepoint(self): """ Create a savepoint inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return thread_ident = _thread.get_ident() tid = str(thread_ident).replace('-', '') self.savepoint_state += 1 sid = "s%s_x%d" % (tid, self.savepoint_state) self.validate_thread_sharing() self._savepoint(sid) return sid @async_unsafe def savepoint_rollback(self, sid): """ Roll back to a savepoint. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return self.validate_thread_sharing() self._savepoint_rollback(sid) # Remove any callbacks registered while this savepoint was active. self.run_on_commit = [ (sids, func) for (sids, func) in self.run_on_commit if sid not in sids ] @async_unsafe def savepoint_commit(self, sid): """ Release a savepoint. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return self.validate_thread_sharing() self._savepoint_commit(sid) @async_unsafe def clean_savepoints(self): """ Reset the counter used to generate unique savepoint ids in this thread. """ self.savepoint_state = 0 # ##### Backend-specific transaction management methods ##### def _set_autocommit(self, autocommit): """ Backend-specific implementation to enable or disable autocommit. """ raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method') # ##### Generic transaction management methods ##### def get_autocommit(self): """Get the autocommit state.""" self.ensure_connection() return self.autocommit def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False): """ Enable or disable autocommit. The usual way to start a transaction is to turn autocommit off. SQLite does not properly start a transaction when disabling autocommit. To avoid this buggy behavior and to actually enter a new transaction, an explicit BEGIN is required. Using force_begin_transaction_with_broken_autocommit=True will issue an explicit BEGIN with SQLite. This option will be ignored for other backends. 
""" self.validate_no_atomic_block() self.ensure_connection() start_transaction_under_autocommit = ( force_begin_transaction_with_broken_autocommit and not autocommit and hasattr(self, '_start_transaction_under_autocommit') ) if start_transaction_under_autocommit: self._start_transaction_under_autocommit() else: self._set_autocommit(autocommit) self.autocommit = autocommit if autocommit and self.run_commit_hooks_on_set_autocommit_on: self.run_and_clear_commit_hooks() self.run_commit_hooks_on_set_autocommit_on = False def get_rollback(self): """Get the "needs rollback" flag -- for *advanced use* only.""" if not self.in_atomic_block: raise TransactionManagementError( "The rollback flag doesn't work outside of an 'atomic' block.") return self.needs_rollback def set_rollback(self, rollback): """ Set or unset the "needs rollback" flag -- for *advanced use* only. """ if not self.in_atomic_block: raise TransactionManagementError( "The rollback flag doesn't work outside of an 'atomic' block.") self.needs_rollback = rollback def validate_no_atomic_block(self): """Raise an error if an atomic block is active.""" if self.in_atomic_block: raise TransactionManagementError( "This is forbidden when an 'atomic' block is active.") def validate_no_broken_transaction(self): if self.needs_rollback: raise TransactionManagementError( "An error occurred in the current transaction. You can't " "execute queries until the end of the 'atomic' block.") # ##### Foreign key constraints checks handling ##### @contextmanager def constraint_checks_disabled(self): """ Disable foreign key constraint checking. """ disabled = self.disable_constraint_checking() try: yield finally: if disabled: self.enable_constraint_checking() def disable_constraint_checking(self): """ Backends can implement as needed to temporarily disable foreign key constraint checking. Should return True if the constraints were disabled and will need to be reenabled. """ return False def enable_constraint_checking(self): """ Backends can implement as needed to re-enable foreign key constraint checking. """ pass def check_constraints(self, table_names=None): """ Backends can override this method if they can apply constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an IntegrityError if any invalid foreign key references are encountered. """ pass # ##### Connection termination handling ##### def is_usable(self): """ Test if the database connection is usable. This method may assume that self.connection is not None. Actual implementations should take care not to raise exceptions as that may prevent Django from recycling unusable connections. """ raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require an is_usable() method") def close_if_unusable_or_obsolete(self): """ Close the current connection if unrecoverable errors have occurred or if it outlived its maximum age. """ if self.connection is not None: # If the application didn't restore the original autocommit setting, # don't take chances, drop the connection. if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']: self.close() return # If an exception other than DataError or IntegrityError occurred # since the last commit / rollback, check if the connection works. 
if self.errors_occurred: if self.is_usable(): self.errors_occurred = False else: self.close() return if self.close_at is not None and time.monotonic() >= self.close_at: self.close() return # ##### Thread safety handling ##### @property def allow_thread_sharing(self): with self._thread_sharing_lock: return self._thread_sharing_count > 0 def inc_thread_sharing(self): with self._thread_sharing_lock: self._thread_sharing_count += 1 def dec_thread_sharing(self): with self._thread_sharing_lock: if self._thread_sharing_count <= 0: raise RuntimeError('Cannot decrement the thread sharing count below zero.') self._thread_sharing_count -= 1 def validate_thread_sharing(self): """ Validate that the connection isn't accessed by another thread than the one which originally created it, unless the connection was explicitly authorized to be shared between threads (via the `inc_thread_sharing()` method). Raise an exception if the validation fails. """ if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()): raise DatabaseError( "DatabaseWrapper objects created in a " "thread can only be used in that same thread. The object " "with alias '%s' was created in thread id %s and this is " "thread id %s." % (self.alias, self._thread_ident, _thread.get_ident()) ) # ##### Miscellaneous ##### def prepare_database(self): """ Hook to do any database check or preparation, generally called before migrating a project or an app. """ pass @cached_property def wrap_database_errors(self): """ Context manager and decorator that re-throws backend-specific database exceptions using Django's common wrappers. """ return DatabaseErrorWrapper(self) def chunked_cursor(self): """ Return a cursor that tries to avoid caching in the database (if supported by the database), otherwise return a regular cursor. """ return self.cursor() def make_debug_cursor(self, cursor): """Create a cursor that logs all queries in self.queries_log.""" return utils.CursorDebugWrapper(cursor, self) def make_cursor(self, cursor): """Create a cursor without debug logging.""" return utils.CursorWrapper(cursor, self) @contextmanager def temporary_connection(self): """ Context manager that ensures that a connection is established, and if it opened one, closes it to avoid leaving a dangling connection. This is useful for operations outside of the request-response cycle. Provide a cursor: with self.temporary_connection() as cursor: ... """ must_close = self.connection is None try: with self.cursor() as cursor: yield cursor finally: if must_close: self.close() @property def _nodb_connection(self): """ Return an alternative connection to be used when there is no need to access the main database, specifically for test db creation/deletion. This also prevents the production database from being exposed to potential child threads while (or after) the test database is destroyed. Refs #10868, #17786, #16969. """ return self.__class__({**self.settings_dict, 'NAME': None}, alias=NO_DB_ALIAS) def schema_editor(self, *args, **kwargs): """ Return a new instance of this backend's SchemaEditor. """ if self.SchemaEditorClass is None: raise NotImplementedError( 'The SchemaEditorClass attribute of this database wrapper is still None') return self.SchemaEditorClass(self, *args, **kwargs) def on_commit(self, func): if self.in_atomic_block: # Transaction in progress; save for execution on commit. 
self.run_on_commit.append((set(self.savepoint_ids), func)) elif not self.get_autocommit(): raise TransactionManagementError('on_commit() cannot be used in manual transaction management') else: # No transaction in progress and in autocommit mode; execute # immediately. func() def run_and_clear_commit_hooks(self): self.validate_no_atomic_block() current_run_on_commit = self.run_on_commit self.run_on_commit = [] while current_run_on_commit: sids, func = current_run_on_commit.pop(0) func() @contextmanager def execute_wrapper(self, wrapper): """ Return a context manager under which the wrapper is applied to suitable database query executions. """ self.execute_wrappers.append(wrapper) try: yield finally: self.execute_wrappers.pop() def copy(self, alias=None): """ Return a copy of this connection. For tests that require two connections to the same database. """ settings_dict = copy.deepcopy(self.settings_dict) if alias is None: alias = self.alias return type(self)(settings_dict, alias)
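
# Illustrative sketch (not part of Django) of an execute() wrapper following
# the protocol documented on `execute_wrappers` in __init__() above: accept
# execute, sql, params, many, and context, and delegate by calling
# execute(sql, params, many, context). The print() destination is just for
# demonstration.
def print_sql_wrapper(execute, sql, params, many, context):
    print('Executing (many=%s): %s' % (many, sql))
    return execute(sql, params, many, context)

# Usage, assuming `connection` is an instance of a concrete
# BaseDatabaseWrapper subclass:
#
#     with connection.execute_wrapper(print_sql_wrapper):
#         ...  # queries run here go through print_sql_wrapper first
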
import datetime
import decimal
from importlib import import_module

import sqlparse

from django.conf import settings
from django.db import NotSupportedError, transaction
from django.db.backends import utils
from django.utils import timezone
from django.utils.encoding import force_str


class BaseDatabaseOperations:
    """
    Encapsulate backend-specific differences, such as the way a backend
    performs ordering or calculates the ID of a recently-inserted row.
    """
    compiler_module = "django.db.models.sql.compiler"

    # Integer field safe ranges by `internal_type` as documented
    # in docs/ref/models/fields.txt.
    integer_field_ranges = {
        'SmallIntegerField': (-32768, 32767),
        'IntegerField': (-2147483648, 2147483647),
        'BigIntegerField': (-9223372036854775808, 9223372036854775807),
        'PositiveBigIntegerField': (0, 9223372036854775807),
        'PositiveSmallIntegerField': (0, 32767),
        'PositiveIntegerField': (0, 2147483647),
        'SmallAutoField': (-32768, 32767),
        'AutoField': (-2147483648, 2147483647),
        'BigAutoField': (-9223372036854775808, 9223372036854775807),
    }
    set_operators = {
        'union': 'UNION',
        'intersection': 'INTERSECT',
        'difference': 'EXCEPT',
    }
    # Mapping of Field.get_internal_type() (typically the model field's class
    # name) to the data type to use for the Cast() function, if different from
    # DatabaseWrapper.data_types.
    cast_data_types = {}
    # CharField data type if the max_length argument isn't provided.
    cast_char_field_without_max_length = None

    # Start and end points for window expressions.
    PRECEDING = 'PRECEDING'
    FOLLOWING = 'FOLLOWING'
    UNBOUNDED_PRECEDING = 'UNBOUNDED ' + PRECEDING
    UNBOUNDED_FOLLOWING = 'UNBOUNDED ' + FOLLOWING
    CURRENT_ROW = 'CURRENT ROW'

    # Prefix for EXPLAIN queries, or None if EXPLAIN isn't supported.
    explain_prefix = None

    def __init__(self, connection):
        self.connection = connection
        self._cache = None

    def autoinc_sql(self, table, column):
        """
        Return any SQL needed to support auto-incrementing primary keys, or
        None if no SQL is necessary.

        This SQL is executed when a table is created.
        """
        return None

    def bulk_batch_size(self, fields, objs):
        """
        Return the maximum allowed batch size for the backend. The fields
        are the fields going to be inserted in the batch, the objs contains
        all the objects to be inserted.
        """
        return len(objs)

    def cache_key_culling_sql(self):
        """
        Return an SQL query that retrieves the first cache key greater than
        the n smallest.

        This is used by the 'db' cache backend to determine where to start
        culling.
        """
        return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"

    def unification_cast_sql(self, output_field):
        """
        Given a field instance, return the SQL that casts the result of a
        union to that type. The resulting string should contain a '%s'
        placeholder for the expression being cast.
        """
        return '%s'

    def date_extract_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month', or 'day', return the SQL that
        extracts a value from the given date field field_name.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')

    def date_interval_sql(self, timedelta):
        """
        Implement the date interval functionality for expressions.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')

    def date_trunc_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month', or 'day', return the SQL that
        truncates the given date field field_name to a date object with only
        the given specificity.
""" raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method.') def datetime_cast_date_sql(self, field_name, tzname): """ Return the SQL to cast a datetime value to date value. """ raise NotImplementedError( 'subclasses of BaseDatabaseOperations may require a ' 'datetime_cast_date_sql() method.' ) def datetime_cast_time_sql(self, field_name, tzname): """ Return the SQL to cast a datetime value to time value. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method') def datetime_extract_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that extracts a value from the given datetime field field_name. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method') def datetime_trunc_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that truncates the given datetime field field_name to a datetime object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method') def time_trunc_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute' or 'second', return the SQL that truncates the given time field field_name to a time object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method') def time_extract_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute', or 'second', return the SQL that extracts a value from the given time field field_name. """ return self.date_extract_sql(lookup_type, field_name) def deferrable_sql(self): """ Return the SQL to make a constraint "initially deferred" during a CREATE TABLE statement. """ return '' def distinct_sql(self, fields, params): """ Return an SQL DISTINCT clause which removes duplicate rows from the result set. If any fields are given, only check the given fields for duplicates. """ if fields: raise NotSupportedError('DISTINCT ON fields is not supported by this database backend') else: return ['DISTINCT'], [] def fetch_returned_insert_columns(self, cursor, returning_params): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the newly created data. """ return cursor.fetchone() def field_cast_sql(self, db_type, internal_type): """ Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type (e.g. 'GenericIPAddressField'), return the SQL to cast it before using it in a WHERE statement. The resulting string should contain a '%s' placeholder for the column being searched against. """ return '%s' def force_no_ordering(self): """ Return a list used in the "ORDER BY" clause to force no ordering at all. Return an empty list to include nothing in the ordering. """ return [] def for_update_sql(self, nowait=False, skip_locked=False, of=()): """ Return the FOR UPDATE SQL clause to lock rows for an update operation. 
""" return 'FOR UPDATE%s%s%s' % ( ' OF %s' % ', '.join(of) if of else '', ' NOWAIT' if nowait else '', ' SKIP LOCKED' if skip_locked else '', ) def _get_limit_offset_params(self, low_mark, high_mark): offset = low_mark or 0 if high_mark is not None: return (high_mark - offset), offset elif offset: return self.connection.ops.no_limit_value(), offset return None, offset def limit_offset_sql(self, low_mark, high_mark): """Return LIMIT/OFFSET SQL clause.""" limit, offset = self._get_limit_offset_params(low_mark, high_mark) return ' '.join(sql for sql in ( ('LIMIT %d' % limit) if limit else None, ('OFFSET %d' % offset) if offset else None, ) if sql) def last_executed_query(self, cursor, sql, params): """ Return a string of the query last executed by the given cursor, with placeholders replaced with actual values. `sql` is the raw query containing placeholders and `params` is the sequence of parameters. These are used by default, but this method exists for database backends to provide a better implementation according to their own quoting schemes. """ # Convert params to contain string values. def to_string(s): return force_str(s, strings_only=True, errors='replace') if isinstance(params, (list, tuple)): u_params = tuple(to_string(val) for val in params) elif params is None: u_params = () else: u_params = {to_string(k): to_string(v) for k, v in params.items()} return "QUERY = %r - PARAMS = %r" % (sql, u_params) def last_insert_id(self, cursor, table_name, pk_name): """ Given a cursor object that has just performed an INSERT statement into a table that has an auto-incrementing ID, return the newly created ID. `pk_name` is the name of the primary-key column. """ return cursor.lastrowid def lookup_cast(self, lookup_type, internal_type=None): """ Return the string to use in a query when performing lookups ("contains", "like", etc.). It should contain a '%s' placeholder for the column being searched against. """ return "%s" def max_in_list_size(self): """ Return the maximum number of items that can be passed in a single 'IN' list condition, or None if the backend does not impose a limit. """ return None def max_name_length(self): """ Return the maximum length of table and column names, or None if there is no limit. """ return None def no_limit_value(self): """ Return the value to use for the LIMIT when we are wanting "LIMIT infinity". Return None if the limit clause can be omitted in this case. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method') def pk_default_value(self): """ Return the value to use during an INSERT statement to specify that the field should use its default value. """ return 'DEFAULT' def prepare_sql_script(self, sql): """ Take an SQL script that may contain multiple lines and return a list of statements to feed to successive cursor.execute() calls. Since few databases are able to process raw SQL scripts in a single cursor.execute() call and PEP 249 doesn't talk about this use case, the default implementation is conservative. """ return [ sqlparse.format(statement, strip_comments=True) for statement in sqlparse.split(sql) if statement ] def process_clob(self, value): """ Return the value of a CLOB column, for backends that return a locator object that requires additional processing. """ return value def return_insert_columns(self, fields): """ For backends that support returning columns as part of an insert query, return the SQL and params to append to the INSERT query. 
        The returned fragment should contain a format string to hold the
        appropriate column.
        """
        pass

    def compiler(self, compiler_name):
        """
        Return the SQLCompiler class corresponding to the given name,
        in the namespace corresponding to the `compiler_module` attribute
        on this backend.
        """
        if self._cache is None:
            self._cache = import_module(self.compiler_module)
        return getattr(self._cache, compiler_name)

    def quote_name(self, name):
        """
        Return a quoted version of the given table, index, or column name. Do
        not quote the given name if it's already been quoted.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')

    def random_function_sql(self):
        """Return an SQL expression that returns a random value."""
        return 'RANDOM()'

    def regex_lookup(self, lookup_type):
        """
        Return the string to use in a query when performing regular expression
        lookups (using "regex" or "iregex"). It should contain a '%s'
        placeholder for the column being searched against.

        If the feature is not supported (or part of it is not supported),
        raise NotImplementedError.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')

    def savepoint_create_sql(self, sid):
        """
        Return the SQL for starting a new savepoint. Only required if the
        "uses_savepoints" feature is True. The "sid" parameter is a string
        for the savepoint id.
        """
        return "SAVEPOINT %s" % self.quote_name(sid)

    def savepoint_commit_sql(self, sid):
        """
        Return the SQL for committing the given savepoint.
        """
        return "RELEASE SAVEPOINT %s" % self.quote_name(sid)

    def savepoint_rollback_sql(self, sid):
        """
        Return the SQL for rolling back the given savepoint.
        """
        return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)

    def set_time_zone_sql(self):
        """
        Return the SQL that will set the connection's time zone.

        Return '' if the backend doesn't support time zones.
        """
        return ''

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """
        Return a list of SQL statements required to remove all data from
        the given database tables (without actually removing the tables
        themselves) and the SQL statements required to reset the sequences
        passed in `sequences`.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.

        The `allow_cascade` argument determines whether truncation may cascade
        to tables with foreign keys pointing to the tables being truncated.
        PostgreSQL requires a cascade even if these tables are empty.
        """
        raise NotImplementedError('subclasses of BaseDatabaseOperations must provide a sql_flush() method')

    def execute_sql_flush(self, using, sql_list):
        """Execute a list of SQL statements to flush the database."""
        with transaction.atomic(using=using, savepoint=self.connection.features.can_rollback_ddl):
            with self.connection.cursor() as cursor:
                for sql in sql_list:
                    cursor.execute(sql)

    def sequence_reset_by_name_sql(self, style, sequences):
        """
        Return a list of the SQL statements required to reset sequences
        passed in `sequences`.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        return []

    def sequence_reset_sql(self, style, model_list):
        """
        Return a list of the SQL statements required to reset sequences for
        the given models.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        return []  # No sequence reset required by default.

    def start_transaction_sql(self):
        """Return the SQL statement required to start a transaction."""
        return "BEGIN;"

    def end_transaction_sql(self, success=True):
        """Return the SQL statement required to end a transaction."""
        if not success:
            return "ROLLBACK;"
        return "COMMIT;"

    def tablespace_sql(self, tablespace, inline=False):
        """
        Return the SQL that will be used in a query to define the tablespace.

        Return '' if the backend doesn't support tablespaces.

        If `inline` is True, append the SQL to a row; otherwise append it to
        the entire CREATE TABLE or CREATE INDEX statement.
        """
        return ''

    def prep_for_like_query(self, x):
        """Prepare a value for use in a LIKE query."""
        return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")

    # Same as prep_for_like_query(), but called for "iexact" matches, which
    # need not necessarily be implemented using "LIKE" in the backend.
    prep_for_iexact_query = prep_for_like_query

    def validate_autopk_value(self, value):
        """
        Certain backends do not accept some values for "serial" fields
        (for example zero in MySQL). Raise a ValueError if the value is
        invalid, otherwise return the validated value.
        """
        return value

    def adapt_unknown_value(self, value):
        """
        Transform a value to something compatible with the backend driver.

        This method only depends on the type of the value. It's designed for
        cases where the target type isn't known, such as .raw() SQL queries.
        As a consequence it may not work perfectly in all circumstances.
        """
        if isinstance(value, datetime.datetime):   # must be before date
            return self.adapt_datetimefield_value(value)
        elif isinstance(value, datetime.date):
            return self.adapt_datefield_value(value)
        elif isinstance(value, datetime.time):
            return self.adapt_timefield_value(value)
        elif isinstance(value, decimal.Decimal):
            return self.adapt_decimalfield_value(value)
        else:
            return value

    def adapt_datefield_value(self, value):
        """
        Transform a date value to an object compatible with what is expected
        by the backend driver for date columns.
        """
        if value is None:
            return None
        return str(value)

    def adapt_datetimefield_value(self, value):
        """
        Transform a datetime value to an object compatible with what is
        expected by the backend driver for datetime columns.
        """
        if value is None:
            return None
        return str(value)

    def adapt_timefield_value(self, value):
        """
        Transform a time value to an object compatible with what is expected
        by the backend driver for time columns.
        """
        if value is None:
            return None
        if timezone.is_aware(value):
            raise ValueError("Django does not support timezone-aware times.")
        return str(value)

    def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
        """
        Transform a decimal.Decimal value to an object compatible with what is
        expected by the backend driver for decimal (numeric) columns.
        """
        return utils.format_number(value, max_digits, decimal_places)

    def adapt_ipaddressfield_value(self, value):
        """
        Transform a string representation of an IP address into the expected
        type for the backend driver.
        """
        return value or None

    def year_lookup_bounds_for_date_field(self, value):
        """
        Return a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateField value using a year
        lookup.

        `value` is an int, containing the looked-up year.
""" first = datetime.date(value, 1, 1) second = datetime.date(value, 12, 31) first = self.adapt_datefield_value(first) second = self.adapt_datefield_value(second) return [first, second] def year_lookup_bounds_for_datetime_field(self, value): """ Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. `value` is an int, containing the looked-up year. """ first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second] def get_db_converters(self, expression): """ Return a list of functions needed to convert field data. Some field types on some backends do not provide data in the correct format, this is the hook for converter functions. """ return [] def convert_durationfield_value(self, value, expression, connection): if value is not None: return datetime.timedelta(0, 0, value) def check_expression_support(self, expression): """ Check that the backend supports the provided expression. This is used on specific backends to rule out known expressions that have problematic or nonexistent implementations. If the expression has a known problem, the backend should raise NotSupportedError. """ pass def conditional_expression_supported_in_where_clause(self, expression): """ Return True, if the conditional expression is supported in the WHERE clause. """ return True def combine_expression(self, connector, sub_expressions): """ Combine a list of subexpressions into a single expression, using the provided connecting operator. This is required because operators can vary between backends (e.g., Oracle with %% and &) and between subexpression types (e.g., date expressions). """ conn = ' %s ' % connector return conn.join(sub_expressions) def combine_duration_expression(self, connector, sub_expressions): return self.combine_expression(connector, sub_expressions) def binary_placeholder_sql(self, value): """ Some backends require special syntax to insert binary content (MySQL for example uses '_binary %s'). """ return '%s' def modify_insert_params(self, placeholder, params): """ Allow modification of insert parameters. Needed for Oracle Spatial backend due to #10888. """ return params def integer_field_range(self, internal_type): """ Given an integer field internal type (e.g. 'PositiveIntegerField'), return a tuple of the (min_value, max_value) form representing the range of the column type bound to the field. """ return self.integer_field_ranges[internal_type] def subtract_temporals(self, internal_type, lhs, rhs): if self.connection.features.supports_temporal_subtraction: lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return '(%s - %s)' % (lhs_sql, rhs_sql), (*lhs_params, *rhs_params) raise NotSupportedError("This backend does not support %s subtraction." % internal_type) def window_frame_start(self, start): if isinstance(start, int): if start < 0: return '%d %s' % (abs(start), self.PRECEDING) elif start == 0: return self.CURRENT_ROW elif start is None: return self.UNBOUNDED_PRECEDING raise ValueError("start argument must be a negative integer, zero, or None, but got '%s'." 
% start) def window_frame_end(self, end): if isinstance(end, int): if end == 0: return self.CURRENT_ROW elif end > 0: return '%d %s' % (end, self.FOLLOWING) elif end is None: return self.UNBOUNDED_FOLLOWING raise ValueError("end argument must be a positive integer, zero, or None, but got '%s'." % end) def window_frame_rows_start_end(self, start=None, end=None): """ Return SQL for start and end points in an OVER clause window frame. """ if not self.connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') return self.window_frame_start(start), self.window_frame_end(end) def window_frame_range_start_end(self, start=None, end=None): start_, end_ = self.window_frame_rows_start_end(start, end) if ( self.connection.features.only_supports_unbounded_with_preceding_and_following and ((start and start < 0) or (end and end > 0)) ): raise NotSupportedError( '%s only supports UNBOUNDED together with PRECEDING and ' 'FOLLOWING.' % self.connection.display_name ) return start_, end_ def explain_query_prefix(self, format=None, **options): if not self.connection.features.supports_explaining_query_execution: raise NotSupportedError('This backend does not support explaining query execution.') if format: supported_formats = self.connection.features.supported_explain_formats normalized_format = format.upper() if normalized_format not in supported_formats: msg = '%s is not a recognized format.' % normalized_format if supported_formats: msg += ' Allowed formats: %s' % ', '.join(sorted(supported_formats)) raise ValueError(msg) if options: raise ValueError('Unknown options: %s' % ', '.join(sorted(options.keys()))) return self.explain_prefix def insert_statement(self, ignore_conflicts=False): return 'INSERT INTO' def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return ''
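
# Illustrative sketch (not part of Django): a concrete backend subclasses
# BaseDatabaseOperations and fills in the methods above that raise
# NotImplementedError. The quoting rule and LIMIT sentinel shown here are
# hypothetical.
class ExampleDatabaseOperations(BaseDatabaseOperations):
    def quote_name(self, name):
        # Don't double-quote a name that is already quoted.
        if name.startswith('"') and name.endswith('"'):
            return name
        return '"%s"' % name

    def no_limit_value(self):
        # A large sentinel for engines that can't use OFFSET without LIMIT.
        return 18446744073709551615
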
from collections import namedtuple

import sqlparse
from MySQLdb.constants import FIELD_TYPE

from django.db.backends.base.introspection import (
    BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models import Index
from django.utils.datastructures import OrderedSet

FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('extra', 'is_unsigned'))
InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default is_unsigned')


class DatabaseIntrospection(BaseDatabaseIntrospection):
    data_types_reverse = {
        FIELD_TYPE.BLOB: 'TextField',
        FIELD_TYPE.CHAR: 'CharField',
        FIELD_TYPE.DECIMAL: 'DecimalField',
        FIELD_TYPE.NEWDECIMAL: 'DecimalField',
        FIELD_TYPE.DATE: 'DateField',
        FIELD_TYPE.DATETIME: 'DateTimeField',
        FIELD_TYPE.DOUBLE: 'FloatField',
        FIELD_TYPE.FLOAT: 'FloatField',
        FIELD_TYPE.INT24: 'IntegerField',
        FIELD_TYPE.LONG: 'IntegerField',
        FIELD_TYPE.LONGLONG: 'BigIntegerField',
        FIELD_TYPE.SHORT: 'SmallIntegerField',
        FIELD_TYPE.STRING: 'CharField',
        FIELD_TYPE.TIME: 'TimeField',
        FIELD_TYPE.TIMESTAMP: 'DateTimeField',
        FIELD_TYPE.TINY: 'IntegerField',
        FIELD_TYPE.TINY_BLOB: 'TextField',
        FIELD_TYPE.MEDIUM_BLOB: 'TextField',
        FIELD_TYPE.LONG_BLOB: 'TextField',
        FIELD_TYPE.VAR_STRING: 'CharField',
    }

    def get_field_type(self, data_type, description):
        field_type = super().get_field_type(data_type, description)
        if 'auto_increment' in description.extra:
            if field_type == 'IntegerField':
                return 'AutoField'
            elif field_type == 'BigIntegerField':
                return 'BigAutoField'
            elif field_type == 'SmallIntegerField':
                return 'SmallAutoField'
        if description.is_unsigned:
            if field_type == 'BigIntegerField':
                return 'PositiveBigIntegerField'
            elif field_type == 'IntegerField':
                return 'PositiveIntegerField'
            elif field_type == 'SmallIntegerField':
                return 'PositiveSmallIntegerField'
        return field_type

    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        cursor.execute("SHOW FULL TABLES")
        return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
                for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
""" # information_schema database gives more accurate results for some figures: # - varchar length returned by cursor.description is an internal length, # not visible length (#5725) # - precision and scale (for decimal fields) (#5014) # - auto_increment is not available in cursor.description cursor.execute(""" SELECT column_name, data_type, character_maximum_length, numeric_precision, numeric_scale, extra, column_default, CASE WHEN column_type LIKE '%% unsigned' THEN 1 ELSE 0 END AS is_unsigned FROM information_schema.columns WHERE table_name = %s AND table_schema = DATABASE()""", [table_name]) field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()} cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) def to_int(i): return int(i) if i is not None else i fields = [] for line in cursor.description: info = field_info[line[0]] fields.append(FieldInfo( *line[:3], to_int(info.max_len) or line[3], to_int(info.num_prec) or line[4], to_int(info.num_scale) or line[5], line[6], info.column_default, info.extra, info.is_unsigned, )) return fields def get_sequences(self, cursor, table_name, table_fields=()): for field_info in self.get_table_description(cursor, table_name): if 'auto_increment' in field_info.extra: # MySQL allows only one auto-increment column per table. return [{'table': table_name, 'column': field_info.name}] return [] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. """ constraints = self.get_key_columns(cursor, table_name) relations = {} for my_fieldname, other_table, other_field in constraints: relations[my_fieldname] = (other_field, other_table) return relations def get_key_columns(self, cursor, table_name): """ Return a list of (column_name, referenced_table_name, referenced_column_name) for all key columns in the given table. """ key_columns = [] cursor.execute(""" SELECT column_name, referenced_table_name, referenced_column_name FROM information_schema.key_column_usage WHERE table_name = %s AND table_schema = DATABASE() AND referenced_table_name IS NOT NULL AND referenced_column_name IS NOT NULL""", [table_name]) key_columns.extend(cursor.fetchall()) return key_columns def get_storage_engine(self, cursor, table_name): """ Retrieve the storage engine for a given table. Return the default storage engine if the table doesn't exist. """ cursor.execute( "SELECT engine " "FROM information_schema.tables " "WHERE table_name = %s", [table_name]) result = cursor.fetchone() if not result: return self.connection.features._mysql_storage_engine return result[0] def _parse_constraint_columns(self, check_clause, columns): check_columns = OrderedSet() statement = sqlparse.parse(check_clause)[0] tokens = (token for token in statement.flatten() if not token.is_whitespace) for token in tokens: if ( token.ttype == sqlparse.tokens.Name and self.connection.ops.quote_name(token.value) == token.value and token.value[1:-1] in columns ): check_columns.add(token.value[1:-1]) return check_columns def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. 
""" constraints = {} # Get the actual constraint names and columns name_query = """ SELECT kc.`constraint_name`, kc.`column_name`, kc.`referenced_table_name`, kc.`referenced_column_name` FROM information_schema.key_column_usage AS kc WHERE kc.table_schema = DATABASE() AND kc.table_name = %s ORDER BY kc.`ordinal_position` """ cursor.execute(name_query, [table_name]) for constraint, column, ref_table, ref_column in cursor.fetchall(): if constraint not in constraints: constraints[constraint] = { 'columns': OrderedSet(), 'primary_key': False, 'unique': False, 'index': False, 'check': False, 'foreign_key': (ref_table, ref_column) if ref_column else None, } constraints[constraint]['columns'].add(column) # Now get the constraint types type_query = """ SELECT c.constraint_name, c.constraint_type FROM information_schema.table_constraints AS c WHERE c.table_schema = DATABASE() AND c.table_name = %s """ cursor.execute(type_query, [table_name]) for constraint, kind in cursor.fetchall(): if kind.lower() == "primary key": constraints[constraint]['primary_key'] = True constraints[constraint]['unique'] = True elif kind.lower() == "unique": constraints[constraint]['unique'] = True # Add check constraints. if self.connection.features.can_introspect_check_constraints: unnamed_constraints_index = 0 columns = {info.name for info in self.get_table_description(cursor, table_name)} if self.connection.mysql_is_mariadb: type_query = """ SELECT c.constraint_name, c.check_clause FROM information_schema.check_constraints AS c WHERE c.constraint_schema = DATABASE() AND c.table_name = %s """ else: type_query = """ SELECT cc.constraint_name, cc.check_clause FROM information_schema.check_constraints AS cc, information_schema.table_constraints AS tc WHERE cc.constraint_schema = DATABASE() AND tc.table_schema = cc.constraint_schema AND cc.constraint_name = tc.constraint_name AND tc.constraint_type = 'CHECK' AND tc.table_name = %s """ cursor.execute(type_query, [table_name]) for constraint, check_clause in cursor.fetchall(): constraint_columns = self._parse_constraint_columns(check_clause, columns) # Ensure uniqueness of unnamed constraints. Unnamed unique # and check columns constraints have the same name as # a column. if set(constraint_columns) == {constraint}: unnamed_constraints_index += 1 constraint = '__unnamed_constraint_%s__' % unnamed_constraints_index constraints[constraint] = { 'columns': constraint_columns, 'primary_key': False, 'unique': False, 'index': False, 'check': True, 'foreign_key': None, } # Now add in the indexes cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name)) for table, non_unique, index, colseq, column, type_ in [x[:5] + (x[10],) for x in cursor.fetchall()]: if index not in constraints: constraints[index] = { 'columns': OrderedSet(), 'primary_key': False, 'unique': False, 'check': False, 'foreign_key': None, } constraints[index]['index'] = True constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower() constraints[index]['columns'].add(column) # Convert the sorted sets to lists for constraint in constraints.values(): constraint['columns'] = list(constraint['columns']) return constraints
""" MySQL database backend for Django. Requires mysqlclient: https://pypi.org/project/mysqlclient/ """ from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.asyncio import async_unsafe from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile try: import MySQLdb as Database except ImportError as err: raise ImproperlyConfigured( 'Error loading MySQLdb module.\n' 'Did you install mysqlclient?' ) from err from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip from MySQLdb.converters import conversions # isort:skip # Some of these import MySQLdb, so import them after checking if it's installed. from .client import DatabaseClient # isort:skip from .creation import DatabaseCreation # isort:skip from .features import DatabaseFeatures # isort:skip from .introspection import DatabaseIntrospection # isort:skip from .operations import DatabaseOperations # isort:skip from .schema import DatabaseSchemaEditor # isort:skip from .validation import DatabaseValidation # isort:skip version = Database.version_info if version < (1, 3, 13): raise ImproperlyConfigured('mysqlclient 1.3.13 or newer is required; you have %s.' % Database.__version__) # MySQLdb returns TIME columns as timedelta -- they are more like timedelta in # terms of actual behavior as they are signed and include days -- and Django # expects time. django_conversions = { **conversions, **{FIELD_TYPE.TIME: backend_utils.typecast_time}, } # This should match the numerical portion of the version numbers (we can treat # versions like 5.0.24 and 5.0.24a as the same). server_version_re = _lazy_re_compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})') class CursorWrapper: """ A thin wrapper around MySQLdb's normal cursor class that catches particular exception instances and reraises them with the correct types. Implemented as a wrapper, rather than a subclass, so that it isn't stuck to the particular underlying representation returned by Connection.cursor(). """ codes_for_integrityerror = ( 1048, # Column cannot be null 1690, # BIGINT UNSIGNED value is out of range 3819, # CHECK constraint is violated 4025, # CHECK constraint failed ) def __init__(self, cursor): self.cursor = cursor def execute(self, query, args=None): try: # args is None means no string interpolation return self.cursor.execute(query, args) except Database.OperationalError as e: # Map some error codes to IntegrityError, since they seem to be # misclassified and Django would prefer the more logical place. if e.args[0] in self.codes_for_integrityerror: raise IntegrityError(*tuple(e.args)) raise def executemany(self, query, args): try: return self.cursor.executemany(query, args) except Database.OperationalError as e: # Map some error codes to IntegrityError, since they seem to be # misclassified and Django would prefer the more logical place. if e.args[0] in self.codes_for_integrityerror: raise IntegrityError(*tuple(e.args)) raise def __getattr__(self, attr): return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'mysql' # This dictionary maps Field objects to their associated MySQL column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. 
# If a column type is set to None, it won't be included in the output. data_types = { 'AutoField': 'integer AUTO_INCREMENT', 'BigAutoField': 'bigint AUTO_INCREMENT', 'BinaryField': 'longblob', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime(6)', 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'double precision', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveBigIntegerField': 'bigint UNSIGNED', 'PositiveIntegerField': 'integer UNSIGNED', 'PositiveSmallIntegerField': 'smallint UNSIGNED', 'SlugField': 'varchar(%(max_length)s)', 'SmallAutoField': 'smallint AUTO_INCREMENT', 'SmallIntegerField': 'smallint', 'TextField': 'longtext', 'TimeField': 'time(6)', 'UUIDField': 'char(32)', } # For these data types: # - MySQL < 8.0.13 and MariaDB < 10.2.1 don't accept default values and # implicitly treat them as nullable # - all versions of MySQL and MariaDB don't support full width database # indexes _limited_data_types = ( 'tinyblob', 'blob', 'mediumblob', 'longblob', 'tinytext', 'text', 'mediumtext', 'longtext', 'json', ) operators = { 'exact': '= %s', 'iexact': 'LIKE %s', 'contains': 'LIKE BINARY %s', 'icontains': 'LIKE %s', 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': 'LIKE BINARY %s', 'endswith': 'LIKE BINARY %s', 'istartswith': 'LIKE %s', 'iendswith': 'LIKE %s', } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': "LIKE BINARY CONCAT('%%', {}, '%%')", 'icontains': "LIKE CONCAT('%%', {}, '%%')", 'startswith': "LIKE BINARY CONCAT({}, '%%')", 'istartswith': "LIKE CONCAT({}, '%%')", 'endswith': "LIKE BINARY CONCAT('%%', {})", 'iendswith': "LIKE CONCAT('%%', {})", } isolation_levels = { 'read uncommitted', 'read committed', 'repeatable read', 'serializable', } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations validation_class = DatabaseValidation def get_connection_params(self): kwargs = { 'conv': django_conversions, 'charset': 'utf8', } settings_dict = self.settings_dict if settings_dict['USER']: kwargs['user'] = settings_dict['USER'] if settings_dict['NAME']: kwargs['db'] = settings_dict['NAME'] if settings_dict['PASSWORD']: kwargs['passwd'] = settings_dict['PASSWORD'] if settings_dict['HOST'].startswith('/'): kwargs['unix_socket'] = settings_dict['HOST'] elif settings_dict['HOST']: kwargs['host'] = settings_dict['HOST'] if settings_dict['PORT']: kwargs['port'] = int(settings_dict['PORT']) # We need the number of potentially affected rows after an # "UPDATE", not the number of changed rows. 
kwargs['client_flag'] = CLIENT.FOUND_ROWS # Validate the transaction isolation level, if specified. options = settings_dict['OPTIONS'].copy() isolation_level = options.pop('isolation_level', 'read committed') if isolation_level: isolation_level = isolation_level.lower() if isolation_level not in self.isolation_levels: raise ImproperlyConfigured( "Invalid transaction isolation level '%s' specified.\n" "Use one of %s, or None." % ( isolation_level, ', '.join("'%s'" % s for s in sorted(self.isolation_levels)) )) self.isolation_level = isolation_level kwargs.update(options) return kwargs @async_unsafe def get_new_connection(self, conn_params): return Database.connect(**conn_params) def init_connection_state(self): assignments = [] if self.features.is_sql_auto_is_null_enabled: # SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on # a recently inserted row will return when the field is tested # for NULL. Disabling this brings this aspect of MySQL in line # with SQL standards. assignments.append('SET SQL_AUTO_IS_NULL = 0') if self.isolation_level: assignments.append('SET SESSION TRANSACTION ISOLATION LEVEL %s' % self.isolation_level.upper()) if assignments: with self.cursor() as cursor: cursor.execute('; '.join(assignments)) @async_unsafe def create_cursor(self, name=None): cursor = self.connection.cursor() return CursorWrapper(cursor) def _rollback(self): try: BaseDatabaseWrapper._rollback(self) except Database.NotSupportedError: pass def _set_autocommit(self, autocommit): with self.wrap_database_errors: self.connection.autocommit(autocommit) def disable_constraint_checking(self): """ Disable foreign key checks, primarily for use in adding rows with forward references. Always return True to indicate constraint checks need to be re-enabled. """ self.cursor().execute('SET foreign_key_checks=0') return True def enable_constraint_checking(self): """ Re-enable foreign key checks after they have been disabled. """ # Override needs_rollback in case constraint_checks_disabled is # nested inside transaction.atomic. self.needs_rollback, needs_rollback = False, self.needs_rollback try: self.cursor().execute('SET foreign_key_checks=1') finally: self.needs_rollback = needs_rollback def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. 
""" with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise IntegrityError( "The row in table '%s' with primary key '%s' has an invalid " "foreign key: %s.%s contains a value '%s' that does not " "have a corresponding value in %s.%s." % ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): try: self.connection.ping() except Database.Error: return False else: return True @cached_property def display_name(self): return 'MariaDB' if self.mysql_is_mariadb else 'MySQL' @cached_property def data_type_check_constraints(self): if self.features.supports_column_check_constraints: return { 'PositiveBigIntegerField': '`%(column)s` >= 0', 'PositiveIntegerField': '`%(column)s` >= 0', 'PositiveSmallIntegerField': '`%(column)s` >= 0', } return {} @cached_property def mysql_server_info(self): with self.temporary_connection() as cursor: cursor.execute('SELECT VERSION()') return cursor.fetchone()[0] @cached_property def mysql_version(self): match = server_version_re.match(self.mysql_server_info) if not match: raise Exception('Unable to determine MySQL version from version string %r' % self.mysql_server_info) return tuple(int(x) for x in match.groups()) @cached_property def mysql_is_mariadb(self): return 'mariadb' in self.mysql_server_info.lower()
import subprocess import sys from django.db.backends.base.creation import BaseDatabaseCreation from .client import DatabaseClient class DatabaseCreation(BaseDatabaseCreation): def sql_table_creation_suffix(self): suffix = [] test_settings = self.connection.settings_dict['TEST'] if test_settings['CHARSET']: suffix.append('CHARACTER SET %s' % test_settings['CHARSET']) if test_settings['COLLATION']: suffix.append('COLLATE %s' % test_settings['COLLATION']) return ' '.join(suffix) def _execute_create_test_db(self, cursor, parameters, keepdb=False): try: super()._execute_create_test_db(cursor, parameters, keepdb) except Exception as e: if len(e.args) < 1 or e.args[0] != 1007: # All errors except "database exists" (1007) cancel tests. self.log('Got an error creating the test database: %s' % e) sys.exit(2) else: raise def _clone_test_db(self, suffix, verbosity, keepdb=False): source_database_name = self.connection.settings_dict['NAME'] target_database_name = self.get_test_db_clone_settings(suffix)['NAME'] test_db_params = { 'dbname': self.connection.ops.quote_name(target_database_name), 'suffix': self.sql_table_creation_suffix(), } with self._nodb_connection.cursor() as cursor: try: self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception: if keepdb: # If the database should be kept, skip everything else. return try: if verbosity >= 1: self.log('Destroying old test database for alias %s...' % ( self._get_database_display_str(verbosity, target_database_name), )) cursor.execute('DROP DATABASE %(dbname)s' % test_db_params) self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception as e: self.log('Got an error recreating the test database: %s' % e) sys.exit(2) self._clone_db(source_database_name, target_database_name) def _clone_db(self, source_database_name, target_database_name): dump_args = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)[1:] dump_cmd = ['mysqldump', *dump_args[:-1], '--routines', '--events', source_database_name] load_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict) load_cmd[-1] = target_database_name with subprocess.Popen(dump_cmd, stdout=subprocess.PIPE) as dump_proc: with subprocess.Popen(load_cmd, stdin=dump_proc.stdout, stdout=subprocess.DEVNULL): # Allow dump_proc to receive a SIGPIPE if the load process exits. dump_proc.stdout.close()
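# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the backend): the process-pipe pattern
# used by _clone_db() above, reduced to placeholder POSIX commands so it is
# runnable without mysqldump/mysql. The key detail is closing the producer's
# stdout handle in the parent so the producer receives a SIGPIPE if the
# consumer exits early.
# ---------------------------------------------------------------------------
import subprocess

producer_cmd = ['echo', 'SELECT 1;']  # stands in for the mysqldump command
consumer_cmd = ['cat']                # stands in for the mysql load command

with subprocess.Popen(producer_cmd, stdout=subprocess.PIPE) as producer:
    with subprocess.Popen(consumer_cmd, stdin=producer.stdout,
                          stdout=subprocess.DEVNULL):
        # Allow the producer to receive a SIGPIPE if the consumer exits.
        producer.stdout.close()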
import operator from django.db import InterfaceError from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): allows_group_by_selected_pks = True can_return_columns_from_insert = True can_return_rows_from_bulk_insert = True has_real_datatype = True has_native_uuid_field = True has_native_duration_field = True can_defer_constraint_checks = True has_select_for_update = True has_select_for_update_nowait = True has_select_for_update_of = True has_select_for_update_skip_locked = True can_release_savepoints = True supports_tablespaces = True supports_transactions = True can_introspect_autofield = True can_introspect_ip_address_field = True can_introspect_materialized_views = True can_introspect_small_integer_field = True can_distinct_on_fields = True can_rollback_ddl = True supports_combined_alters = True nulls_order_largest = True closed_cursor_error_class = InterfaceError has_case_insensitive_like = False greatest_least_ignores_nulls = True can_clone_databases = True supports_temporal_subtraction = True supports_slicing_ordering_in_compound = True create_test_procedure_without_params_sql = """ CREATE FUNCTION test_procedure () RETURNS void AS $$ DECLARE V_I INTEGER; BEGIN V_I := 1; END; $$ LANGUAGE plpgsql;""" create_test_procedure_with_int_param_sql = """ CREATE FUNCTION test_procedure (P_I INTEGER) RETURNS void AS $$ DECLARE V_I INTEGER; BEGIN V_I := P_I; END; $$ LANGUAGE plpgsql;""" requires_casted_case_in_updates = True supports_over_clause = True only_supports_unbounded_with_preceding_and_following = True supports_aggregate_filter_clause = True supported_explain_formats = {'JSON', 'TEXT', 'XML', 'YAML'} validates_explain_options = False # A query will error on invalid options. @cached_property def is_postgresql_9_6(self): return self.connection.pg_version >= 90600 @cached_property def is_postgresql_10(self): return self.connection.pg_version >= 100000 @cached_property def is_postgresql_11(self): return self.connection.pg_version >= 110000 @cached_property def is_postgresql_12(self): return self.connection.pg_version >= 120000 has_bloom_index = property(operator.attrgetter('is_postgresql_9_6')) has_brin_autosummarize = property(operator.attrgetter('is_postgresql_10')) has_phraseto_tsquery = property(operator.attrgetter('is_postgresql_9_6')) has_websearch_to_tsquery = property(operator.attrgetter('is_postgresql_11')) supports_table_partitions = property(operator.attrgetter('is_postgresql_10'))
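# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the backend): the integer version encoding
# that the pg_version comparisons above rely on. psycopg2 reports
# connection.server_version as e.g. 110005 for PostgreSQL 11.5, so a check
# such as `pg_version >= 110000` means "PostgreSQL 11 or newer".
# pg_version_tuple() is a hypothetical helper for illustration only.
# ---------------------------------------------------------------------------
def pg_version_tuple(server_version):
    # PostgreSQL 10+ encodes major * 10000 + minor; 9.x packs three parts.
    if server_version >= 100000:
        return (server_version // 10000, server_version % 10000)
    return (
        server_version // 10000,
        server_version // 100 % 100,
        server_version % 100,
    )


assert pg_version_tuple(110005) == (11, 5)
assert pg_version_tuple(90624) == (9, 6, 24)
assert 110005 >= 110000  # is_postgresql_11 would be True on this server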
from django.db.backends.base.introspection import ( BaseDatabaseIntrospection, FieldInfo, TableInfo, ) from django.db.models import Index class DatabaseIntrospection(BaseDatabaseIntrospection): # Maps type codes to Django Field types. data_types_reverse = { 16: 'BooleanField', 17: 'BinaryField', 20: 'BigIntegerField', 21: 'SmallIntegerField', 23: 'IntegerField', 25: 'TextField', 700: 'FloatField', 701: 'FloatField', 869: 'GenericIPAddressField', 1042: 'CharField', # blank-padded 1043: 'CharField', 1082: 'DateField', 1083: 'TimeField', 1114: 'DateTimeField', 1184: 'DateTimeField', 1186: 'DurationField', 1266: 'TimeField', 1700: 'DecimalField', 2950: 'UUIDField', } ignored_tables = [] def get_field_type(self, data_type, description): field_type = super().get_field_type(data_type, description) if description.default and 'nextval' in description.default: if field_type == 'IntegerField': return 'AutoField' elif field_type == 'BigIntegerField': return 'BigAutoField' elif field_type == 'SmallIntegerField': return 'SmallAutoField' return field_type def get_table_list(self, cursor): """Return a list of table and view names in the current database.""" cursor.execute(""" SELECT c.relname, CASE WHEN {} THEN 'p' WHEN c.relkind IN ('m', 'v') THEN 'v' ELSE 't' END FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v') AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid) """.format('c.relispartition' if self.connection.features.supports_table_partitions else 'FALSE')) return [TableInfo(*row) for row in cursor.fetchall() if row[0] not in self.ignored_tables] def get_table_description(self, cursor, table_name): """ Return a description of the table with the DB-API cursor.description interface. """ # Query the pg_catalog tables as cursor.description does not reliably # return the nullable property and information_schema.columns does not # contain details of materialized views. 
cursor.execute(""" SELECT a.attname AS column_name, NOT (a.attnotnull OR (t.typtype = 'd' AND t.typnotnull)) AS is_nullable, pg_get_expr(ad.adbin, ad.adrelid) AS column_default FROM pg_attribute a LEFT JOIN pg_attrdef ad ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum JOIN pg_type t ON a.atttypid = t.oid JOIN pg_class c ON a.attrelid = c.oid JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v') AND c.relname = %s AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid) """, [table_name]) field_map = {line[0]: line[1:] for line in cursor.fetchall()} cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) return [ FieldInfo( line.name, line.type_code, line.display_size, line.internal_size, line.precision, line.scale, *field_map[line.name], ) for line in cursor.description ] def get_sequences(self, cursor, table_name, table_fields=()): cursor.execute(""" SELECT s.relname as sequence_name, col.attname FROM pg_class s JOIN pg_namespace sn ON sn.oid = s.relnamespace JOIN pg_depend d ON d.refobjid = s.oid AND d.refclassid = 'pg_class'::regclass JOIN pg_attrdef ad ON ad.oid = d.objid AND d.classid = 'pg_attrdef'::regclass JOIN pg_attribute col ON col.attrelid = ad.adrelid AND col.attnum = ad.adnum JOIN pg_class tbl ON tbl.oid = ad.adrelid WHERE s.relkind = 'S' AND d.deptype in ('a', 'n') AND pg_catalog.pg_table_is_visible(tbl.oid) AND tbl.relname = %s """, [table_name]) return [ {'name': row[0], 'table': table_name, 'column': row[1]} for row in cursor.fetchall() ] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. """ return {row[0]: (row[2], row[1]) for row in self.get_key_columns(cursor, table_name)} def get_key_columns(self, cursor, table_name): cursor.execute(""" SELECT a1.attname, c2.relname, a2.attname FROM pg_constraint con LEFT JOIN pg_class c1 ON con.conrelid = c1.oid LEFT JOIN pg_class c2 ON con.confrelid = c2.oid LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1] LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1] WHERE c1.relname = %s AND con.contype = 'f' AND c1.relnamespace = c2.relnamespace AND pg_catalog.pg_table_is_visible(c1.oid) """, [table_name]) return cursor.fetchall() def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. Also retrieve the definition of expression-based indexes. """ constraints = {} # Loop over the key table, collecting things as constraints. The column # array must return column names in the same order in which they were # created. cursor.execute(""" SELECT c.conname, array( SELECT attname FROM unnest(c.conkey) WITH ORDINALITY cols(colid, arridx) JOIN pg_attribute AS ca ON cols.colid = ca.attnum WHERE ca.attrelid = c.conrelid ORDER BY cols.arridx ), c.contype, (SELECT fkc.relname || '.' 
|| fka.attname FROM pg_attribute AS fka JOIN pg_class AS fkc ON fka.attrelid = fkc.oid WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]), cl.reloptions FROM pg_constraint AS c JOIN pg_class AS cl ON c.conrelid = cl.oid WHERE cl.relname = %s AND pg_catalog.pg_table_is_visible(cl.oid) """, [table_name]) for constraint, columns, kind, used_cols, options in cursor.fetchall(): constraints[constraint] = { "columns": columns, "primary_key": kind == "p", "unique": kind in ["p", "u"], "foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None, "check": kind == "c", "index": False, "definition": None, "options": options, } # Now get indexes cursor.execute(""" SELECT indexname, array_agg(attname ORDER BY arridx), indisunique, indisprimary, array_agg(ordering ORDER BY arridx), amname, exprdef, s2.attoptions FROM ( SELECT c2.relname as indexname, idx.*, attr.attname, am.amname, CASE WHEN idx.indexprs IS NOT NULL THEN pg_get_indexdef(idx.indexrelid) END AS exprdef, CASE am.amname WHEN 'btree' THEN CASE (option & 1) WHEN 1 THEN 'DESC' ELSE 'ASC' END END as ordering, c2.reloptions as attoptions FROM ( SELECT * FROM pg_index i, unnest(i.indkey, i.indoption) WITH ORDINALITY koi(key, option, arridx) ) idx LEFT JOIN pg_class c ON idx.indrelid = c.oid LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid LEFT JOIN pg_am am ON c2.relam = am.oid LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key WHERE c.relname = %s AND pg_catalog.pg_table_is_visible(c.oid) ) s2 GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions; """, [table_name]) for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall(): if index not in constraints: basic_index = type_ == 'btree' and not index.endswith('_btree') and options is None constraints[index] = { "columns": columns if columns != [None] else [], "orders": orders if orders != [None] else [], "primary_key": primary, "unique": unique, "foreign_key": None, "check": False, "index": True, "type": Index.suffix if basic_index else type_, "definition": definition, "options": options, } return constraints
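# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the backend): the dictionary shape that
# get_constraints() above produces for pg_constraint rows, with hypothetical
# table/constraint names. Handy as a reference when writing code that
# consumes the result.
# ---------------------------------------------------------------------------
example_constraints = {
    'myapp_book_pkey': {
        'columns': ['id'],
        'primary_key': True,
        'unique': True,
        'foreign_key': None,
        'check': False,
        'index': False,
        'definition': None,
        'options': None,
    },
    'myapp_book_author_id_fkey': {
        'columns': ['author_id'],
        'primary_key': False,
        'unique': False,
        # For kind == 'f', "table.column" is split once into a tuple:
        'foreign_key': ('myapp_author', 'id'),
        'check': False,
        'index': False,
        'definition': None,
        'options': None,
    },
}
foreign_keys = {
    name: details['foreign_key']
    for name, details in example_constraints.items()
    if details['foreign_key']
}
assert foreign_keys == {'myapp_book_author_id_fkey': ('myapp_author', 'id')}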
""" PostgreSQL database backend for Django. Requires psycopg 2: https://www.psycopg.org/ """ import asyncio import threading import warnings from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import DatabaseError as WrappedDatabaseError, connections from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.utils import ( CursorDebugWrapper as BaseCursorDebugWrapper, ) from django.utils.asyncio import async_unsafe from django.utils.functional import cached_property from django.utils.safestring import SafeString from django.utils.version import get_version_tuple try: import psycopg2 as Database import psycopg2.extensions import psycopg2.extras except ImportError as e: raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e) def psycopg2_version(): version = psycopg2.__version__.split(' ', 1)[0] return get_version_tuple(version) PSYCOPG2_VERSION = psycopg2_version() if PSYCOPG2_VERSION < (2, 5, 4): raise ImproperlyConfigured("psycopg2_version 2.5.4 or newer is required; you have %s" % psycopg2.__version__) # Some of these import psycopg2, so import them after checking if it's installed. from .client import DatabaseClient # NOQA isort:skip from .creation import DatabaseCreation # NOQA isort:skip from .features import DatabaseFeatures # NOQA isort:skip from .introspection import DatabaseIntrospection # NOQA isort:skip from .operations import DatabaseOperations # NOQA isort:skip from .schema import DatabaseSchemaEditor # NOQA isort:skip psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString) psycopg2.extras.register_uuid() # Register support for inet[] manually so we don't have to handle the Inet() # object on load all the time. INETARRAY_OID = 1041 INETARRAY = psycopg2.extensions.new_array_type( (INETARRAY_OID,), 'INETARRAY', psycopg2.extensions.UNICODE, ) psycopg2.extensions.register_type(INETARRAY) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'postgresql' display_name = 'PostgreSQL' # This dictionary maps Field objects to their associated PostgreSQL column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. 
data_types = { 'AutoField': 'serial', 'BigAutoField': 'bigserial', 'BinaryField': 'bytea', 'BooleanField': 'boolean', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'timestamp with time zone', 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)', 'DurationField': 'interval', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'double precision', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'inet', 'GenericIPAddressField': 'inet', 'NullBooleanField': 'boolean', 'OneToOneField': 'integer', 'PositiveBigIntegerField': 'bigint', 'PositiveIntegerField': 'integer', 'PositiveSmallIntegerField': 'smallint', 'SlugField': 'varchar(%(max_length)s)', 'SmallAutoField': 'smallserial', 'SmallIntegerField': 'smallint', 'TextField': 'text', 'TimeField': 'time', 'UUIDField': 'uuid', } data_type_check_constraints = { 'PositiveBigIntegerField': '"%(column)s" >= 0', 'PositiveIntegerField': '"%(column)s" >= 0', 'PositiveSmallIntegerField': '"%(column)s" >= 0', } operators = { 'exact': '= %s', 'iexact': '= UPPER(%s)', 'contains': 'LIKE %s', 'icontains': 'LIKE UPPER(%s)', 'regex': '~ %s', 'iregex': '~* %s', 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': 'LIKE %s', 'endswith': 'LIKE %s', 'istartswith': 'LIKE UPPER(%s)', 'iendswith': 'LIKE UPPER(%s)', } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, E'\\', E'\\\\'), E'%%', E'\\%%'), E'_', E'\\_')" pattern_ops = { 'contains': "LIKE '%%' || {} || '%%'", 'icontains': "LIKE '%%' || UPPER({}) || '%%'", 'startswith': "LIKE {} || '%%'", 'istartswith': "LIKE UPPER({}) || '%%'", 'endswith': "LIKE '%%' || {}", 'iendswith': "LIKE '%%' || UPPER({})", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations # PostgreSQL backend-specific attributes. _named_cursor_idx = 0 def get_connection_params(self): settings_dict = self.settings_dict # None may be used to connect to the default 'postgres' db if settings_dict['NAME'] == '': raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value.") if len(settings_dict['NAME'] or '') > self.ops.max_name_length(): raise ImproperlyConfigured( "The database name '%s' (%d characters) is longer than " "PostgreSQL's limit of %d characters. Supply a shorter NAME " "in settings.DATABASES." 
% ( settings_dict['NAME'], len(settings_dict['NAME']), self.ops.max_name_length(), ) ) conn_params = { 'database': settings_dict['NAME'] or 'postgres', **settings_dict['OPTIONS'], } conn_params.pop('isolation_level', None) if settings_dict['USER']: conn_params['user'] = settings_dict['USER'] if settings_dict['PASSWORD']: conn_params['password'] = settings_dict['PASSWORD'] if settings_dict['HOST']: conn_params['host'] = settings_dict['HOST'] if settings_dict['PORT']: conn_params['port'] = settings_dict['PORT'] return conn_params @async_unsafe def get_new_connection(self, conn_params): connection = Database.connect(**conn_params) # self.isolation_level must be set: # - after connecting to the database in order to obtain the database's # default when no value is explicitly specified in options. # - before calling _set_autocommit() because if autocommit is on, that # will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT. options = self.settings_dict['OPTIONS'] try: self.isolation_level = options['isolation_level'] except KeyError: self.isolation_level = connection.isolation_level else: # Set the isolation level to the value from OPTIONS. if self.isolation_level != connection.isolation_level: connection.set_session(isolation_level=self.isolation_level) return connection def ensure_timezone(self): if self.connection is None: return False conn_timezone_name = self.connection.get_parameter_status('TimeZone') timezone_name = self.timezone_name if timezone_name and conn_timezone_name != timezone_name: with self.connection.cursor() as cursor: cursor.execute(self.ops.set_time_zone_sql(), [timezone_name]) return True return False def init_connection_state(self): self.connection.set_client_encoding('UTF8') timezone_changed = self.ensure_timezone() if timezone_changed: # Commit after setting the time zone (see #17062) if not self.get_autocommit(): self.connection.commit() @async_unsafe def create_cursor(self, name=None): if name: # In autocommit mode, the cursor will be used outside of a # transaction, hence use a holdable cursor. cursor = self.connection.cursor(name, scrollable=False, withhold=self.connection.autocommit) else: cursor = self.connection.cursor() cursor.tzinfo_factory = self.tzinfo_factory if settings.USE_TZ else None return cursor def tzinfo_factory(self, offset): return self.timezone @async_unsafe def chunked_cursor(self): self._named_cursor_idx += 1 # Get the current async task # Note that right now this is behind @async_unsafe, so this is # unreachable, but in future we'll start loosening this restriction. # For now, it's here so that every use of "threading" is # also async-compatible. try: if hasattr(asyncio, 'current_task'): # Python 3.7 and up current_task = asyncio.current_task() else: # Python 3.6 current_task = asyncio.Task.current_task() except RuntimeError: current_task = None # Current task can be none even if the current_task call didn't error if current_task: task_ident = str(id(current_task)) else: task_ident = 'sync' # Use that and the thread ident to get a unique name return self._cursor( name='_django_curs_%d_%s_%d' % ( # Avoid reusing name in other threads / tasks threading.current_thread().ident, task_ident, self._named_cursor_idx, ) ) def _set_autocommit(self, autocommit): with self.wrap_database_errors: self.connection.autocommit = autocommit def check_constraints(self, table_names=None): """ Check constraints by setting them to immediate. Return them to deferred afterward. 
""" self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE') self.cursor().execute('SET CONSTRAINTS ALL DEFERRED') def is_usable(self): try: # Use a psycopg cursor directly, bypassing Django's utilities. self.connection.cursor().execute("SELECT 1") except Database.Error: return False else: return True @property def _nodb_connection(self): nodb_connection = super()._nodb_connection try: nodb_connection.ensure_connection() except (Database.DatabaseError, WrappedDatabaseError): warnings.warn( "Normally Django will use a connection to the 'postgres' database " "to avoid running initialization queries against the production " "database when it's not needed (for example, when running tests). " "Django was unable to create a connection to the 'postgres' database " "and will use the first PostgreSQL database instead.", RuntimeWarning ) for connection in connections.all(): if connection.vendor == 'postgresql' and connection.settings_dict['NAME'] != 'postgres': return self.__class__( {**self.settings_dict, 'NAME': connection.settings_dict['NAME']}, alias=self.alias, ) return nodb_connection @cached_property def pg_version(self): with self.temporary_connection(): return self.connection.server_version def make_debug_cursor(self, cursor): return CursorDebugWrapper(cursor, self) class CursorDebugWrapper(BaseCursorDebugWrapper): def copy_expert(self, sql, file, *args): with self.debug_sql(sql): return self.cursor.copy_expert(sql, file, *args) def copy_to(self, file, table, *args, **kwargs): with self.debug_sql(sql='COPY %s TO STDOUT' % table): return self.cursor.copy_to(file, table, *args, **kwargs)
from psycopg2.extras import Inet from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'varchar' explain_prefix = 'EXPLAIN' cast_data_types = { 'AutoField': 'integer', 'BigAutoField': 'bigint', 'SmallAutoField': 'smallint', } def unification_cast_sql(self, output_field): internal_type = output_field.get_internal_type() if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"): # PostgreSQL will resolve a union as type 'text' if input types are # 'unknown'. # https://www.postgresql.org/docs/current/typeconv-union-case.html # These fields cannot be implicitly cast back in the default # PostgreSQL configuration so we need to explicitly cast them. # We must also remove components of the type within brackets: # varchar(255) -> varchar. return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0] return '%s' def date_extract_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT if lookup_type == 'week_day': # For consistency across backends, we return Sunday=1, Saturday=7. return "EXTRACT('dow' FROM %s) + 1" % field_name elif lookup_type == 'iso_week_day': return "EXTRACT('isodow' FROM %s)" % field_name elif lookup_type == 'iso_year': return "EXTRACT('isoyear' FROM %s)" % field_name else: return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name) def date_trunc_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname.replace('+', '-') elif '-' in tzname: return tzname.replace('-', '+') return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ: field_name = "%s AT TIME ZONE '%s'" % (field_name, self._prepare_tzname_delta(tzname)) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::date' % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::time' % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def time_trunc_sql(self, lookup_type, field_name): return "DATE_TRUNC('%s', %s)::time" % (lookup_type, field_name) def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_rows(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data. 
""" return cursor.fetchall() def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' # Cast text lookups to text to allow things like filter(x__contains=4) if lookup_type in ('iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'): if internal_type in ('IPAddressField', 'GenericIPAddressField'): lookup = "HOST(%s)" elif internal_type in ('CICharField', 'CIEmailField', 'CITextField'): lookup = '%s::citext' else: lookup = "%s::text" # Use UPPER(x) for case-insensitive lookups; it's faster. if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): lookup = 'UPPER(%s)' % lookup return lookup def no_limit_value(self): return None def prepare_sql_script(self, sql): return [sql] def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def set_time_zone_sql(self): return "SET TIME ZONE %s" def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables: # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows # us to truncate tables referenced by a foreign key in any other # table. tables_sql = ', '.join( style.SQL_FIELD(self.quote_name(table)) for table in tables) if allow_cascade: sql = ['%s %s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, style.SQL_KEYWORD('CASCADE'), )] else: sql = ['%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, )] sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements # to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info['table'] # 'id' will be the case if it's an m2m using an autogenerated # intermediate table (see BaseDatabaseIntrospection.sequence_list). column_name = sequence_info['column'] or 'id' sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(self.quote_name(table_name)), style.SQL_FIELD(column_name), )) return sql def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def sequence_reset_sql(self, style, model_list): from django.db import models output = [] qn = self.quote_name for model in model_list: # Use `coalesce` to set the sequence for each model to the max pk value if there are records, # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true # if there are records (as the max pk value is already in use), otherwise set it to false. # Use pg_get_serial_sequence to get the underlying sequence name from the table name # and column name (available since PostgreSQL 8) for f in model._meta.local_fields: if isinstance(f, models.AutoField): output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(model._meta.db_table)), style.SQL_FIELD(f.column), style.SQL_FIELD(qn(f.column)), style.SQL_FIELD(qn(f.column)), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(model._meta.db_table)), ) ) break # Only one AutoField is allowed per model, so don't bother continuing. 
for f in model._meta.many_to_many: if not f.remote_field.through: output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(f.m2m_db_table())), style.SQL_FIELD('id'), style.SQL_FIELD(qn('id')), style.SQL_FIELD(qn('id')), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(f.m2m_db_table())) ) ) return output def prep_for_iexact_query(self, x): return x def max_name_length(self): """ Return the maximum length of an identifier. The maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h. This implementation returns 63, but can be overridden by a custom database backend that inherits most of its behavior from this one. """ return 63 def distinct_sql(self, fields, params): if fields: params = [param for param_list in params for param in param_list] return (['DISTINCT ON (%s)' % ', '.join(fields)], params) else: return ['DISTINCT'], [] def last_executed_query(self, cursor, sql, params): # https://www.psycopg.org/docs/cursor.html#cursor.query # The query attribute is a Psycopg extension to the DB API 2.0. if cursor.query is not None: return cursor.query.decode() return None def return_insert_columns(self, fields): if not fields: return '', () columns = [ '%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), ) for field in fields ] return 'RETURNING %s' % ', '.join(columns), () def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def adapt_datefield_value(self, value): return value def adapt_datetimefield_value(self, value): return value def adapt_timefield_value(self, value): return value def adapt_ipaddressfield_value(self, value): if value: return Inet(value) return None def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs) def explain_query_prefix(self, format=None, **options): prefix = super().explain_query_prefix(format) extra = {} if format: extra['FORMAT'] = format if options: extra.update({ name.upper(): 'true' if value else 'false' for name, value in options.items() }) if extra: prefix += ' (%s)' % ', '.join('%s %s' % i for i in extra.items()) return prefix def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return 'ON CONFLICT DO NOTHING' if ignore_conflicts else super().ignore_conflicts_suffix_sql(ignore_conflicts)
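# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the backend): why _prepare_tzname_delta()
# above swaps '+' and '-'. When a fixed numeric offset is passed to
# AT TIME ZONE, PostgreSQL applies the POSIX sign convention, which is
# inverted relative to the ISO-8601 style offsets Django works with, so the
# sign has to be flipped before the name is embedded in SQL.
# ---------------------------------------------------------------------------
def prepare_tzname_delta(tzname):
    # Mirrors DatabaseOperations._prepare_tzname_delta() for standalone use.
    if '+' in tzname:
        return tzname.replace('+', '-')
    elif '-' in tzname:
        return tzname.replace('-', '+')
    return tzname


assert prepare_tzname_delta('+05:30') == '-05:30'
assert prepare_tzname_delta('-03:00') == '+03:00'
assert prepare_tzname_delta('Europe/Paris') == 'Europe/Paris'  # names pass through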
import sys from psycopg2 import errorcodes from django.db.backends.base.creation import BaseDatabaseCreation from django.db.backends.utils import strip_quotes class DatabaseCreation(BaseDatabaseCreation): def _quote_name(self, name): return self.connection.ops.quote_name(name) def _get_database_create_suffix(self, encoding=None, template=None): suffix = "" if encoding: suffix += " ENCODING '{}'".format(encoding) if template: suffix += " TEMPLATE {}".format(self._quote_name(template)) return suffix and "WITH" + suffix def sql_table_creation_suffix(self): test_settings = self.connection.settings_dict['TEST'] assert test_settings['COLLATION'] is None, ( "PostgreSQL does not support collation setting at database creation time." ) return self._get_database_create_suffix( encoding=test_settings['CHARSET'], template=test_settings.get('TEMPLATE'), ) def _database_exists(self, cursor, database_name): cursor.execute('SELECT 1 FROM pg_catalog.pg_database WHERE datname = %s', [strip_quotes(database_name)]) return cursor.fetchone() is not None def _execute_create_test_db(self, cursor, parameters, keepdb=False): try: if keepdb and self._database_exists(cursor, parameters['dbname']): # If the database should be kept and it already exists, don't # try to create a new one. return super()._execute_create_test_db(cursor, parameters, keepdb) except Exception as e: if getattr(e.__cause__, 'pgcode', '') != errorcodes.DUPLICATE_DATABASE: # All errors except "database already exists" cancel tests. self.log('Got an error creating the test database: %s' % e) sys.exit(2) elif not keepdb: # If the database should be kept, ignore "database already # exists". raise def _clone_test_db(self, suffix, verbosity, keepdb=False): # CREATE DATABASE ... WITH TEMPLATE ... requires closing connections # to the template database. self.connection.close() source_database_name = self.connection.settings_dict['NAME'] target_database_name = self.get_test_db_clone_settings(suffix)['NAME'] test_db_params = { 'dbname': self._quote_name(target_database_name), 'suffix': self._get_database_create_suffix(template=source_database_name), } with self._nodb_connection.cursor() as cursor: try: self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception: try: if verbosity >= 1: self.log('Destroying old test database for alias %s...' % ( self._get_database_display_str(verbosity, target_database_name), )) cursor.execute('DROP DATABASE %(dbname)s' % test_db_params) self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception as e: self.log('Got an error cloning the test database: %s' % e) sys.exit(2)
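# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the backend): the suffix-building logic of
# _get_database_create_suffix() above, with quoting simplified to plain
# double quotes so it runs standalone. Note the `suffix and "WITH" + suffix`
# idiom: an empty suffix yields an empty string rather than a bare "WITH".
# ---------------------------------------------------------------------------
def database_create_suffix(encoding=None, template=None):
    suffix = ""
    if encoding:
        suffix += " ENCODING '{}'".format(encoding)
    if template:
        suffix += ' TEMPLATE "{}"'.format(template)
    return suffix and "WITH" + suffix


assert database_create_suffix() == ""
assert database_create_suffix(encoding='UTF8') == "WITH ENCODING 'UTF8'"
assert database_create_suffix(encoding='UTF8', template='source_db') == (
    "WITH ENCODING 'UTF8' TEMPLATE \"source_db\""
)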
import re from collections import namedtuple import sqlparse from django.db.backends.base.introspection import ( BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo, ) from django.db.models import Index from django.utils.regex_helper import _lazy_re_compile FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('pk',)) field_size_re = _lazy_re_compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$') def get_field_size(name): """ Extract the size number from a "varchar(11)" type name """ m = field_size_re.search(name) return int(m.group(1)) if m else None # This light wrapper "fakes" a dictionary interface, because some SQLite data # types include variables in them -- e.g. "varchar(30)" -- and can't be matched # as a simple dictionary lookup. class FlexibleFieldLookupDict: # Maps SQL types to Django Field types. Some of the SQL types have multiple # entries here because SQLite allows for anything and doesn't normalize the # field type; it uses whatever was given. base_data_types_reverse = { 'bool': 'BooleanField', 'boolean': 'BooleanField', 'smallint': 'SmallIntegerField', 'smallint unsigned': 'PositiveSmallIntegerField', 'smallinteger': 'SmallIntegerField', 'int': 'IntegerField', 'integer': 'IntegerField', 'bigint': 'BigIntegerField', 'integer unsigned': 'PositiveIntegerField', 'bigint unsigned': 'PositiveBigIntegerField', 'decimal': 'DecimalField', 'real': 'FloatField', 'text': 'TextField', 'char': 'CharField', 'varchar': 'CharField', 'blob': 'BinaryField', 'date': 'DateField', 'datetime': 'DateTimeField', 'time': 'TimeField', } def __getitem__(self, key): key = key.lower().split('(', 1)[0].strip() return self.base_data_types_reverse[key] class DatabaseIntrospection(BaseDatabaseIntrospection): data_types_reverse = FlexibleFieldLookupDict() def get_field_type(self, data_type, description): field_type = super().get_field_type(data_type, description) if description.pk and field_type in {'BigIntegerField', 'IntegerField', 'SmallIntegerField'}: # No support for BigAutoField or SmallAutoField as SQLite treats # all integer primary keys as signed 64-bit integers. return 'AutoField' return field_type def get_table_list(self, cursor): """Return a list of table and view names in the current database.""" # Skip the sqlite_sequence system table used for autoincrement key # generation. cursor.execute(""" SELECT name, type FROM sqlite_master WHERE type in ('table', 'view') AND NOT name='sqlite_sequence' ORDER BY name""") return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): """ Return a description of the table with the DB-API cursor.description interface. """ cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(table_name)) return [ FieldInfo( name, data_type, None, get_field_size(data_type), None, None, not notnull, default, pk == 1, ) for cid, name, data_type, notnull, default, pk in cursor.fetchall() ] def get_sequences(self, cursor, table_name, table_fields=()): pk_col = self.get_primary_key_column(cursor, table_name) return [{'table': table_name, 'column': pk_col}] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. 
""" # Dictionary of relations to return relations = {} # Schema for this table cursor.execute( "SELECT sql, type FROM sqlite_master " "WHERE tbl_name = %s AND type IN ('table', 'view')", [table_name] ) create_sql, table_type = cursor.fetchone() if table_type == 'view': # It might be a view, then no results will be returned return relations results = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')] # Walk through and look for references to other tables. SQLite doesn't # really have enforced references, but since it echoes out the SQL used # to create the table we can look for REFERENCES statements used there. for field_desc in results.split(','): field_desc = field_desc.strip() if field_desc.startswith("UNIQUE"): continue m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I) if not m: continue table, column = [s.strip('"') for s in m.groups()] if field_desc.startswith("FOREIGN KEY"): # Find name of the target FK field m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I) field_name = m.groups()[0].strip('"') else: field_name = field_desc.split()[0].strip('"') cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table]) result = cursor.fetchall()[0] other_table_results = result[0].strip() li, ri = other_table_results.index('('), other_table_results.rindex(')') other_table_results = other_table_results[li + 1:ri] for other_desc in other_table_results.split(','): other_desc = other_desc.strip() if other_desc.startswith('UNIQUE'): continue other_name = other_desc.split(' ', 1)[0].strip('"') if other_name == column: relations[field_name] = (other_name, table) break return relations def get_key_columns(self, cursor, table_name): """ Return a list of (column_name, referenced_table_name, referenced_column_name) for all key columns in given table. """ key_columns = [] # Schema for this table cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) results = cursor.fetchone()[0].strip() results = results[results.index('(') + 1:results.rindex(')')] # Walk through and look for references to other tables. SQLite doesn't # really have enforced references, but since it echoes out the SQL used # to create the table we can look for REFERENCES statements used there. for field_index, field_desc in enumerate(results.split(',')): field_desc = field_desc.strip() if field_desc.startswith("UNIQUE"): continue m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I) if not m: continue # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns key_columns.append(tuple(s.strip('"') for s in m.groups())) return key_columns def get_primary_key_column(self, cursor, table_name): """Return the column name of the primary key for the given table.""" # Don't use PRAGMA because that causes issues with some transactions cursor.execute( "SELECT sql, type FROM sqlite_master " "WHERE tbl_name = %s AND type IN ('table', 'view')", [table_name] ) row = cursor.fetchone() if row is None: raise ValueError("Table %s does not exist" % table_name) create_sql, table_type = row if table_type == 'view': # Views don't have a primary key. 
return None fields_sql = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')] for field_desc in fields_sql.split(','): field_desc = field_desc.strip() m = re.match(r'(?:(?:["`\[])(.*)(?:["`\]])|(\w+)).*PRIMARY KEY.*', field_desc) if m: return m.group(1) if m.group(1) else m.group(2) return None def _get_foreign_key_constraints(self, cursor, table_name): constraints = {} cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name)) for row in cursor.fetchall(): # Remaining on_update/on_delete/match values are of no interest. id_, _, table, from_, to = row[:5] constraints['fk_%d' % id_] = { 'columns': [from_], 'primary_key': False, 'unique': False, 'foreign_key': (table, to), 'check': False, 'index': False, } return constraints def _parse_column_or_constraint_definition(self, tokens, columns): token = None is_constraint_definition = None field_name = None constraint_name = None unique = False unique_columns = [] check = False check_columns = [] braces_deep = 0 for token in tokens: if token.match(sqlparse.tokens.Punctuation, '('): braces_deep += 1 elif token.match(sqlparse.tokens.Punctuation, ')'): braces_deep -= 1 if braces_deep < 0: # End of columns and constraints for table definition. break elif braces_deep == 0 and token.match(sqlparse.tokens.Punctuation, ','): # End of current column or constraint definition. break # Detect column or constraint definition by first token. if is_constraint_definition is None: is_constraint_definition = token.match(sqlparse.tokens.Keyword, 'CONSTRAINT') if is_constraint_definition: continue if is_constraint_definition: # Detect constraint name by second token. if constraint_name is None: if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword): constraint_name = token.value elif token.ttype == sqlparse.tokens.Literal.String.Symbol: constraint_name = token.value[1:-1] # Start constraint columns parsing after UNIQUE keyword. if token.match(sqlparse.tokens.Keyword, 'UNIQUE'): unique = True unique_braces_deep = braces_deep elif unique: if unique_braces_deep == braces_deep: if unique_columns: # Stop constraint parsing. unique = False continue if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword): unique_columns.append(token.value) elif token.ttype == sqlparse.tokens.Literal.String.Symbol: unique_columns.append(token.value[1:-1]) else: # Detect field name by first token. if field_name is None: if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword): field_name = token.value elif token.ttype == sqlparse.tokens.Literal.String.Symbol: field_name = token.value[1:-1] if token.match(sqlparse.tokens.Keyword, 'UNIQUE'): unique_columns = [field_name] # Start constraint columns parsing after CHECK keyword. if token.match(sqlparse.tokens.Keyword, 'CHECK'): check = True check_braces_deep = braces_deep elif check: if check_braces_deep == braces_deep: if check_columns: # Stop constraint parsing. 
check = False continue if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword): if token.value in columns: check_columns.append(token.value) elif token.ttype == sqlparse.tokens.Literal.String.Symbol: if token.value[1:-1] in columns: check_columns.append(token.value[1:-1]) unique_constraint = { 'unique': True, 'columns': unique_columns, 'primary_key': False, 'foreign_key': None, 'check': False, 'index': False, } if unique_columns else None check_constraint = { 'check': True, 'columns': check_columns, 'primary_key': False, 'unique': False, 'foreign_key': None, 'index': False, } if check_columns else None return constraint_name, unique_constraint, check_constraint, token def _parse_table_constraints(self, sql, columns): # Check constraint parsing is based of SQLite syntax diagram. # https://www.sqlite.org/syntaxdiagrams.html#table-constraint statement = sqlparse.parse(sql)[0] constraints = {} unnamed_constrains_index = 0 tokens = (token for token in statement.flatten() if not token.is_whitespace) # Go to columns and constraint definition for token in tokens: if token.match(sqlparse.tokens.Punctuation, '('): break # Parse columns and constraint definition while True: constraint_name, unique, check, end_token = self._parse_column_or_constraint_definition(tokens, columns) if unique: if constraint_name: constraints[constraint_name] = unique else: unnamed_constrains_index += 1 constraints['__unnamed_constraint_%s__' % unnamed_constrains_index] = unique if check: if constraint_name: constraints[constraint_name] = check else: unnamed_constrains_index += 1 constraints['__unnamed_constraint_%s__' % unnamed_constrains_index] = check if end_token.match(sqlparse.tokens.Punctuation, ')'): break return constraints def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. """ constraints = {} # Find inline check constraints. try: table_schema = cursor.execute( "SELECT sql FROM sqlite_master WHERE type='table' and name=%s" % ( self.connection.ops.quote_name(table_name), ) ).fetchone()[0] except TypeError: # table_name is a view. pass else: columns = {info.name for info in self.get_table_description(cursor, table_name)} constraints.update(self._parse_table_constraints(table_schema, columns)) # Get the index info cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name)) for row in cursor.fetchall(): # SQLite 3.8.9+ has 5 columns, however older versions only give 3 # columns. Discard last 2 columns if there. number, index, unique = row[:3] cursor.execute( "SELECT sql FROM sqlite_master " "WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index) ) # There's at most one row. sql, = cursor.fetchone() or (None,) # Inline constraints are already detected in # _parse_table_constraints(). The reasons to avoid fetching inline # constraints from `PRAGMA index_list` are: # - Inline constraints can have a different name and information # than what `PRAGMA index_list` gives. # - Not all inline constraints may appear in `PRAGMA index_list`. 
if not sql: # An inline constraint continue # Get the index info for that index cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index)) for index_rank, column_rank, column in cursor.fetchall(): if index not in constraints: constraints[index] = { "columns": [], "primary_key": False, "unique": bool(unique), "foreign_key": None, "check": False, "index": True, } constraints[index]['columns'].append(column) # Add type and column orders for indexes if constraints[index]['index'] and not constraints[index]['unique']: # SQLite doesn't support any index type other than b-tree constraints[index]['type'] = Index.suffix order_info = sql.split('(')[-1].split(')')[0].split(',') orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info] constraints[index]['orders'] = orders # Get the PK pk_column = self.get_primary_key_column(cursor, table_name) if pk_column: # SQLite doesn't actually give a name to the PK constraint, # so we invent one. This is fine, as the SQLite backend never # deletes PK constraints by name, as you can't delete constraints # in SQLite; we remake the table with a new PK instead. constraints["__primary__"] = { "columns": [pk_column], "primary_key": True, "unique": False, # It's not actually a unique constraint. "foreign_key": None, "check": False, "index": False, } constraints.update(self._get_foreign_key_constraints(cursor, table_name)) return constraints
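# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the backend): the REFERENCES-sniffing
# regex used by get_relations()/get_key_columns() above. SQLite stores the
# original CREATE TABLE SQL in sqlite_master, so foreign keys can be
# recovered by scanning each column definition. The column definition below
# is a hypothetical example.
# ---------------------------------------------------------------------------
import re

field_desc = '"author_id" integer NOT NULL REFERENCES "myapp_author" ("id")'
m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
table, column = [s.strip('"') for s in m.groups()]
assert (table, column) == ('myapp_author', 'id')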
""" SQLite backend for the sqlite3 module in the standard library. """ import datetime import decimal import functools import hashlib import math import operator import re import statistics import warnings from itertools import chain from sqlite3 import dbapi2 as Database import pytz from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils import timezone from django.utils.asyncio import async_unsafe from django.utils.dateparse import parse_datetime, parse_time from django.utils.duration import duration_microseconds from django.utils.regex_helper import _lazy_re_compile from .client import DatabaseClient # isort:skip from .creation import DatabaseCreation # isort:skip from .features import DatabaseFeatures # isort:skip from .introspection import DatabaseIntrospection # isort:skip from .operations import DatabaseOperations # isort:skip from .schema import DatabaseSchemaEditor # isort:skip def decoder(conv_func): """ Convert bytestrings from Python's sqlite3 interface to a regular string. """ return lambda s: conv_func(s.decode()) def none_guard(func): """ Decorator that returns None if any of the arguments to the decorated function are None. Many SQL functions return NULL if any of their arguments are NULL. This decorator simplifies the implementation of this for the custom functions registered below. """ @functools.wraps(func) def wrapper(*args, **kwargs): return None if None in args else func(*args, **kwargs) return wrapper def list_aggregate(function): """ Return an aggregate class that accumulates values in a list and applies the provided function to the data. """ return type('ListAggregate', (list,), {'finalize': function, 'step': list.append}) def check_sqlite_version(): if Database.sqlite_version_info < (3, 8, 3): raise ImproperlyConfigured('SQLite 3.8.3 or later is required (found %s).' % Database.sqlite_version) check_sqlite_version() Database.register_converter("bool", b'1'.__eq__) Database.register_converter("time", decoder(parse_time)) Database.register_converter("datetime", decoder(parse_datetime)) Database.register_converter("timestamp", decoder(parse_datetime)) Database.register_converter("TIMESTAMP", decoder(parse_datetime)) Database.register_adapter(decimal.Decimal, str) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'sqlite' display_name = 'SQLite' # SQLite doesn't actually support most of these types, but it "does the right # thing" given more verbose field definitions, so leave them as is so that # schema inspection is more useful. 
data_types = { 'AutoField': 'integer', 'BigAutoField': 'integer', 'BinaryField': 'BLOB', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime', 'DecimalField': 'decimal', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'real', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveBigIntegerField': 'bigint unsigned', 'PositiveIntegerField': 'integer unsigned', 'PositiveSmallIntegerField': 'smallint unsigned', 'SlugField': 'varchar(%(max_length)s)', 'SmallAutoField': 'integer', 'SmallIntegerField': 'smallint', 'TextField': 'text', 'TimeField': 'time', 'UUIDField': 'char(32)', } data_type_check_constraints = { 'PositiveBigIntegerField': '"%(column)s" >= 0', 'PositiveIntegerField': '"%(column)s" >= 0', 'PositiveSmallIntegerField': '"%(column)s" >= 0', } data_types_suffix = { 'AutoField': 'AUTOINCREMENT', 'BigAutoField': 'AUTOINCREMENT', 'SmallAutoField': 'AUTOINCREMENT', } # SQLite requires LIKE statements to include an ESCAPE clause if the value # being escaped has a percent or underscore in it. # See https://www.sqlite.org/lang_expr.html for an explanation. operators = { 'exact': '= %s', 'iexact': "LIKE %s ESCAPE '\\'", 'contains': "LIKE %s ESCAPE '\\'", 'icontains': "LIKE %s ESCAPE '\\'", 'regex': 'REGEXP %s', 'iregex': "REGEXP '(?i)' || %s", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE %s ESCAPE '\\'", 'endswith': "LIKE %s ESCAPE '\\'", 'istartswith': "LIKE %s ESCAPE '\\'", 'iendswith': "LIKE %s ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'", 'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", 'startswith': r"LIKE {} || '%%' ESCAPE '\'", 'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'", 'endswith': r"LIKE '%%' || {} ESCAPE '\'", 'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict['NAME']: raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value.") kwargs = { # TODO: Remove str() when dropping support for PY36. # https://bugs.python.org/issue33496 'database': str(settings_dict['NAME']), 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, **settings_dict['OPTIONS'], } # Always allow the underlying SQLite connection to be shareable # between multiple threads. The safe-guarding will be handled at a # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` # property. 
This is necessary as the shareability is disabled by # default in pysqlite and it cannot be changed once a connection is # opened. if 'check_same_thread' in kwargs and kwargs['check_same_thread']: warnings.warn( 'The `check_same_thread` option was provided and set to ' 'True. It will be overridden with False. Use the ' '`DatabaseWrapper.allow_thread_sharing` property instead ' 'for controlling thread shareability.', RuntimeWarning ) kwargs.update({'check_same_thread': False, 'uri': True}) return kwargs @async_unsafe def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) conn.create_function("django_date_extract", 2, _sqlite_datetime_extract) conn.create_function("django_date_trunc", 2, _sqlite_date_trunc) conn.create_function('django_datetime_cast_date', 3, _sqlite_datetime_cast_date) conn.create_function('django_datetime_cast_time', 3, _sqlite_datetime_cast_time) conn.create_function('django_datetime_extract', 4, _sqlite_datetime_extract) conn.create_function('django_datetime_trunc', 4, _sqlite_datetime_trunc) conn.create_function("django_time_extract", 2, _sqlite_time_extract) conn.create_function("django_time_trunc", 2, _sqlite_time_trunc) conn.create_function("django_time_diff", 2, _sqlite_time_diff) conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff) conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta) conn.create_function('regexp', 2, _sqlite_regexp) conn.create_function('ACOS', 1, none_guard(math.acos)) conn.create_function('ASIN', 1, none_guard(math.asin)) conn.create_function('ATAN', 1, none_guard(math.atan)) conn.create_function('ATAN2', 2, none_guard(math.atan2)) conn.create_function('CEILING', 1, none_guard(math.ceil)) conn.create_function('COS', 1, none_guard(math.cos)) conn.create_function('COT', 1, none_guard(lambda x: 1 / math.tan(x))) conn.create_function('DEGREES', 1, none_guard(math.degrees)) conn.create_function('EXP', 1, none_guard(math.exp)) conn.create_function('FLOOR', 1, none_guard(math.floor)) conn.create_function('LN', 1, none_guard(math.log)) conn.create_function('LOG', 2, none_guard(lambda x, y: math.log(y, x))) conn.create_function('LPAD', 3, _sqlite_lpad) conn.create_function('MD5', 1, none_guard(lambda x: hashlib.md5(x.encode()).hexdigest())) conn.create_function('MOD', 2, none_guard(math.fmod)) conn.create_function('PI', 0, lambda: math.pi) conn.create_function('POWER', 2, none_guard(operator.pow)) conn.create_function('RADIANS', 1, none_guard(math.radians)) conn.create_function('REPEAT', 2, none_guard(operator.mul)) conn.create_function('REVERSE', 1, none_guard(lambda x: x[::-1])) conn.create_function('RPAD', 3, _sqlite_rpad) conn.create_function('SHA1', 1, none_guard(lambda x: hashlib.sha1(x.encode()).hexdigest())) conn.create_function('SHA224', 1, none_guard(lambda x: hashlib.sha224(x.encode()).hexdigest())) conn.create_function('SHA256', 1, none_guard(lambda x: hashlib.sha256(x.encode()).hexdigest())) conn.create_function('SHA384', 1, none_guard(lambda x: hashlib.sha384(x.encode()).hexdigest())) conn.create_function('SHA512', 1, none_guard(lambda x: hashlib.sha512(x.encode()).hexdigest())) conn.create_function('SIGN', 1, none_guard(lambda x: (x > 0) - (x < 0))) conn.create_function('SIN', 1, none_guard(math.sin)) conn.create_function('SQRT', 1, none_guard(math.sqrt)) conn.create_function('TAN', 1, none_guard(math.tan)) conn.create_aggregate('STDDEV_POP', 1, list_aggregate(statistics.pstdev)) conn.create_aggregate('STDDEV_SAMP', 1, list_aggregate(statistics.stdev)) 
conn.create_aggregate('VAR_POP', 1, list_aggregate(statistics.pvariance)) conn.create_aggregate('VAR_SAMP', 1, list_aggregate(statistics.variance)) conn.execute('PRAGMA foreign_keys = ON') return conn def init_connection_state(self): pass def create_cursor(self, name=None): return self.connection.cursor(factory=SQLiteCursorWrapper) @async_unsafe def close(self): self.validate_thread_sharing() # If database is in memory, closing the connection destroys the # database. To prevent accidental data loss, ignore close requests on # an in-memory db. if not self.is_in_memory_db(): BaseDatabaseWrapper.close(self) def _savepoint_allowed(self): # When 'isolation_level' is not None, sqlite3 commits before each # savepoint; it's a bug. When it is None, savepoints don't make sense # because autocommit is enabled. The only exception is inside 'atomic' # blocks. To work around that bug, on SQLite, 'atomic' starts a # transaction explicitly rather than simply disable autocommit. return self.in_atomic_block def _set_autocommit(self, autocommit): if autocommit: level = None else: # sqlite3's internal default is ''. It's different from None. # See Modules/_sqlite/connection.c. level = '' # 'isolation_level' is a misleading API. # SQLite always runs at the SERIALIZABLE isolation level. with self.wrap_database_errors: self.connection.isolation_level = level def disable_constraint_checking(self): with self.cursor() as cursor: cursor.execute('PRAGMA foreign_keys = OFF') # Foreign key constraints cannot be turned off while in a multi- # statement transaction. Fetch the current state of the pragma # to determine if constraints are effectively disabled. enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0] return not bool(enabled) def enable_constraint_checking(self): self.cursor().execute('PRAGMA foreign_keys = ON') def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. """ if self.features.supports_pragma_foreign_key_check: with self.cursor() as cursor: if table_names is None: violations = self.cursor().execute('PRAGMA foreign_key_check').fetchall() else: violations = chain.from_iterable( cursor.execute('PRAGMA foreign_key_check(%s)' % table_name).fetchall() for table_name in table_names ) # See https://www.sqlite.org/pragma.html#pragma_foreign_key_check for table_name, rowid, referenced_table_name, foreign_key_index in violations: foreign_key = cursor.execute( 'PRAGMA foreign_key_list(%s)' % table_name ).fetchall()[foreign_key_index] column_name, referenced_column_name = foreign_key[3:5] primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) primary_key_value, bad_value = cursor.execute( 'SELECT %s, %s FROM %s WHERE rowid = %%s' % ( primary_key_column_name, column_name, table_name ), (rowid,), ).fetchone() raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." 
% ( table_name, primary_key_value, table_name, column_name, bad_value, referenced_table_name, referenced_column_name ) ) else: with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." % ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): return True def _start_transaction_under_autocommit(self): """ Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled. """ self.cursor().execute("BEGIN") def is_in_memory_db(self): return self.creation.is_in_memory_db(self.settings_dict['NAME']) FORMAT_QMARK_REGEX = _lazy_re_compile(r'(?<!%)%s') class SQLiteCursorWrapper(Database.Cursor): """ Django uses "format" style placeholders, but pysqlite2 uses "qmark" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". 
""" def execute(self, query, params=None): if params is None: return Database.Cursor.execute(self, query) query = self.convert_query(query) return Database.Cursor.execute(self, query, params) def executemany(self, query, param_list): query = self.convert_query(query) return Database.Cursor.executemany(self, query, param_list) def convert_query(self, query): return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%') def _sqlite_datetime_parse(dt, tzname=None, conn_tzname=None): if dt is None: return None try: dt = backend_utils.typecast_timestamp(dt) except (TypeError, ValueError): return None if conn_tzname: dt = dt.replace(tzinfo=pytz.timezone(conn_tzname)) if tzname is not None and tzname != conn_tzname: sign_index = tzname.find('+') + tzname.find('-') + 1 if sign_index > -1: sign = tzname[sign_index] tzname, offset = tzname.split(sign) if offset: hours, minutes = offset.split(':') offset_delta = datetime.timedelta(hours=int(hours), minutes=int(minutes)) dt += offset_delta if sign == '+' else -offset_delta dt = timezone.localtime(dt, pytz.timezone(tzname)) return dt def _sqlite_date_trunc(lookup_type, dt): dt = _sqlite_datetime_parse(dt) if dt is None: return None if lookup_type == 'year': return "%i-01-01" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) def _sqlite_time_trunc(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None if lookup_type == 'hour': return "%02i:00:00" % dt.hour elif lookup_type == 'minute': return "%02i:%02i:00" % (dt.hour, dt.minute) elif lookup_type == 'second': return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second) def _sqlite_datetime_cast_date(dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None return dt.date().isoformat() def _sqlite_datetime_cast_time(dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None return dt.time().isoformat() def _sqlite_datetime_extract(lookup_type, dt, tzname=None, conn_tzname=None): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None if lookup_type == 'week_day': return (dt.isoweekday() % 7) + 1 elif lookup_type == 'iso_week_day': return dt.isoweekday() elif lookup_type == 'week': return dt.isocalendar()[1] elif lookup_type == 'quarter': return math.ceil(dt.month / 3) elif lookup_type == 'iso_year': return dt.isocalendar()[0] else: return getattr(dt, lookup_type) def _sqlite_datetime_trunc(lookup_type, dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None if lookup_type == 'year': return "%i-01-01 00:00:00" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01 00:00:00" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'hour': return "%i-%02i-%02i 
%02i:00:00" % (dt.year, dt.month, dt.day, dt.hour) elif lookup_type == 'minute': return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) elif lookup_type == 'second': return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) def _sqlite_time_extract(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None return getattr(dt, lookup_type) @none_guard def _sqlite_format_dtdelta(conn, lhs, rhs): """ LHS and RHS can be either: - An integer number of microseconds - A string representing a datetime """ try: real_lhs = datetime.timedelta(0, 0, lhs) if isinstance(lhs, int) else backend_utils.typecast_timestamp(lhs) real_rhs = datetime.timedelta(0, 0, rhs) if isinstance(rhs, int) else backend_utils.typecast_timestamp(rhs) if conn.strip() == '+': out = real_lhs + real_rhs else: out = real_lhs - real_rhs except (ValueError, TypeError): return None # typecast_timestamp returns a date or a datetime without timezone. # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]" return str(out) @none_guard def _sqlite_time_diff(lhs, rhs): left = backend_utils.typecast_time(lhs) right = backend_utils.typecast_time(rhs) return ( (left.hour * 60 * 60 * 1000000) + (left.minute * 60 * 1000000) + (left.second * 1000000) + (left.microsecond) - (right.hour * 60 * 60 * 1000000) - (right.minute * 60 * 1000000) - (right.second * 1000000) - (right.microsecond) ) @none_guard def _sqlite_timestamp_diff(lhs, rhs): left = backend_utils.typecast_timestamp(lhs) right = backend_utils.typecast_timestamp(rhs) return duration_microseconds(left - right) @none_guard def _sqlite_regexp(re_pattern, re_string): return bool(re.search(re_pattern, str(re_string))) @none_guard def _sqlite_lpad(text, length, fill_text): if len(text) >= length: return text[:length] return (fill_text * length)[:length - len(text)] + text @none_guard def _sqlite_rpad(text, length, fill_text): return (text + fill_text * length)[:length]
import datetime import decimal import uuid from functools import lru_cache from itertools import chain from django.conf import settings from django.core.exceptions import FieldError from django.db import DatabaseError, NotSupportedError, models from django.db.backends.base.operations import BaseDatabaseOperations from django.db.models.expressions import Col from django.utils import timezone from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.duration import duration_microseconds from django.utils.functional import cached_property class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'text' cast_data_types = { 'DateField': 'TEXT', 'DateTimeField': 'TEXT', } explain_prefix = 'EXPLAIN QUERY PLAN' def bulk_batch_size(self, fields, objs): """ SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of 999 variables per query. If there's only a single field to insert, the limit is 500 (SQLITE_MAX_COMPOUND_SELECT). """ if len(fields) == 1: return 500 elif len(fields) > 1: return self.connection.features.max_query_params // len(fields) else: return len(objs) def check_expression_support(self, expression): bad_fields = (models.DateField, models.DateTimeField, models.TimeField) bad_aggregates = (models.Sum, models.Avg, models.Variance, models.StdDev) if isinstance(expression, bad_aggregates): for expr in expression.get_source_expressions(): try: output_field = expr.output_field except (AttributeError, FieldError): # Not every subexpression has an output_field which is fine # to ignore. pass else: if isinstance(output_field, bad_fields): raise NotSupportedError( 'You cannot use Sum, Avg, StdDev, and Variance ' 'aggregations on date/time fields in sqlite3 ' 'since date/time is saved as text.' ) if isinstance(expression, models.Aggregate) and len(expression.source_expressions) > 1: raise NotSupportedError( "SQLite doesn't support DISTINCT on aggregate functions " "accepting multiple arguments." ) def date_extract_sql(self, lookup_type, field_name): """ Support EXTRACT with a user-defined function django_date_extract() that's registered in connect(). Use single quotes because this is a string and could otherwise cause a collision with a field name. 
""" return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name) def date_interval_sql(self, timedelta): return str(duration_microseconds(timedelta)) def format_for_duration_arithmetic(self, sql): """Do nothing since formatting is handled in the custom function.""" return sql def date_trunc_sql(self, lookup_type, field_name): return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name) def time_trunc_sql(self, lookup_type, field_name): return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name) def _convert_tznames_to_sql(self, tzname): if settings.USE_TZ: return "'%s'" % tzname, "'%s'" % self.connection.timezone_name return 'NULL', 'NULL' def datetime_cast_date_sql(self, field_name, tzname): return 'django_datetime_cast_date(%s, %s, %s)' % ( field_name, *self._convert_tznames_to_sql(tzname), ) def datetime_cast_time_sql(self, field_name, tzname): return 'django_datetime_cast_time(%s, %s, %s)' % ( field_name, *self._convert_tznames_to_sql(tzname), ) def datetime_extract_sql(self, lookup_type, field_name, tzname): return "django_datetime_extract('%s', %s, %s, %s)" % ( lookup_type.lower(), field_name, *self._convert_tznames_to_sql(tzname), ) def datetime_trunc_sql(self, lookup_type, field_name, tzname): return "django_datetime_trunc('%s', %s, %s, %s)" % ( lookup_type.lower(), field_name, *self._convert_tznames_to_sql(tzname), ) def time_extract_sql(self, lookup_type, field_name): return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name) def pk_default_value(self): return "NULL" def _quote_params_for_last_executed_query(self, params): """ Only for last_executed_query! Don't use this to execute SQL queries! """ # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the # number of return values, default = 2000). Since Python's sqlite3 # module doesn't expose the get_limit() C API, assume the default # limits are in effect and split the work in batches if needed. BATCH_SIZE = 999 if len(params) > BATCH_SIZE: results = () for index in range(0, len(params), BATCH_SIZE): chunk = params[index:index + BATCH_SIZE] results += self._quote_params_for_last_executed_query(chunk) return results sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params)) # Bypass Django's wrappers and use the underlying sqlite3 connection # to avoid logging this query - it would trigger infinite recursion. cursor = self.connection.connection.cursor() # Native sqlite3 cursors cannot be used as context managers. try: return cursor.execute(sql, params).fetchone() finally: cursor.close() def last_executed_query(self, cursor, sql, params): # Python substitutes parameters in Modules/_sqlite/cursor.c with: # pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars); # Unfortunately there is no way to reach self->statement from Python, # so we quote and substitute parameters manually. if params: if isinstance(params, (list, tuple)): params = self._quote_params_for_last_executed_query(params) else: values = tuple(params.values()) values = self._quote_params_for_last_executed_query(values) params = dict(zip(params, values)) return sql % params # For consistency with SQLiteCursorWrapper.execute(), just return sql # when there are no parameters. See #13648 and #17158. else: return sql def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. 
return '"%s"' % name def no_limit_value(self): return -1 def __references_graph(self, table_name): query = """ WITH tables AS ( SELECT %s name UNION SELECT sqlite_master.name FROM sqlite_master JOIN tables ON (sql REGEXP %s || tables.name || %s) ) SELECT name FROM tables; """ params = ( table_name, r'(?i)\s+references\s+("|\')?', r'("|\')?\s*\(', ) with self.connection.cursor() as cursor: results = cursor.execute(query, params) return [row[0] for row in results.fetchall()] @cached_property def _references_graph(self): # 512 is large enough to fit the ~330 tables (as of this writing) in # Django's test suite. return lru_cache(maxsize=512)(self.__references_graph) def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables and allow_cascade: # Simulate TRUNCATE CASCADE by recursively collecting the tables # referencing the tables to be flushed. tables = set(chain.from_iterable(self._references_graph(table) for table in tables)) # Note: No requirement for reset of auto-incremented indices (cf. other # sql_flush() implementations). Just return SQL at this point return ['%s %s %s;' % ( style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'), style.SQL_FIELD(self.quote_name(table)) ) for table in tables] def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): raise ValueError("SQLite backend does not support timezone-aware times.") return str(value) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'DateTimeField': converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'DecimalField': converters.append(self.get_decimalfield_converter(expression)) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) elif internal_type in ('NullBooleanField', 'BooleanField'): converters.append(self.convert_booleanfield_value) return converters def convert_datetimefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.datetime): value = parse_datetime(value) if settings.USE_TZ and not timezone.is_aware(value): value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.date): value = parse_date(value) return value def convert_timefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.time): value = parse_time(value) return value def get_decimalfield_converter(self, expression): # SQLite stores only 15 significant digits. Digits coming from # float inaccuracy must be removed. 
create_decimal = decimal.Context(prec=15).create_decimal_from_float if isinstance(expression, Col): quantize_value = decimal.Decimal(1).scaleb(-expression.output_field.decimal_places) def converter(value, expression, connection): if value is not None: return create_decimal(value).quantize(quantize_value, context=expression.output_field.context) else: def converter(value, expression, connection): if value is not None: return create_decimal(value) return converter def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def convert_booleanfield_value(self, value, expression, connection): return bool(value) if value in (1, 0) else value def bulk_insert_sql(self, fields, placeholder_rows): return " UNION ALL ".join( "SELECT %s" % ", ".join(row) for row in placeholder_rows ) def combine_expression(self, connector, sub_expressions): # SQLite doesn't have a ^ operator, so use the user-defined POWER # function that's registered in connect(). if connector == '^': return 'POWER(%s)' % ','.join(sub_expressions) return super().combine_expression(connector, sub_expressions) def combine_duration_expression(self, connector, sub_expressions): if connector not in ['+', '-']: raise DatabaseError('Invalid connector for timedelta: %s.' % connector) fn_params = ["'%s'" % connector] + sub_expressions if len(fn_params) > 3: raise ValueError('Too many params for timedelta operations.') return "django_format_dtdelta(%s)" % ', '.join(fn_params) def integer_field_range(self, internal_type): # SQLite doesn't enforce any integer constraints return (None, None) def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) if internal_type == 'TimeField': return 'django_time_diff(%s, %s)' % (lhs_sql, rhs_sql), params return 'django_timestamp_diff(%s, %s)' % (lhs_sql, rhs_sql), params def insert_statement(self, ignore_conflicts=False): return 'INSERT OR IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts)
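# --- Illustrative sketch (not part of the original module) -----------------
# Why get_decimalfield_converter() clips to 15 significant digits: SQLite
# stores DecimalField values as 8-byte IEEE floats, so float noise has to be
# stripped before quantizing to the field's decimal_places. A standalone
# example with hypothetical values, under the assumption decimal_places=2:
def _decimal_converter_demo():
    create_decimal = decimal.Context(prec=15).create_decimal_from_float
    noisy = 0.1 + 0.2  # reads back from a REAL column as 0.30000000000000004
    quantize_value = decimal.Decimal(1).scaleb(-2)  # Decimal('0.01')
    assert create_decimal(noisy).quantize(quantize_value) == decimal.Decimal('0.30')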
import copy from decimal import Decimal from django.apps.registry import Apps from django.db import NotSupportedError from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.ddl_references import Statement from django.db.backends.utils import strip_quotes from django.db.models import UniqueConstraint from django.db.transaction import atomic class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_delete_table = "DROP TABLE %(table)s" sql_create_fk = None sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED" sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)" sql_delete_unique = "DROP INDEX %(name)s" def __enter__(self): # Some SQLite schema alterations need foreign key constraints to be # disabled. Enforce it here for the duration of the schema edition. if not self.connection.disable_constraint_checking(): raise NotSupportedError( 'SQLite schema editor cannot be used while foreign key ' 'constraint checks are enabled. Make sure to disable them ' 'before entering a transaction.atomic() context because ' 'SQLite does not support disabling them in the middle of ' 'a multi-statement transaction.' ) return super().__enter__() def __exit__(self, exc_type, exc_value, traceback): self.connection.check_constraints() super().__exit__(exc_type, exc_value, traceback) self.connection.enable_constraint_checking() def quote_value(self, value): # The backend "mostly works" without this function and there are use # cases for compiling Python without the sqlite3 libraries (e.g. # security hardening). try: import sqlite3 value = sqlite3.adapt(value) except ImportError: pass except sqlite3.ProgrammingError: pass # Manual emulation of SQLite parameter quoting if isinstance(value, bool): return str(int(value)) elif isinstance(value, (Decimal, float, int)): return str(value) elif isinstance(value, str): return "'%s'" % value.replace("\'", "\'\'") elif value is None: return "NULL" elif isinstance(value, (bytes, bytearray, memoryview)): # Bytes are only allowed for BLOB fields, encoded as string # literals containing hexadecimal data and preceded by a single "X" # character. return "X'%s'" % value.hex() else: raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value))) def _is_referenced_by_fk_constraint(self, table_name, column_name=None, ignore_self=False): """ Return whether or not the provided table name is referenced by another one. If `column_name` is specified, only references pointing to that column are considered. If `ignore_self` is True, self-referential constraints are ignored. 
""" with self.connection.cursor() as cursor: for other_table in self.connection.introspection.get_table_list(cursor): if ignore_self and other_table.name == table_name: continue constraints = self.connection.introspection._get_foreign_key_constraints(cursor, other_table.name) for constraint in constraints.values(): constraint_table, constraint_column = constraint['foreign_key'] if (constraint_table == table_name and (column_name is None or constraint_column == column_name)): return True return False def alter_db_table(self, model, old_db_table, new_db_table, disable_constraints=True): if (not self.connection.features.supports_atomic_references_rename and disable_constraints and self._is_referenced_by_fk_constraint(old_db_table)): if self.connection.in_atomic_block: raise NotSupportedError(( 'Renaming the %r table while in a transaction is not ' 'supported on SQLite < 3.26 because it would break referential ' 'integrity. Try adding `atomic = False` to the Migration class.' ) % old_db_table) self.connection.enable_constraint_checking() super().alter_db_table(model, old_db_table, new_db_table) self.connection.disable_constraint_checking() else: super().alter_db_table(model, old_db_table, new_db_table) def alter_field(self, model, old_field, new_field, strict=False): old_field_name = old_field.name table_name = model._meta.db_table _, old_column_name = old_field.get_attname_column() if (new_field.name != old_field_name and not self.connection.features.supports_atomic_references_rename and self._is_referenced_by_fk_constraint(table_name, old_column_name, ignore_self=True)): if self.connection.in_atomic_block: raise NotSupportedError(( 'Renaming the %r.%r column while in a transaction is not ' 'supported on SQLite < 3.26 because it would break referential ' 'integrity. Try adding `atomic = False` to the Migration class.' ) % (model._meta.db_table, old_field_name)) with atomic(self.connection.alias): super().alter_field(model, old_field, new_field, strict=strict) # Follow SQLite's documented procedure for performing changes # that don't affect the on-disk content. # https://sqlite.org/lang_altertable.html#otheralter with self.connection.cursor() as cursor: schema_version = cursor.execute('PRAGMA schema_version').fetchone()[0] cursor.execute('PRAGMA writable_schema = 1') references_template = ' REFERENCES "%s" ("%%s") ' % table_name new_column_name = new_field.get_attname_column()[1] search = references_template % old_column_name replacement = references_template % new_column_name cursor.execute('UPDATE sqlite_master SET sql = replace(sql, %s, %s)', (search, replacement)) cursor.execute('PRAGMA schema_version = %d' % (schema_version + 1)) cursor.execute('PRAGMA writable_schema = 0') # The integrity check will raise an exception and rollback # the transaction if the sqlite_master updates corrupt the # database. cursor.execute('PRAGMA integrity_check') # Perform a VACUUM to refresh the database representation from # the sqlite_master table. with self.connection.cursor() as cursor: cursor.execute('VACUUM') else: super().alter_field(model, old_field, new_field, strict=strict) def _remake_table(self, model, create_field=None, delete_field=None, alter_field=None): """ Shortcut to transform a model from old_model into new_model This follows the correct procedure to perform non-rename or column addition operations based on SQLite's documentation https://www.sqlite.org/lang_altertable.html#caution The essential steps are: 1. Create a table with the updated definition called "new__app_model" 2. 
Copy the data from the existing "app_model" table to the new table 3. Drop the "app_model" table 4. Rename the "new__app_model" table to "app_model" 5. Restore any index of the previous "app_model" table. """ # Self-referential fields must be recreated rather than copied from # the old model to ensure their remote_field.field_name doesn't refer # to an altered field. def is_self_referential(f): return f.is_relation and f.remote_field.model is model # Work out the new fields dict / mapping body = { f.name: f.clone() if is_self_referential(f) else f for f in model._meta.local_concrete_fields } # Since mapping might mix column names and default values, # its values must be already quoted. mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields} # This maps field names (not columns) for things like unique_together rename_mapping = {} # If any of the new or altered fields is introducing a new PK, # remove the old one restore_pk_field = None if getattr(create_field, 'primary_key', False) or ( alter_field and getattr(alter_field[1], 'primary_key', False)): for name, field in list(body.items()): if field.primary_key: field.primary_key = False restore_pk_field = field if field.auto_created: del body[name] del mapping[field.column] # Add in any created fields if create_field: body[create_field.name] = create_field # Choose a default and insert it into the copy map if not create_field.many_to_many and create_field.concrete: mapping[create_field.column] = self.quote_value( self.effective_default(create_field) ) # Add in any altered fields if alter_field: old_field, new_field = alter_field body.pop(old_field.name, None) mapping.pop(old_field.column, None) body[new_field.name] = new_field if old_field.null and not new_field.null: case_sql = "coalesce(%(col)s, %(default)s)" % { 'col': self.quote_name(old_field.column), 'default': self.quote_value(self.effective_default(new_field)) } mapping[new_field.column] = case_sql else: mapping[new_field.column] = self.quote_name(old_field.column) rename_mapping[old_field.name] = new_field.name # Remove any deleted fields if delete_field: del body[delete_field.name] del mapping[delete_field.column] # Remove any implicit M2M tables if delete_field.many_to_many and delete_field.remote_field.through._meta.auto_created: return self.delete_model(delete_field.remote_field.through) # Work inside a new app registry apps = Apps() # Work out the new value of unique_together, taking renames into # account unique_together = [ [rename_mapping.get(n, n) for n in unique] for unique in model._meta.unique_together ] # Work out the new value for index_together, taking renames into # account index_together = [ [rename_mapping.get(n, n) for n in index] for index in model._meta.index_together ] indexes = model._meta.indexes if delete_field: indexes = [ index for index in indexes if delete_field.name not in index.fields ] constraints = list(model._meta.constraints) # Provide isolated instances of the fields to the new model body so # that the existing model's internals aren't interfered with when # the dummy model is constructed. body_copy = copy.deepcopy(body) # Construct a new model with the new fields to allow self referential # primary key to resolve to. This model won't ever be materialized as a # table and solely exists for foreign key reference resolution purposes. # This wouldn't be required if the schema editor was operating on model # states instead of rendered models. 
meta_contents = { 'app_label': model._meta.app_label, 'db_table': model._meta.db_table, 'unique_together': unique_together, 'index_together': index_together, 'indexes': indexes, 'constraints': constraints, 'apps': apps, } meta = type("Meta", (), meta_contents) body_copy['Meta'] = meta body_copy['__module__'] = model.__module__ type(model._meta.object_name, model.__bases__, body_copy) # Construct a model with a renamed table name. body_copy = copy.deepcopy(body) meta_contents = { 'app_label': model._meta.app_label, 'db_table': 'new__%s' % strip_quotes(model._meta.db_table), 'unique_together': unique_together, 'index_together': index_together, 'indexes': indexes, 'constraints': constraints, 'apps': apps, } meta = type("Meta", (), meta_contents) body_copy['Meta'] = meta body_copy['__module__'] = model.__module__ new_model = type('New%s' % model._meta.object_name, model.__bases__, body_copy) # Create a new table with the updated schema. self.create_model(new_model) # Copy data from the old table into the new table self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % ( self.quote_name(new_model._meta.db_table), ', '.join(self.quote_name(x) for x in mapping), ', '.join(mapping.values()), self.quote_name(model._meta.db_table), )) # Delete the old table to make way for the new self.delete_model(model, handle_autom2m=False) # Rename the new table to take way for the old self.alter_db_table( new_model, new_model._meta.db_table, model._meta.db_table, disable_constraints=False, ) # Run deferred SQL on correct table for sql in self.deferred_sql: self.execute(sql) self.deferred_sql = [] # Fix any PK-removed field if restore_pk_field: restore_pk_field.primary_key = True def delete_model(self, model, handle_autom2m=True): if handle_autom2m: super().delete_model(model) else: # Delete the table (and only that) self.execute(self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), }) # Remove all deferred statements referencing the deleted table. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_table(model._meta.db_table): self.deferred_sql.remove(sql) def add_field(self, model, field): """ Create a field on a model. Usually involves adding a column, but may involve adding a table instead (for M2M fields). """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.create_model(field.remote_field.through) self._remake_table(model, create_field=field) def remove_field(self, model, field): """ Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # M2M fields are a special case if field.many_to_many: # For implicit M2M tables, delete the auto-created table if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # For explicit "through" M2M fields, do nothing # For everything else, remake. else: # It might not actually have a column behind it if field.db_parameters(connection=self.connection)['type'] is None: return self._remake_table(model, delete_field=field) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): """Perform a "physical" (non-ManyToMany) field update.""" # Use "ALTER TABLE ... RENAME COLUMN" if only the column name # changed and there aren't any constraints. 
if (self.connection.features.can_alter_table_rename_column and old_field.column != new_field.column and self.column_sql(model, old_field) == self.column_sql(model, new_field) and not (old_field.remote_field and old_field.db_constraint or new_field.remote_field and new_field.db_constraint)): return self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type)) # Alter by remaking table self._remake_table(model, alter_field=(old_field, new_field)) # Rebuild tables with FKs pointing to this field if the PK type changed. if old_field.primary_key and new_field.primary_key and old_type != new_type: for rel in new_field.model._meta.related_objects: if not rel.many_to_many: self._remake_table(rel.related_model) def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table: # The field name didn't change, but some options did; we have to propagate this altering. self._remake_table( old_field.remote_field.through, alter_field=( # We need the field that points to the target model, so we can tell alter_field to change it - # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()), ), ) return # Make a new through table self.create_model(new_field.remote_field.through) # Copy the data across self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % ( self.quote_name(new_field.remote_field.through._meta.db_table), ', '.join([ "id", new_field.m2m_column_name(), new_field.m2m_reverse_name(), ]), ', '.join([ "id", old_field.m2m_column_name(), old_field.m2m_reverse_name(), ]), self.quote_name(old_field.remote_field.through._meta.db_table), )) # Delete the old through table self.delete_model(old_field.remote_field.through) def add_constraint(self, model, constraint): if isinstance(constraint, UniqueConstraint) and constraint.condition: super().add_constraint(model, constraint) else: self._remake_table(model) def remove_constraint(self, model, constraint): if isinstance(constraint, UniqueConstraint) and constraint.condition: super().remove_constraint(model, constraint) else: self._remake_table(model)
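# --- Illustrative sketch (not part of the original module) -----------------
# quote_value() above never touches self, so purely for illustration it can
# be exercised unbound with a placeholder "self". The expected literals below
# follow SQLite's quoting rules; this assumes no extra sqlite3 adapters are
# registered for these types beyond what this backend registers.
def _quote_value_demo():
    def qv(value):
        return DatabaseSchemaEditor.quote_value(None, value)
    assert qv(True) == '1'                  # bools become integer literals
    assert qv(1.5) == '1.5'                 # numbers pass through via str()
    assert qv("O'Reilly") == "'O''Reilly'"  # embedded quotes are doubled
    assert qv(None) == 'NULL'
    assert qv(b'\x00\xff') == "X'00ff'"     # BLOBs as X'<hex>' literals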
""" Interfaces for serializing Django objects. Usage:: from django.core import serializers json = serializers.serialize("json", some_queryset) objects = list(serializers.deserialize("json", json)) To add your own serializers, use the SERIALIZATION_MODULES setting:: SERIALIZATION_MODULES = { "csv": "path.to.csv.serializer", "txt": "path.to.txt.serializer", } """ import importlib from django.apps import apps from django.conf import settings from django.core.serializers.base import SerializerDoesNotExist # Built-in serializers BUILTIN_SERIALIZERS = { "xml": "django.core.serializers.xml_serializer", "python": "django.core.serializers.python", "json": "django.core.serializers.json", "yaml": "django.core.serializers.pyyaml", } _serializers = {} class BadSerializer: """ Stub serializer to hold exception raised during registration This allows the serializer registration to cache serializers and if there is an error raised in the process of creating a serializer it will be raised and passed along to the caller when the serializer is used. """ internal_use_only = False def __init__(self, exception): self.exception = exception def __call__(self, *args, **kwargs): raise self.exception def register_serializer(format, serializer_module, serializers=None): """Register a new serializer. ``serializer_module`` should be the fully qualified module name for the serializer. If ``serializers`` is provided, the registration will be added to the provided dictionary. If ``serializers`` is not provided, the registration will be made directly into the global register of serializers. Adding serializers directly is not a thread-safe operation. """ if serializers is None and not _serializers: _load_serializers() try: module = importlib.import_module(serializer_module) except ImportError as exc: bad_serializer = BadSerializer(exc) module = type('BadSerializerModule', (), { 'Deserializer': bad_serializer, 'Serializer': bad_serializer, }) if serializers is None: _serializers[format] = module else: serializers[format] = module def unregister_serializer(format): "Unregister a given serializer. This is not a thread-safe operation." if not _serializers: _load_serializers() if format not in _serializers: raise SerializerDoesNotExist(format) del _serializers[format] def get_serializer(format): if not _serializers: _load_serializers() if format not in _serializers: raise SerializerDoesNotExist(format) return _serializers[format].Serializer def get_serializer_formats(): if not _serializers: _load_serializers() return list(_serializers) def get_public_serializer_formats(): if not _serializers: _load_serializers() return [k for k, v in _serializers.items() if not v.Serializer.internal_use_only] def get_deserializer(format): if not _serializers: _load_serializers() if format not in _serializers: raise SerializerDoesNotExist(format) return _serializers[format].Deserializer def serialize(format, queryset, **options): """ Serialize a queryset (or any iterator that returns database objects) using a certain serializer. """ s = get_serializer(format)() s.serialize(queryset, **options) return s.getvalue() def deserialize(format, stream_or_string, **options): """ Deserialize a stream or a string. Return an iterator that yields ``(obj, m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* -- object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name : list_of_related_objects}``. 
""" d = get_deserializer(format) return d(stream_or_string, **options) def _load_serializers(): """ Register built-in and settings-defined serializers. This is done lazily so that user code has a chance to (e.g.) set up custom settings without needing to be careful of import order. """ global _serializers serializers = {} for format in BUILTIN_SERIALIZERS: register_serializer(format, BUILTIN_SERIALIZERS[format], serializers) if hasattr(settings, "SERIALIZATION_MODULES"): for format in settings.SERIALIZATION_MODULES: register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers) _serializers = serializers def sort_dependencies(app_list): """Sort a list of (app_config, models) pairs into a single list of models. The single list of models is sorted so that any model with a natural key is serialized before a normal model, and any model with a natural key dependency has it's dependencies serialized first. """ # Process the list of models, and get the list of dependencies model_dependencies = [] models = set() for app_config, model_list in app_list: if model_list is None: model_list = app_config.get_models() for model in model_list: models.add(model) # Add any explicitly defined dependencies if hasattr(model, 'natural_key'): deps = getattr(model.natural_key, 'dependencies', []) if deps: deps = [apps.get_model(dep) for dep in deps] else: deps = [] # Now add a dependency for any FK relation with a model that # defines a natural key for field in model._meta.fields: if field.remote_field: rel_model = field.remote_field.model if hasattr(rel_model, 'natural_key') and rel_model != model: deps.append(rel_model) # Also add a dependency for any simple M2M relation with a model # that defines a natural key. M2M relations with explicit through # models don't count as dependencies. for field in model._meta.many_to_many: if field.remote_field.through._meta.auto_created: rel_model = field.remote_field.model if hasattr(rel_model, 'natural_key') and rel_model != model: deps.append(rel_model) model_dependencies.append((model, deps)) model_dependencies.reverse() # Now sort the models to ensure that dependencies are met. This # is done by repeatedly iterating over the input list of models. # If all the dependencies of a given model are in the final list, # that model is promoted to the end of the final list. This process # continues until the input list is empty, or we do a full iteration # over the input models without promoting a model to the final list. # If we do a full iteration without a promotion, that means there are # circular dependencies in the list. model_list = [] while model_dependencies: skipped = [] changed = False while model_dependencies: model, deps = model_dependencies.pop() # If all of the models in the dependency list are either already # on the final model list, or not on the original serialization list, # then we've found another model with all it's dependencies satisfied. if all(d not in models or d in model_list for d in deps): model_list.append(model) changed = True else: skipped.append((model, deps)) if not changed: raise RuntimeError( "Can't resolve dependencies for %s in serialized app list." % ', '.join( model._meta.label for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__) ) ) model_dependencies = skipped return model_list
from django.db.models import ( CharField, Expression, Field, FloatField, Func, Lookup, TextField, Value, ) from django.db.models.expressions import CombinedExpression from django.db.models.functions import Cast, Coalesce class SearchVectorExact(Lookup): lookup_name = 'exact' def process_rhs(self, qn, connection): if not hasattr(self.rhs, 'resolve_expression'): config = getattr(self.lhs, 'config', None) self.rhs = SearchQuery(self.rhs, config=config) rhs, rhs_params = super().process_rhs(qn, connection) return rhs, rhs_params def as_sql(self, qn, connection): lhs, lhs_params = self.process_lhs(qn, connection) rhs, rhs_params = self.process_rhs(qn, connection) params = lhs_params + rhs_params return '%s @@ %s = true' % (lhs, rhs), params class SearchVectorField(Field): def db_type(self, connection): return 'tsvector' class SearchQueryField(Field): def db_type(self, connection): return 'tsquery' class SearchConfig(Expression): def __init__(self, config): super().__init__() if not hasattr(config, 'resolve_expression'): config = Value(config) self.config = config def get_source_expressions(self): return [self.config] def set_source_expressions(self, exprs): self.config, = exprs def as_sql(self, compiler, connection): sql, params = compiler.compile(self.config) return '%s::regconfig' % sql, params class SearchVectorCombinable: ADD = '||' def _combine(self, other, connector, reversed): if not isinstance(other, SearchVectorCombinable) or not self.config == other.config: raise TypeError( 'SearchVector can only be combined with other SearchVector ' 'instances, got %s.' % type(other).__name__ ) if reversed: return CombinedSearchVector(other, connector, self, self.config) return CombinedSearchVector(self, connector, other, self.config) class SearchVector(SearchVectorCombinable, Func): function = 'to_tsvector' arg_joiner = " || ' ' || " output_field = SearchVectorField() config = None def __init__(self, *expressions, **extra): super().__init__(*expressions, **extra) config = self.extra.get('config', self.config) self.config = SearchConfig(config) if config else None weight = self.extra.get('weight') if weight is not None and not hasattr(weight, 'resolve_expression'): weight = Value(weight) self.weight = weight def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): resolved = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) if self.config: resolved.config = self.config.resolve_expression(query, allow_joins, reuse, summarize, for_save) return resolved def as_sql(self, compiler, connection, function=None, template=None): clone = self.copy() clone.set_source_expressions([ Coalesce( expression if isinstance(expression.output_field, (CharField, TextField)) else Cast(expression, TextField()), Value('') ) for expression in clone.get_source_expressions() ]) config_sql = None config_params = [] if template is None: if clone.config: config_sql, config_params = compiler.compile(clone.config) template = '%(function)s(%(config)s, %(expressions)s)' else: template = clone.template sql, params = super(SearchVector, clone).as_sql( compiler, connection, function=function, template=template, config=config_sql, ) extra_params = [] if clone.weight: weight_sql, extra_params = compiler.compile(clone.weight) sql = 'setweight({}, {})'.format(sql, weight_sql) return sql, config_params + params + extra_params class CombinedSearchVector(SearchVectorCombinable, CombinedExpression): def __init__(self, lhs, connector, rhs, config, output_field=None): 
self.config = config super().__init__(lhs, connector, rhs, output_field) class SearchQueryCombinable: BITAND = '&&' BITOR = '||' def _combine(self, other, connector, reversed): if not isinstance(other, SearchQueryCombinable): raise TypeError( 'SearchQuery can only be combined with other SearchQuery ' 'instances, got %s.' % type(other).__name__ ) if reversed: return CombinedSearchQuery(other, connector, self, self.config) return CombinedSearchQuery(self, connector, other, self.config) # On Combinable, these are not implemented to reduce confusion with Q. In # this case we are actually (ab)using them to do logical combination so # it's consistent with other usage in Django. def __or__(self, other): return self._combine(other, self.BITOR, False) def __ror__(self, other): return self._combine(other, self.BITOR, True) def __and__(self, other): return self._combine(other, self.BITAND, False) def __rand__(self, other): return self._combine(other, self.BITAND, True) class SearchQuery(SearchQueryCombinable, Value): output_field = SearchQueryField() SEARCH_TYPES = { 'plain': 'plainto_tsquery', 'phrase': 'phraseto_tsquery', 'raw': 'to_tsquery', 'websearch': 'websearch_to_tsquery', } def __init__(self, value, output_field=None, *, config=None, invert=False, search_type='plain'): self.config = SearchConfig(config) if config else None self.invert = invert if search_type not in self.SEARCH_TYPES: raise ValueError("Unknown search_type argument '%s'." % search_type) self.search_type = search_type super().__init__(value, output_field=output_field) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): resolved = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) if self.config: resolved.config = self.config.resolve_expression(query, allow_joins, reuse, summarize, for_save) return resolved def as_sql(self, compiler, connection): params = [self.value] function = self.SEARCH_TYPES[self.search_type] if self.config: config_sql, config_params = compiler.compile(self.config) template = '{}({}, %s)'.format(function, config_sql) params = config_params + [self.value] else: template = '{}(%s)'.format(function) if self.invert: template = '!!({})'.format(template) return template, params def _combine(self, other, connector, reversed): combined = super()._combine(other, connector, reversed) combined.output_field = SearchQueryField() return combined def __invert__(self): return type(self)(self.value, config=self.config, invert=not self.invert) def __str__(self): result = super().__str__() return ('~%s' % result) if self.invert else result class CombinedSearchQuery(SearchQueryCombinable, CombinedExpression): def __init__(self, lhs, connector, rhs, config, output_field=None): self.config = config super().__init__(lhs, connector, rhs, output_field) def __str__(self): return '(%s)' % super().__str__() class SearchRank(Func): function = 'ts_rank' output_field = FloatField() def __init__(self, vector, query, **extra): if not hasattr(vector, 'resolve_expression'): vector = SearchVector(vector) if not hasattr(query, 'resolve_expression'): query = SearchQuery(query) weights = extra.get('weights') if weights is not None and not hasattr(weights, 'resolve_expression'): weights = Value(weights) self.weights = weights super().__init__(vector, query, **extra) def as_sql(self, compiler, connection, function=None, template=None): extra_params = [] extra_context = {} if template is None and self.extra.get('weights'): if self.weights: template = 
'%(function)s(%(weights)s, %(expressions)s)' weight_sql, extra_params = compiler.compile(self.weights) extra_context['weights'] = weight_sql sql, params = super().as_sql( compiler, connection, function=function, template=template, **extra_context ) return sql, extra_params + params SearchVectorField.register_lookup(SearchVectorExact) class TrigramBase(Func): output_field = FloatField() def __init__(self, expression, string, **extra): if not hasattr(string, 'resolve_expression'): string = Value(string) super().__init__(expression, string, **extra) class TrigramSimilarity(TrigramBase): function = 'SIMILARITY' class TrigramDistance(TrigramBase): function = '' arg_joiner = ' <-> '
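# --- Illustrative sketch (not part of the original module) -----------------
# Typical composition of the expressions above: weighted vectors combined
# with ||, queries combined with & and negated with ~, ranked by ts_rank.
# BlogPost is a hypothetical model with "title" and "body" text fields, and
# executing the queryset requires a PostgreSQL connection.
def _full_text_search_sketch(BlogPost):
    vector = SearchVector('title', weight='A') + SearchVector('body', weight='B')
    query = SearchQuery('cheese') & ~SearchQuery('stilton')
    return (
        BlogPost.objects
        .annotate(rank=SearchRank(vector, query))
        .filter(rank__gte=0.3)
        .order_by('-rank')
    )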
from django.contrib.postgres.signals import ( get_citext_oids, get_hstore_oids, register_type_handlers, ) from django.db import NotSupportedError from django.db.migrations import AddIndex, RemoveIndex from django.db.migrations.operations.base import Operation class CreateExtension(Operation): reversible = True def __init__(self, name): self.name = name def state_forwards(self, app_label, state): pass def database_forwards(self, app_label, schema_editor, from_state, to_state): if schema_editor.connection.vendor != 'postgresql': return schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % schema_editor.quote_name(self.name)) # Clear cached, stale oids. get_hstore_oids.cache_clear() get_citext_oids.cache_clear() # Registering new type handlers cannot be done before the extension is # installed, otherwise a subsequent data migration would use the same # connection. register_type_handlers(schema_editor.connection) def database_backwards(self, app_label, schema_editor, from_state, to_state): schema_editor.execute("DROP EXTENSION %s" % schema_editor.quote_name(self.name)) # Clear cached, stale oids. get_hstore_oids.cache_clear() get_citext_oids.cache_clear() def describe(self): return "Creates extension %s" % self.name class BloomExtension(CreateExtension): def __init__(self): self.name = 'bloom' class BtreeGinExtension(CreateExtension): def __init__(self): self.name = 'btree_gin' class BtreeGistExtension(CreateExtension): def __init__(self): self.name = 'btree_gist' class CITextExtension(CreateExtension): def __init__(self): self.name = 'citext' class CryptoExtension(CreateExtension): def __init__(self): self.name = 'pgcrypto' class HStoreExtension(CreateExtension): def __init__(self): self.name = 'hstore' class TrigramExtension(CreateExtension): def __init__(self): self.name = 'pg_trgm' class UnaccentExtension(CreateExtension): def __init__(self): self.name = 'unaccent' class NotInTransactionMixin: def _ensure_not_in_transaction(self, schema_editor): if schema_editor.connection.in_atomic_block: raise NotSupportedError( 'The %s operation cannot be executed inside a transaction ' '(set atomic = False on the migration).' 
% self.__class__.__name__ ) class AddIndexConcurrently(NotInTransactionMixin, AddIndex): """Create an index using PostgreSQL's CREATE INDEX CONCURRENTLY syntax.""" atomic = False def describe(self): return 'Concurrently create index %s on field(s) %s of model %s' % ( self.index.name, ', '.join(self.index.fields), self.model_name, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): self._ensure_not_in_transaction(schema_editor) model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.add_index(model, self.index, concurrently=True) def database_backwards(self, app_label, schema_editor, from_state, to_state): self._ensure_not_in_transaction(schema_editor) model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.remove_index(model, self.index, concurrently=True) class RemoveIndexConcurrently(NotInTransactionMixin, RemoveIndex): """Remove an index using PostgreSQL's DROP INDEX CONCURRENTLY syntax.""" atomic = False def describe(self): return 'Concurrently remove index %s from %s' % (self.name, self.model_name) def database_forwards(self, app_label, schema_editor, from_state, to_state): self._ensure_not_in_transaction(schema_editor) model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): from_model_state = from_state.models[app_label, self.model_name_lower] index = from_model_state.get_index_by_name(self.name) schema_editor.remove_index(model, index, concurrently=True) def database_backwards(self, app_label, schema_editor, from_state, to_state): self._ensure_not_in_transaction(schema_editor) model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): to_model_state = to_state.models[app_label, self.model_name_lower] index = to_model_state.get_index_by_name(self.name) schema_editor.add_index(model, index, concurrently=True)
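# Usage sketch (illustrative only, not part of the module above): a migration
# combining the operations defined here. The 'blog' app label, 'Entry' model
# name, and index name are assumptions. ``atomic = False`` is required
# because CREATE INDEX CONCURRENTLY cannot run inside a transaction
# (NotInTransactionMixin raises NotSupportedError otherwise).
def _example_migration():
    from django.db import migrations, models

    class Migration(migrations.Migration):
        atomic = False  # mandatory for the *Concurrently operations
        dependencies = [('blog', '0001_initial')]  # hypothetical dependency
        operations = [
            TrigramExtension(),
            AddIndexConcurrently(
                'Entry',  # hypothetical model name
                models.Index(fields=['body'], name='blog_entry_body_idx'),
            ),
        ]

    return Migration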
020915fd4792cbd43fb7f586e049ef40bdf815b17917711ea5e67ee98f24d354
from itertools import chain from types import MethodType from django.apps import apps from django.conf import settings from django.core import checks from .management import _get_builtin_permissions def check_user_model(app_configs=None, **kwargs): if app_configs is None: cls = apps.get_model(settings.AUTH_USER_MODEL) else: app_label, model_name = settings.AUTH_USER_MODEL.split('.') for app_config in app_configs: if app_config.label == app_label: cls = app_config.get_model(model_name) break else: # Checks might be run against a set of app configs that don't # include the specified user model. In this case we simply don't # perform the checks defined below. return [] errors = [] # Check that REQUIRED_FIELDS is a list if not isinstance(cls.REQUIRED_FIELDS, (list, tuple)): errors.append( checks.Error( "'REQUIRED_FIELDS' must be a list or tuple.", obj=cls, id='auth.E001', ) ) # Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS. if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS: errors.append( checks.Error( "The field named as the 'USERNAME_FIELD' " "for a custom user model must not be included in 'REQUIRED_FIELDS'.", obj=cls, id='auth.E002', ) ) # Check that the username field is unique if not cls._meta.get_field(cls.USERNAME_FIELD).unique: if (settings.AUTHENTICATION_BACKENDS == ['django.contrib.auth.backends.ModelBackend']): errors.append( checks.Error( "'%s.%s' must be unique because it is named as the 'USERNAME_FIELD'." % ( cls._meta.object_name, cls.USERNAME_FIELD ), obj=cls, id='auth.E003', ) ) else: errors.append( checks.Warning( "'%s.%s' is named as the 'USERNAME_FIELD', but it is not unique." % ( cls._meta.object_name, cls.USERNAME_FIELD ), hint='Ensure that your authentication backend(s) can handle non-unique usernames.', obj=cls, id='auth.W004', ) ) if isinstance(cls().is_anonymous, MethodType): errors.append( checks.Critical( '%s.is_anonymous must be an attribute or property rather than ' 'a method. Ignoring this is a security issue as anonymous ' 'users will be treated as authenticated!' % cls, obj=cls, id='auth.C009', ) ) if isinstance(cls().is_authenticated, MethodType): errors.append( checks.Critical( '%s.is_authenticated must be an attribute or property rather ' 'than a method. Ignoring this is a security issue as anonymous ' 'users will be treated as authenticated!' % cls, obj=cls, id='auth.C010', ) ) return errors def check_models_permissions(app_configs=None, **kwargs): if app_configs is None: models = apps.get_models() else: models = chain.from_iterable(app_config.get_models() for app_config in app_configs) Permission = apps.get_model('auth', 'Permission') permission_name_max_length = Permission._meta.get_field('name').max_length permission_codename_max_length = Permission._meta.get_field('codename').max_length errors = [] for model in models: opts = model._meta builtin_permissions = dict(_get_builtin_permissions(opts)) # Check builtin permission name length. max_builtin_permission_name_length = ( max(len(name) for name in builtin_permissions.values()) if builtin_permissions else 0 ) if max_builtin_permission_name_length > permission_name_max_length: verbose_name_max_length = ( permission_name_max_length - (max_builtin_permission_name_length - len(opts.verbose_name_raw)) ) errors.append( checks.Error( "The verbose_name of model '%s' must be at most %d " "characters for its builtin permission names to be at " "most %d characters." 
% ( opts.label, verbose_name_max_length, permission_name_max_length ), obj=model, id='auth.E007', ) ) # Check builtin permission codename length. max_builtin_permission_codename_length = ( max(len(codename) for codename in builtin_permissions.keys()) if builtin_permissions else 0 ) if max_builtin_permission_codename_length > permission_codename_max_length: model_name_max_length = permission_codename_max_length - ( max_builtin_permission_codename_length - len(opts.model_name) ) errors.append( checks.Error( "The name of model '%s' must be at most %d characters " "for its builtin permission codenames to be at most %d " "characters." % ( opts.label, model_name_max_length, permission_codename_max_length, ), obj=model, id='auth.E011', ) ) codenames = set() for codename, name in opts.permissions: # Check custom permission name length. if len(name) > permission_name_max_length: errors.append( checks.Error( "The permission named '%s' of model '%s' is longer " "than %d characters." % ( name, opts.label, permission_name_max_length, ), obj=model, id='auth.E008', ) ) # Check custom permission codename length. if len(codename) > permission_codename_max_length: errors.append( checks.Error( "The permission codenamed '%s' of model '%s' is " "longer than %d characters." % ( codename, opts.label, permission_codename_max_length, ), obj=model, id='auth.E012', ) ) # Check custom permissions codename clashing. if codename in builtin_permissions: errors.append( checks.Error( "The permission codenamed '%s' clashes with a builtin permission " "for model '%s'." % (codename, opts.label), obj=model, id='auth.E005', ) ) elif codename in codenames: errors.append( checks.Error( "The permission codenamed '%s' is duplicated for " "model '%s'." % (codename, opts.label), obj=model, id='auth.E006', ) ) codenames.add(codename) return errors
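# Usage sketch (illustrative only, not part of the module above): the auth
# app config registers check_user_model() and check_models_permissions() as
# system checks, so they normally run through ``manage.py check``. Calling
# them directly, as below, yields the same CheckMessage objects (IDs
# auth.E001-auth.E012, auth.W004, auth.C009, and auth.C010).
def _example_run_auth_checks():
    # Both checkers accept an optional app_configs list; None means "check
    # the configured user model and every installed model".
    messages = [*check_user_model(), *check_models_permissions()]
    for message in messages:
        print(message.id, message.msg)
    return messages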
2b9c4655499bb362c0828fdac276936567fad987a09470afa3f81708ad94e5b8
import copy import json import operator import re from functools import partial, reduce, update_wrapper from urllib.parse import quote as urlquote from django import forms from django.conf import settings from django.contrib import messages from django.contrib.admin import helpers, widgets from django.contrib.admin.checks import ( BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks, ) from django.contrib.admin.exceptions import DisallowedModelAdminToField from django.contrib.admin.templatetags.admin_urls import add_preserved_filters from django.contrib.admin.utils import ( NestedObjects, construct_change_message, flatten_fieldsets, get_deleted_objects, lookup_needs_distinct, model_format_dict, model_ngettext, quote, unquote, ) from django.contrib.admin.views.autocomplete import AutocompleteJsonView from django.contrib.admin.widgets import ( AutocompleteSelect, AutocompleteSelectMultiple, ) from django.contrib.auth import get_permission_codename from django.core.exceptions import ( FieldDoesNotExist, FieldError, PermissionDenied, ValidationError, ) from django.core.paginator import Paginator from django.db import models, router, transaction from django.db.models.constants import LOOKUP_SEP from django.forms.formsets import DELETION_FIELD_NAME, all_valid from django.forms.models import ( BaseInlineFormSet, inlineformset_factory, modelform_defines_fields, modelform_factory, modelformset_factory, ) from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple from django.http import HttpResponseRedirect from django.http.response import HttpResponseBase from django.template.response import SimpleTemplateResponse, TemplateResponse from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.html import format_html from django.utils.http import urlencode from django.utils.safestring import mark_safe from django.utils.text import capfirst, format_lazy, get_text_list from django.utils.translation import gettext as _, ngettext from django.views.decorators.csrf import csrf_protect from django.views.generic import RedirectView IS_POPUP_VAR = '_popup' TO_FIELD_VAR = '_to_field' HORIZONTAL, VERTICAL = 1, 2 def get_content_type_for_model(obj): # Since this module gets imported in the application's root package, # it cannot import models from other applications at the module level. from django.contrib.contenttypes.models import ContentType return ContentType.objects.get_for_model(obj, for_concrete_model=False) def get_ul_class(radio_style): return 'radiolist' if radio_style == VERTICAL else 'radiolist inline' class IncorrectLookupParameters(Exception): pass # Defaults for formfield_overrides. ModelAdmin subclasses can change this # by adding to ModelAdmin.formfield_overrides. 
FORMFIELD_FOR_DBFIELD_DEFAULTS = { models.DateTimeField: { 'form_class': forms.SplitDateTimeField, 'widget': widgets.AdminSplitDateTime }, models.DateField: {'widget': widgets.AdminDateWidget}, models.TimeField: {'widget': widgets.AdminTimeWidget}, models.TextField: {'widget': widgets.AdminTextareaWidget}, models.URLField: {'widget': widgets.AdminURLFieldWidget}, models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget}, models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget}, models.CharField: {'widget': widgets.AdminTextInputWidget}, models.ImageField: {'widget': widgets.AdminFileWidget}, models.FileField: {'widget': widgets.AdminFileWidget}, models.EmailField: {'widget': widgets.AdminEmailInputWidget}, models.UUIDField: {'widget': widgets.AdminUUIDInputWidget}, } csrf_protect_m = method_decorator(csrf_protect) class BaseModelAdmin(metaclass=forms.MediaDefiningClass): """Functionality common to both ModelAdmin and InlineAdmin.""" autocomplete_fields = () raw_id_fields = () fields = None exclude = None fieldsets = None form = forms.ModelForm filter_vertical = () filter_horizontal = () radio_fields = {} prepopulated_fields = {} formfield_overrides = {} readonly_fields = () ordering = None sortable_by = None view_on_site = True show_full_result_count = True checks_class = BaseModelAdminChecks def check(self, **kwargs): return self.checks_class().check(self, **kwargs) def __init__(self): # Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides # rather than simply overwriting. overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS) for k, v in self.formfield_overrides.items(): overrides.setdefault(k, {}).update(v) self.formfield_overrides = overrides def formfield_for_dbfield(self, db_field, request, **kwargs): """ Hook for specifying the form Field instance for a given database Field instance. If kwargs are given, they're passed to the form Field's constructor. """ # If the field specifies choices, we don't need to look for special # admin widgets - we just need to use a select widget of some kind. if db_field.choices: return self.formfield_for_choice_field(db_field, request, **kwargs) # ForeignKey or ManyToManyFields if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)): # Combine the field kwargs with any options for formfield_overrides. # Make sure the passed in **kwargs override anything in # formfield_overrides because **kwargs is more specific, and should # always win. if db_field.__class__ in self.formfield_overrides: kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs} # Get the correct formfield. if isinstance(db_field, models.ForeignKey): formfield = self.formfield_for_foreignkey(db_field, request, **kwargs) elif isinstance(db_field, models.ManyToManyField): formfield = self.formfield_for_manytomany(db_field, request, **kwargs) # For non-raw_id fields, wrap the widget with a wrapper that adds # extra HTML -- the "add other" interface -- to the end of the # rendered output. formfield can be None if it came from a # OneToOneField with parent_link=True or a M2M intermediary. 
if formfield and db_field.name not in self.raw_id_fields: related_modeladmin = self.admin_site._registry.get(db_field.remote_field.model) wrapper_kwargs = {} if related_modeladmin: wrapper_kwargs.update( can_add_related=related_modeladmin.has_add_permission(request), can_change_related=related_modeladmin.has_change_permission(request), can_delete_related=related_modeladmin.has_delete_permission(request), can_view_related=related_modeladmin.has_view_permission(request), ) formfield.widget = widgets.RelatedFieldWidgetWrapper( formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs ) return formfield # If we've got overrides for the formfield defined, use 'em. **kwargs # passed to formfield_for_dbfield override the defaults. for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: kwargs = {**copy.deepcopy(self.formfield_overrides[klass]), **kwargs} return db_field.formfield(**kwargs) # For any other type of field, just call its formfield() method. return db_field.formfield(**kwargs) def formfield_for_choice_field(self, db_field, request, **kwargs): """ Get a form Field for a database Field that has declared choices. """ # If the field is named as a radio_field, use a RadioSelect if db_field.name in self.radio_fields: # Avoid stomping on custom widget/choices arguments. if 'widget' not in kwargs: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) if 'choices' not in kwargs: kwargs['choices'] = db_field.get_choices( include_blank=db_field.blank, blank_choice=[('', _('None'))] ) return db_field.formfield(**kwargs) def get_field_queryset(self, db, db_field, request): """ If the ModelAdmin specifies ordering, the queryset should respect that ordering. Otherwise don't specify the queryset, let the field decide (return None in that case). """ related_admin = self.admin_site._registry.get(db_field.remote_field.model) if related_admin is not None: ordering = related_admin.get_ordering(request) if ordering is not None and ordering != (): return db_field.remote_field.model._default_manager.using(db).order_by(*ordering) return None def formfield_for_foreignkey(self, db_field, request, **kwargs): """ Get a form Field for a ForeignKey. """ db = kwargs.get('using') if 'widget' not in kwargs: if db_field.name in self.get_autocomplete_fields(request): kwargs['widget'] = AutocompleteSelect(db_field.remote_field, self.admin_site, using=db) elif db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field, self.admin_site, using=db) elif db_field.name in self.radio_fields: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) kwargs['empty_label'] = _('None') if db_field.blank else None if 'queryset' not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs['queryset'] = queryset return db_field.formfield(**kwargs) def formfield_for_manytomany(self, db_field, request, **kwargs): """ Get a form Field for a ManyToManyField. """ # If it uses an intermediary model that isn't auto created, don't show # a field in admin. 
if not db_field.remote_field.through._meta.auto_created: return None db = kwargs.get('using') autocomplete_fields = self.get_autocomplete_fields(request) if db_field.name in autocomplete_fields: kwargs['widget'] = AutocompleteSelectMultiple(db_field.remote_field, self.admin_site, using=db) elif db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field, self.admin_site, using=db) elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]: kwargs['widget'] = widgets.FilteredSelectMultiple( db_field.verbose_name, db_field.name in self.filter_vertical ) if 'queryset' not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs['queryset'] = queryset form_field = db_field.formfield(**kwargs) if (isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple))): msg = _('Hold down “Control”, or “Command” on a Mac, to select more than one.') help_text = form_field.help_text form_field.help_text = format_lazy('{} {}', help_text, msg) if help_text else msg return form_field def get_autocomplete_fields(self, request): """ Return a list of ForeignKey and/or ManyToMany fields which should use an autocomplete widget. """ return self.autocomplete_fields def get_view_on_site_url(self, obj=None): if obj is None or not self.view_on_site: return None if callable(self.view_on_site): return self.view_on_site(obj) elif self.view_on_site and hasattr(obj, 'get_absolute_url'): # use the ContentType lookup if view_on_site is True return reverse('admin:view_on_site', kwargs={ 'content_type_id': get_content_type_for_model(obj).pk, 'object_id': obj.pk }) def get_empty_value_display(self): """ Return the empty_value_display set on ModelAdmin or AdminSite. """ try: return mark_safe(self.empty_value_display) except AttributeError: return mark_safe(self.admin_site.empty_value_display) def get_exclude(self, request, obj=None): """ Hook for specifying exclude. """ return self.exclude def get_fields(self, request, obj=None): """ Hook for specifying fields. """ if self.fields: return self.fields # _get_form_for_get_fields() is implemented in subclasses. form = self._get_form_for_get_fields(request, obj) return [*form.base_fields, *self.get_readonly_fields(request, obj)] def get_fieldsets(self, request, obj=None): """ Hook for specifying fieldsets. """ if self.fieldsets: return self.fieldsets return [(None, {'fields': self.get_fields(request, obj)})] def get_inlines(self, request, obj): """Hook for specifying custom inlines.""" return self.inlines def get_ordering(self, request): """ Hook for specifying field ordering. """ return self.ordering or () # otherwise we might try to *None, which is bad ;) def get_readonly_fields(self, request, obj=None): """ Hook for specifying custom readonly fields. """ return self.readonly_fields def get_prepopulated_fields(self, request, obj=None): """ Hook for specifying custom prepopulated fields. """ return self.prepopulated_fields def get_queryset(self, request): """ Return a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view. """ qs = self.model._default_manager.get_queryset() # TODO: this should be handled by some parameter to the ChangeList. 
ordering = self.get_ordering(request) if ordering: qs = qs.order_by(*ordering) return qs def get_sortable_by(self, request): """Hook for specifying which fields can be sorted in the changelist.""" return self.sortable_by if self.sortable_by is not None else self.get_list_display(request) def lookup_allowed(self, lookup, value): from django.contrib.admin.filters import SimpleListFilter model = self.model # Check FKey lookups that are allowed, so that popups produced by # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to, # are allowed to work. for fk_lookup in model._meta.related_fkey_lookups: # As ``limit_choices_to`` can be a callable, invoke it here. if callable(fk_lookup): fk_lookup = fk_lookup() if (lookup, value) in widgets.url_params_from_lookup_dict(fk_lookup).items(): return True relation_parts = [] prev_field = None for part in lookup.split(LOOKUP_SEP): try: field = model._meta.get_field(part) except FieldDoesNotExist: # Lookups on nonexistent fields are ok, since they're ignored # later. break # It is allowed to filter on values that would be found from local # model anyways. For example, if you filter on employee__department__id, # then the id value would be found already from employee__department_id. if not prev_field or (prev_field.is_relation and field not in prev_field.get_path_info()[-1].target_fields): relation_parts.append(part) if not getattr(field, 'get_path_info', None): # This is not a relational field, so further parts # must be transforms. break prev_field = field model = field.get_path_info()[-1].to_opts.model if len(relation_parts) <= 1: # Either a local field filter, or no fields at all. return True valid_lookups = {self.date_hierarchy} for filter_item in self.list_filter: if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter): valid_lookups.add(filter_item.parameter_name) elif isinstance(filter_item, (list, tuple)): valid_lookups.add(filter_item[0]) else: valid_lookups.add(filter_item) # Is it a valid relational lookup? return not { LOOKUP_SEP.join(relation_parts), LOOKUP_SEP.join(relation_parts + [part]) }.isdisjoint(valid_lookups) def to_field_allowed(self, request, to_field): """ Return True if the model associated with this admin should be allowed to be referenced by the specified field. """ opts = self.model._meta try: field = opts.get_field(to_field) except FieldDoesNotExist: return False # Always allow referencing the primary key since it's already possible # to get this information from the change view URL. if field.primary_key: return True # Allow reverse relationships to models defining m2m fields if they # target the specified field. for many_to_many in opts.many_to_many: if many_to_many.m2m_target_field_name() == to_field: return True # Make sure at least one of the models registered for this site # references this field through a FK or a M2M relationship. 
registered_models = set() for model, admin in self.admin_site._registry.items(): registered_models.add(model) for inline in admin.inlines: registered_models.add(inline.model) related_objects = ( f for f in opts.get_fields(include_hidden=True) if (f.auto_created and not f.concrete) ) for related_object in related_objects: related_model = related_object.related_model remote_field = related_object.field.remote_field if (any(issubclass(model, related_model) for model in registered_models) and hasattr(remote_field, 'get_related_field') and remote_field.get_related_field() == field): return True return False def has_add_permission(self, request): """ Return True if the given request has permission to add an object. Can be overridden by the user in subclasses. """ opts = self.opts codename = get_permission_codename('add', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_change_permission(self, request, obj=None): """ Return True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to change the `obj` model instance. If `obj` is None, this should return True if the given request has permission to change *any* object of the given type. """ opts = self.opts codename = get_permission_codename('change', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_delete_permission(self, request, obj=None): """ Return True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to delete the `obj` model instance. If `obj` is None, this should return True if the given request has permission to delete *any* object of the given type. """ opts = self.opts codename = get_permission_codename('delete', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_view_permission(self, request, obj=None): """ Return True if the given request has permission to view the given Django model instance. The default implementation doesn't examine the `obj` parameter. If overridden by the user in subclasses, it should return True if the given request has permission to view the `obj` model instance. If `obj` is None, it should return True if the request has permission to view any object of the given type. """ opts = self.opts codename_view = get_permission_codename('view', opts) codename_change = get_permission_codename('change', opts) return ( request.user.has_perm('%s.%s' % (opts.app_label, codename_view)) or request.user.has_perm('%s.%s' % (opts.app_label, codename_change)) ) def has_view_or_change_permission(self, request, obj=None): return self.has_view_permission(request, obj) or self.has_change_permission(request, obj) def has_module_permission(self, request): """ Return True if the given request has any permission in the given app label. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to view the module on the admin index page and access the module's index page. Overriding it does not restrict access to the add, change or delete views. Use `ModelAdmin.has_(add|change|delete)_permission` for that. 
""" return request.user.has_module_perms(self.opts.app_label) class ModelAdmin(BaseModelAdmin): """Encapsulate all admin options and functionality for a given model.""" list_display = ('__str__',) list_display_links = () list_filter = () list_select_related = False list_per_page = 100 list_max_show_all = 200 list_editable = () search_fields = () date_hierarchy = None save_as = False save_as_continue = True save_on_top = False paginator = Paginator preserve_filters = True inlines = [] # Custom templates (designed to be over-ridden in subclasses) add_form_template = None change_form_template = None change_list_template = None delete_confirmation_template = None delete_selected_confirmation_template = None object_history_template = None popup_response_template = None # Actions actions = [] action_form = helpers.ActionForm actions_on_top = True actions_on_bottom = False actions_selection_counter = True checks_class = ModelAdminChecks def __init__(self, model, admin_site): self.model = model self.opts = model._meta self.admin_site = admin_site super().__init__() def __str__(self): return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__) def get_inline_instances(self, request, obj=None): inline_instances = [] for inline_class in self.get_inlines(request, obj): inline = inline_class(self.model, self.admin_site) if request: if not (inline.has_view_or_change_permission(request, obj) or inline.has_add_permission(request, obj) or inline.has_delete_permission(request, obj)): continue if not inline.has_add_permission(request, obj): inline.max_num = 0 inline_instances.append(inline) return inline_instances def get_urls(self): from django.urls import path def wrap(view): def wrapper(*args, **kwargs): return self.admin_site.admin_view(view)(*args, **kwargs) wrapper.model_admin = self return update_wrapper(wrapper, view) info = self.model._meta.app_label, self.model._meta.model_name return [ path('', wrap(self.changelist_view), name='%s_%s_changelist' % info), path('add/', wrap(self.add_view), name='%s_%s_add' % info), path('autocomplete/', wrap(self.autocomplete_view), name='%s_%s_autocomplete' % info), path('<path:object_id>/history/', wrap(self.history_view), name='%s_%s_history' % info), path('<path:object_id>/delete/', wrap(self.delete_view), name='%s_%s_delete' % info), path('<path:object_id>/change/', wrap(self.change_view), name='%s_%s_change' % info), # For backwards compatibility (was the change url before 1.9) path('<path:object_id>/', wrap(RedirectView.as_view( pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info) ))), ] @property def urls(self): return self.get_urls() @property def media(self): extra = '' if settings.DEBUG else '.min' js = [ 'vendor/jquery/jquery%s.js' % extra, 'jquery.init.js', 'core.js', 'admin/RelatedObjectLookups.js', 'actions%s.js' % extra, 'urlify.js', 'prepopulate%s.js' % extra, 'vendor/xregexp/xregexp%s.js' % extra, ] return forms.Media(js=['admin/js/%s' % url for url in js]) def get_model_perms(self, request): """ Return a dict of all perms for this model. This dict has the keys ``add``, ``change``, ``delete``, and ``view`` mapping to the True/False for each of those actions. 
""" return { 'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request), 'view': self.has_view_permission(request), } def _get_form_for_get_fields(self, request, obj): return self.get_form(request, obj, fields=None) def get_form(self, request, obj=None, change=False, **kwargs): """ Return a Form class for use in the admin add view. This is used by add_view and change_view. """ if 'fields' in kwargs: fields = kwargs.pop('fields') else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) excluded = self.get_exclude(request, obj) exclude = [] if excluded is None else list(excluded) readonly_fields = self.get_readonly_fields(request, obj) exclude.extend(readonly_fields) # Exclude all fields if it's a change form and the user doesn't have # the change permission. if change and hasattr(request, 'user') and not self.has_change_permission(request, obj): exclude.extend(fields) if excluded is None and hasattr(self.form, '_meta') and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # ModelAdmin doesn't define its own. exclude.extend(self.form._meta.exclude) # if exclude is an empty list we pass None to be consistent with the # default on modelform_factory exclude = exclude or None # Remove declared form fields which are in readonly_fields. new_attrs = dict.fromkeys(f for f in readonly_fields if f in self.form.declared_fields) form = type(self.form.__name__, (self.form,), new_attrs) defaults = { 'form': form, 'fields': fields, 'exclude': exclude, 'formfield_callback': partial(self.formfield_for_dbfield, request=request), **kwargs, } if defaults['fields'] is None and not modelform_defines_fields(defaults['form']): defaults['fields'] = forms.ALL_FIELDS try: return modelform_factory(self.model, **defaults) except FieldError as e: raise FieldError( '%s. Check fields/fieldsets/exclude attributes of class %s.' % (e, self.__class__.__name__) ) def get_changelist(self, request, **kwargs): """ Return the ChangeList class for use on the changelist page. """ from django.contrib.admin.views.main import ChangeList return ChangeList def get_changelist_instance(self, request): """ Return a `ChangeList` instance based on `request`. May raise `IncorrectLookupParameters`. """ list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) # Add the action checkboxes if any actions are available. if self.get_actions(request): list_display = ['action_checkbox', *list_display] sortable_by = self.get_sortable_by(request) ChangeList = self.get_changelist(request) return ChangeList( request, self.model, list_display, list_display_links, self.get_list_filter(request), self.date_hierarchy, self.get_search_fields(request), self.get_list_select_related(request), self.list_per_page, self.list_max_show_all, self.list_editable, self, sortable_by, ) def get_object(self, request, object_id, from_field=None): """ Return an instance matching the field and value provided, the primary key is used if no field is provided. Return ``None`` if no match is found or the object_id fails validation. 
""" queryset = self.get_queryset(request) model = queryset.model field = model._meta.pk if from_field is None else model._meta.get_field(from_field) try: object_id = field.to_python(object_id) return queryset.get(**{field.name: object_id}) except (model.DoesNotExist, ValidationError, ValueError): return None def get_changelist_form(self, request, **kwargs): """ Return a Form class for use in the Formset on the changelist page. """ defaults = { 'formfield_callback': partial(self.formfield_for_dbfield, request=request), **kwargs, } if defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form')): defaults['fields'] = forms.ALL_FIELDS return modelform_factory(self.model, **defaults) def get_changelist_formset(self, request, **kwargs): """ Return a FormSet class for use on the changelist page if list_editable is used. """ defaults = { 'formfield_callback': partial(self.formfield_for_dbfield, request=request), **kwargs, } return modelformset_factory( self.model, self.get_changelist_form(request), extra=0, fields=self.list_editable, **defaults ) def get_formsets_with_inlines(self, request, obj=None): """ Yield formsets and the corresponding inlines. """ for inline in self.get_inline_instances(request, obj): yield inline.get_formset(request, obj), inline def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True): return self.paginator(queryset, per_page, orphans, allow_empty_first_page) def log_addition(self, request, object, message): """ Log that an object has been successfully added. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, ADDITION return LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(object).pk, object_id=object.pk, object_repr=str(object), action_flag=ADDITION, change_message=message, ) def log_change(self, request, object, message): """ Log that an object has been successfully changed. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, CHANGE return LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(object).pk, object_id=object.pk, object_repr=str(object), action_flag=CHANGE, change_message=message, ) def log_deletion(self, request, object, object_repr): """ Log that an object will be deleted. Note that this method must be called before the deletion. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, DELETION return LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(object).pk, object_id=object.pk, object_repr=object_repr, action_flag=DELETION, ) def action_checkbox(self, obj): """ A list_display column containing a checkbox widget. """ return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, str(obj.pk)) action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle">') def _get_base_actions(self): """Return the list of actions, prior to any request-based filtering.""" actions = [] # Gather actions from the admin site first for (name, func) in self.admin_site.actions: description = getattr(func, 'short_description', name.replace('_', ' ')) actions.append((func, name, description)) # Add actions from this ModelAdmin. actions.extend(self.get_action(action) for action in self.actions or []) # get_action might have returned None, so filter any of those out. 
return filter(None, actions) def _filter_actions_by_permissions(self, request, actions): """Filter out any actions that the user doesn't have access to.""" filtered_actions = [] for action in actions: callable = action[0] if not hasattr(callable, 'allowed_permissions'): filtered_actions.append(action) continue permission_checks = ( getattr(self, 'has_%s_permission' % permission) for permission in callable.allowed_permissions ) if any(has_permission(request) for has_permission in permission_checks): filtered_actions.append(action) return filtered_actions def get_actions(self, request): """ Return a dictionary mapping the names of all actions for this ModelAdmin to a tuple of (callable, name, description) for each action. """ # If self.actions is set to None that means actions are disabled on # this page. if self.actions is None or IS_POPUP_VAR in request.GET: return {} actions = self._filter_actions_by_permissions(request, self._get_base_actions()) return {name: (func, name, desc) for func, name, desc in actions} def get_action_choices(self, request, default_choices=models.BLANK_CHOICE_DASH): """ Return a list of choices for use in a form object. Each choice is a tuple (name, description). """ choices = [] + default_choices for func, name, description in self.get_actions(request).values(): choice = (name, description % model_format_dict(self.opts)) choices.append(choice) return choices def get_action(self, action): """ Return a given action from a parameter, which can either be a callable, or the name of a method on the ModelAdmin. Return is a tuple of (callable, name, description). """ # If the action is a callable, just use it. if callable(action): func = action action = action.__name__ # Next, look for a method. Grab it off self.__class__ to get an unbound # method instead of a bound one; this ensures that the calling # conventions are the same for functions and methods. elif hasattr(self.__class__, action): func = getattr(self.__class__, action) # Finally, look for a named method on the admin site else: try: func = self.admin_site.get_action(action) except KeyError: return None if hasattr(func, 'short_description'): description = func.short_description else: description = capfirst(action.replace('_', ' ')) return func, action, description def get_list_display(self, request): """ Return a sequence containing the fields to be displayed on the changelist. """ return self.list_display def get_list_display_links(self, request, list_display): """ Return a sequence containing the fields to be displayed as links on the changelist. The list_display parameter is the list of fields returned by get_list_display(). """ if self.list_display_links or self.list_display_links is None or not list_display: return self.list_display_links else: # Use only the first item in list_display as link return list(list_display)[:1] def get_list_filter(self, request): """ Return a sequence containing the fields to be displayed as filters in the right sidebar of the changelist page. """ return self.list_filter def get_list_select_related(self, request): """ Return a list of fields to add to the select_related() part of the changelist items query. """ return self.list_select_related def get_search_fields(self, request): """ Return a sequence containing the fields to be searched whenever somebody submits a search query. 
""" return self.search_fields def get_search_results(self, request, queryset, search_term): """ Return a tuple containing a queryset to implement the search and a boolean indicating if the results may contain duplicates. """ # Apply keyword searches. def construct_search(field_name): if field_name.startswith('^'): return "%s__istartswith" % field_name[1:] elif field_name.startswith('='): return "%s__iexact" % field_name[1:] elif field_name.startswith('@'): return "%s__search" % field_name[1:] # Use field_name if it includes a lookup. opts = queryset.model._meta lookup_fields = field_name.split(LOOKUP_SEP) # Go through the fields, following all relations. prev_field = None for path_part in lookup_fields: if path_part == 'pk': path_part = opts.pk.name try: field = opts.get_field(path_part) except FieldDoesNotExist: # Use valid query lookups. if prev_field and prev_field.get_lookup(path_part): return field_name else: prev_field = field if hasattr(field, 'get_path_info'): # Update opts to follow the relation. opts = field.get_path_info()[-1].to_opts # Otherwise, use the field with icontains. return "%s__icontains" % field_name use_distinct = False search_fields = self.get_search_fields(request) if search_fields and search_term: orm_lookups = [construct_search(str(search_field)) for search_field in search_fields] for bit in search_term.split(): or_queries = [models.Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups] queryset = queryset.filter(reduce(operator.or_, or_queries)) use_distinct |= any(lookup_needs_distinct(self.opts, search_spec) for search_spec in orm_lookups) return queryset, use_distinct def get_preserved_filters(self, request): """ Return the preserved filters querystring. """ match = request.resolver_match if self.preserve_filters and match: opts = self.model._meta current_url = '%s:%s' % (match.app_name, match.url_name) changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name) if current_url == changelist_url: preserved_filters = request.GET.urlencode() else: preserved_filters = request.GET.get('_changelist_filters') if preserved_filters: return urlencode({'_changelist_filters': preserved_filters}) return '' def construct_change_message(self, request, form, formsets, add=False): """ Construct a JSON structure describing changes from a changed object. """ return construct_change_message(form, formsets, add) def message_user(self, request, message, level=messages.INFO, extra_tags='', fail_silently=False): """ Send a message to the user. The default implementation posts a message using the django.contrib.messages backend. Exposes almost the same API as messages.add_message(), but accepts the positional arguments in a different order to maintain backwards compatibility. For convenience, it accepts the `level` argument as a string rather than the usual level number. """ if not isinstance(level, int): # attempt to get the level if passed a string try: level = getattr(messages.constants, level.upper()) except AttributeError: levels = messages.constants.DEFAULT_TAGS.values() levels_repr = ', '.join('`%s`' % l for l in levels) raise ValueError( 'Bad message level string: `%s`. Possible values are: %s' % (level, levels_repr) ) messages.add_message(request, level, message, extra_tags=extra_tags, fail_silently=fail_silently) def save_form(self, request, form, change): """ Given a ModelForm return an unsaved instance. ``change`` is True if the object is being changed, and False if it's being added. 
""" return form.save(commit=False) def save_model(self, request, obj, form, change): """ Given a model instance save it to the database. """ obj.save() def delete_model(self, request, obj): """ Given a model instance delete it from the database. """ obj.delete() def delete_queryset(self, request, queryset): """Given a queryset, delete it from the database.""" queryset.delete() def save_formset(self, request, form, formset, change): """ Given an inline formset save it to the database. """ formset.save() def save_related(self, request, form, formsets, change): """ Given the ``HttpRequest``, the parent ``ModelForm`` instance, the list of inline formsets and a boolean value based on whether the parent is being added or changed, save the related objects to the database. Note that at this point save_form() and save_model() have already been called. """ form.save_m2m() for formset in formsets: self.save_formset(request, form, formset, change=change) def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None): opts = self.model._meta app_label = opts.app_label preserved_filters = self.get_preserved_filters(request) form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url) view_on_site_url = self.get_view_on_site_url(obj) has_editable_inline_admin_formsets = False for inline in context['inline_admin_formsets']: if inline.has_add_permission or inline.has_change_permission or inline.has_delete_permission: has_editable_inline_admin_formsets = True break context.update({ 'add': add, 'change': change, 'has_view_permission': self.has_view_permission(request, obj), 'has_add_permission': self.has_add_permission(request), 'has_change_permission': self.has_change_permission(request, obj), 'has_delete_permission': self.has_delete_permission(request, obj), 'has_editable_inline_admin_formsets': has_editable_inline_admin_formsets, 'has_file_field': context['adminform'].form.is_multipart() or any( admin_formset.formset.is_multipart() for admin_formset in context['inline_admin_formsets'] ), 'has_absolute_url': view_on_site_url is not None, 'absolute_url': view_on_site_url, 'form_url': form_url, 'opts': opts, 'content_type_id': get_content_type_for_model(self.model).pk, 'save_as': self.save_as, 'save_on_top': self.save_on_top, 'to_field_var': TO_FIELD_VAR, 'is_popup_var': IS_POPUP_VAR, 'app_label': app_label, }) if add and self.add_form_template is not None: form_template = self.add_form_template else: form_template = self.change_form_template request.current_app = self.admin_site.name return TemplateResponse(request, form_template or [ "admin/%s/%s/change_form.html" % (app_label, opts.model_name), "admin/%s/change_form.html" % app_label, "admin/change_form.html" ], context) def response_add(self, request, obj, post_url_continue=None): """ Determine the HttpResponse for the add_view stage. """ opts = obj._meta preserved_filters = self.get_preserved_filters(request) obj_url = reverse( 'admin:%s_%s_change' % (opts.app_label, opts.model_name), args=(quote(obj.pk),), current_app=self.admin_site.name, ) # Add a link to the object's change form if the user can edit the obj. if self.has_change_permission(request, obj): obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj) else: obj_repr = str(obj) msg_dict = { 'name': opts.verbose_name, 'obj': obj_repr, } # Here, we distinguish between different save types by checking for # the presence of keys in request.POST. 
if IS_POPUP_VAR in request.POST: to_field = request.POST.get(TO_FIELD_VAR) if to_field: attr = str(to_field) else: attr = obj._meta.pk.attname value = obj.serializable_value(attr) popup_response_data = json.dumps({ 'value': str(value), 'obj': str(obj), }) return TemplateResponse(request, self.popup_response_template or [ 'admin/%s/%s/popup_response.html' % (opts.app_label, opts.model_name), 'admin/%s/popup_response.html' % opts.app_label, 'admin/popup_response.html', ], { 'popup_response_data': popup_response_data, }) elif "_continue" in request.POST or ( # Redirecting after "Save as new". "_saveasnew" in request.POST and self.save_as_continue and self.has_change_permission(request, obj) ): msg = _('The {name} “{obj}” was added successfully.') if self.has_change_permission(request, obj): msg += ' ' + _('You may edit it again below.') self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS) if post_url_continue is None: post_url_continue = obj_url post_url_continue = add_preserved_filters( {'preserved_filters': preserved_filters, 'opts': opts}, post_url_continue ) return HttpResponseRedirect(post_url_continue) elif "_addanother" in request.POST: msg = format_html( _('The {name} “{obj}” was added successfully. You may add another {name} below.'), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url) return HttpResponseRedirect(redirect_url) else: msg = format_html( _('The {name} “{obj}” was added successfully.'), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_add(request, obj) def response_change(self, request, obj): """ Determine the HttpResponse for the change_view stage. """ if IS_POPUP_VAR in request.POST: opts = obj._meta to_field = request.POST.get(TO_FIELD_VAR) attr = str(to_field) if to_field else opts.pk.attname value = request.resolver_match.kwargs['object_id'] new_value = obj.serializable_value(attr) popup_response_data = json.dumps({ 'action': 'change', 'value': str(value), 'obj': str(obj), 'new_value': str(new_value), }) return TemplateResponse(request, self.popup_response_template or [ 'admin/%s/%s/popup_response.html' % (opts.app_label, opts.model_name), 'admin/%s/popup_response.html' % opts.app_label, 'admin/popup_response.html', ], { 'popup_response_data': popup_response_data, }) opts = self.model._meta preserved_filters = self.get_preserved_filters(request) msg_dict = { 'name': opts.verbose_name, 'obj': format_html('<a href="{}">{}</a>', urlquote(request.path), obj), } if "_continue" in request.POST: msg = format_html( _('The {name} “{obj}” was changed successfully. You may edit it again below.'), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url) return HttpResponseRedirect(redirect_url) elif "_saveasnew" in request.POST: msg = format_html( _('The {name} “{obj}” was added successfully. 
You may edit it again below.'), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) redirect_url = reverse('admin:%s_%s_change' % (opts.app_label, opts.model_name), args=(obj.pk,), current_app=self.admin_site.name) redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url) return HttpResponseRedirect(redirect_url) elif "_addanother" in request.POST: msg = format_html( _('The {name} “{obj}” was changed successfully. You may add another {name} below.'), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) redirect_url = reverse('admin:%s_%s_add' % (opts.app_label, opts.model_name), current_app=self.admin_site.name) redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url) return HttpResponseRedirect(redirect_url) else: msg = format_html( _('The {name} “{obj}” was changed successfully.'), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_change(request, obj) def _response_post_save(self, request, obj): opts = self.model._meta if self.has_view_or_change_permission(request): post_url = reverse('admin:%s_%s_changelist' % (opts.app_label, opts.model_name), current_app=self.admin_site.name) preserved_filters = self.get_preserved_filters(request) post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url) else: post_url = reverse('admin:index', current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def response_post_save_add(self, request, obj): """ Figure out where to redirect after the 'Save' button has been pressed when adding a new object. """ return self._response_post_save(request, obj) def response_post_save_change(self, request, obj): """ Figure out where to redirect after the 'Save' button has been pressed when editing an existing object. """ return self._response_post_save(request, obj) def response_action(self, request, queryset): """ Handle an admin action. This is called if a request is POSTed to the changelist; it returns an HttpResponse if the action was handled, and None otherwise. """ # There can be multiple action forms on the page (at the top # and bottom of the change list, for example). Get the action # whose button was pushed. try: action_index = int(request.POST.get('index', 0)) except ValueError: action_index = 0 # Construct the action form. data = request.POST.copy() data.pop(helpers.ACTION_CHECKBOX_NAME, None) data.pop("index", None) # Use the action whose button was pushed try: data.update({'action': data.getlist('action')[action_index]}) except IndexError: # If we didn't get an action from the chosen form that's invalid # POST data, so by deleting action it'll fail the validation check # below. So no need to do anything here pass action_form = self.action_form(data, auto_id=None) action_form.fields['action'].choices = self.get_action_choices(request) # If the form's valid we can handle the action. if action_form.is_valid(): action = action_form.cleaned_data['action'] select_across = action_form.cleaned_data['select_across'] func = self.get_actions(request)[action][0] # Get the list of selected PKs. If nothing's selected, we can't # perform an action on it, so bail. Except we want to perform # the action explicitly on all objects. 
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) if not selected and not select_across: # Reminder that something needs to be selected or nothing will happen msg = _("Items must be selected in order to perform " "actions on them. No items have been changed.") self.message_user(request, msg, messages.WARNING) return None if not select_across: # Perform the action only on the selected objects queryset = queryset.filter(pk__in=selected) response = func(self, request, queryset) # Actions may return an HttpResponse-like object, which will be # used as the response from the POST. If not, we'll be a good # little HTTP citizen and redirect back to the changelist page. if isinstance(response, HttpResponseBase): return response else: return HttpResponseRedirect(request.get_full_path()) else: msg = _("No action selected.") self.message_user(request, msg, messages.WARNING) return None def response_delete(self, request, obj_display, obj_id): """ Determine the HttpResponse for the delete_view stage. """ opts = self.model._meta if IS_POPUP_VAR in request.POST: popup_response_data = json.dumps({ 'action': 'delete', 'value': str(obj_id), }) return TemplateResponse(request, self.popup_response_template or [ 'admin/%s/%s/popup_response.html' % (opts.app_label, opts.model_name), 'admin/%s/popup_response.html' % opts.app_label, 'admin/popup_response.html', ], { 'popup_response_data': popup_response_data, }) self.message_user( request, _('The %(name)s “%(obj)s” was deleted successfully.') % { 'name': opts.verbose_name, 'obj': obj_display, }, messages.SUCCESS, ) if self.has_change_permission(request, None): post_url = reverse( 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name), current_app=self.admin_site.name, ) preserved_filters = self.get_preserved_filters(request) post_url = add_preserved_filters( {'preserved_filters': preserved_filters, 'opts': opts}, post_url ) else: post_url = reverse('admin:index', current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def render_delete_form(self, request, context): opts = self.model._meta app_label = opts.app_label request.current_app = self.admin_site.name context.update( to_field_var=TO_FIELD_VAR, is_popup_var=IS_POPUP_VAR, media=self.media, ) return TemplateResponse( request, self.delete_confirmation_template or [ "admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name), "admin/{}/delete_confirmation.html".format(app_label), "admin/delete_confirmation.html", ], context, ) def get_inline_formsets(self, request, formsets, inline_instances, obj=None): # Edit permissions on parent model are required for editable inlines. can_edit_parent = self.has_change_permission(request, obj) if obj else self.has_add_permission(request) inline_admin_formsets = [] for inline, formset in zip(inline_instances, formsets): fieldsets = list(inline.get_fieldsets(request, obj)) readonly = list(inline.get_readonly_fields(request, obj)) if can_edit_parent: has_add_permission = inline.has_add_permission(request, obj) has_change_permission = inline.has_change_permission(request, obj) has_delete_permission = inline.has_delete_permission(request, obj) else: # Disable all edit-permissions, and overide formset settings. 
has_add_permission = has_change_permission = has_delete_permission = False formset.extra = formset.max_num = 0 has_view_permission = inline.has_view_permission(request, obj) prepopulated = dict(inline.get_prepopulated_fields(request, obj)) inline_admin_formset = helpers.InlineAdminFormSet( inline, formset, fieldsets, prepopulated, readonly, model_admin=self, has_add_permission=has_add_permission, has_change_permission=has_change_permission, has_delete_permission=has_delete_permission, has_view_permission=has_view_permission, ) inline_admin_formsets.append(inline_admin_formset) return inline_admin_formsets def get_changeform_initial_data(self, request): """ Get the initial form data from the request's GET params. """ initial = dict(request.GET.items()) for k in initial: try: f = self.model._meta.get_field(k) except FieldDoesNotExist: continue # We have to special-case M2Ms as a list of comma-separated PKs. if isinstance(f, models.ManyToManyField): initial[k] = initial[k].split(",") return initial def _get_obj_does_not_exist_redirect(self, request, opts, object_id): """ Create a message informing the user that the object doesn't exist and return a redirect to the admin index page. """ msg = _('%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?') % { 'name': opts.verbose_name, 'key': unquote(object_id), } self.message_user(request, msg, messages.WARNING) url = reverse('admin:index', current_app=self.admin_site.name) return HttpResponseRedirect(url) @csrf_protect_m def changeform_view(self, request, object_id=None, form_url='', extra_context=None): with transaction.atomic(using=router.db_for_write(self.model)): return self._changeform_view(request, object_id, form_url, extra_context) def _changeform_view(self, request, object_id, form_url, extra_context): to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR)) if to_field and not self.to_field_allowed(request, to_field): raise DisallowedModelAdminToField("The field %s cannot be referenced." 
% to_field) model = self.model opts = model._meta if request.method == 'POST' and '_saveasnew' in request.POST: object_id = None add = object_id is None if add: if not self.has_add_permission(request): raise PermissionDenied obj = None else: obj = self.get_object(request, unquote(object_id), to_field) if request.method == 'POST': if not self.has_change_permission(request, obj): raise PermissionDenied else: if not self.has_view_or_change_permission(request, obj): raise PermissionDenied if obj is None: return self._get_obj_does_not_exist_redirect(request, opts, object_id) fieldsets = self.get_fieldsets(request, obj) ModelForm = self.get_form( request, obj, change=not add, fields=flatten_fieldsets(fieldsets) ) if request.method == 'POST': form = ModelForm(request.POST, request.FILES, instance=obj) form_validated = form.is_valid() if form_validated: new_object = self.save_form(request, form, change=not add) else: new_object = form.instance formsets, inline_instances = self._create_formsets(request, new_object, change=not add) if all_valid(formsets) and form_validated: self.save_model(request, new_object, form, not add) self.save_related(request, form, formsets, not add) change_message = self.construct_change_message(request, form, formsets, add) if add: self.log_addition(request, new_object, change_message) return self.response_add(request, new_object) else: self.log_change(request, new_object, change_message) return self.response_change(request, new_object) else: form_validated = False else: if add: initial = self.get_changeform_initial_data(request) form = ModelForm(initial=initial) formsets, inline_instances = self._create_formsets(request, form.instance, change=False) else: form = ModelForm(instance=obj) formsets, inline_instances = self._create_formsets(request, obj, change=True) if not add and not self.has_change_permission(request, obj): readonly_fields = flatten_fieldsets(fieldsets) else: readonly_fields = self.get_readonly_fields(request, obj) adminForm = helpers.AdminForm( form, list(fieldsets), # Clear prepopulated fields on a view-only form to avoid a crash. self.get_prepopulated_fields(request, obj) if add or self.has_change_permission(request, obj) else {}, readonly_fields, model_admin=self) media = self.media + adminForm.media inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj) for inline_formset in inline_formsets: media = media + inline_formset.media if add: title = _('Add %s') elif self.has_change_permission(request, obj): title = _('Change %s') else: title = _('View %s') context = { **self.admin_site.each_context(request), 'title': title % opts.verbose_name, 'adminform': adminForm, 'object_id': object_id, 'original': obj, 'is_popup': IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET, 'to_field': to_field, 'media': media, 'inline_admin_formsets': inline_formsets, 'errors': helpers.AdminErrorList(form, formsets), 'preserved_filters': self.get_preserved_filters(request), } # Hide the "Save" and "Save and continue" buttons if "Save as New" was # previously chosen to prevent the interface from getting confusing. if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST: context['show_save'] = False context['show_save_and_continue'] = False # Use the change template instead of the add template. 
add = False context.update(extra_context or {}) return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url) def autocomplete_view(self, request): return AutocompleteJsonView.as_view(model_admin=self)(request) def add_view(self, request, form_url='', extra_context=None): return self.changeform_view(request, None, form_url, extra_context) def change_view(self, request, object_id, form_url='', extra_context=None): return self.changeform_view(request, object_id, form_url, extra_context) def _get_edited_object_pks(self, request, prefix): """Return POST data values of list_editable primary keys.""" pk_pattern = re.compile( r'{}-\d+-{}$'.format(re.escape(prefix), self.model._meta.pk.name) ) return [value for key, value in request.POST.items() if pk_pattern.match(key)] def _get_list_editable_queryset(self, request, prefix): """ Based on POST data, return a queryset of the objects that were edited via list_editable. """ object_pks = self._get_edited_object_pks(request, prefix) queryset = self.get_queryset(request) validate = queryset.model._meta.pk.to_python try: for pk in object_pks: validate(pk) except ValidationError: # Disable the optimization if the POST data was tampered with. return queryset return queryset.filter(pk__in=object_pks) @csrf_protect_m def changelist_view(self, request, extra_context=None): """ The 'change list' admin view for this model. """ from django.contrib.admin.views.main import ERROR_FLAG opts = self.model._meta app_label = opts.app_label if not self.has_view_or_change_permission(request): raise PermissionDenied try: cl = self.get_changelist_instance(request) except IncorrectLookupParameters: # Wacky lookup parameters were given, so redirect to the main # changelist page, without parameters, and pass an 'invalid=1' # parameter via the query string. If wacky parameters were given # and the 'invalid=1' parameter was already in the query string, # something is screwed up with the database, so display an error # page. if ERROR_FLAG in request.GET: return SimpleTemplateResponse('admin/invalid_setup.html', { 'title': _('Database error'), }) return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1') # If the request was POSTed, this might be a bulk action or a bulk # edit. Try to look up an action or confirmation first, but if this # isn't an action the POST will fall through to the bulk edit check, # below. action_failed = False selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) actions = self.get_actions(request) # Actions with no confirmation if (actions and request.method == 'POST' and 'index' in request.POST and '_save' not in request.POST): if selected: response = self.response_action(request, queryset=cl.get_queryset(request)) if response: return response else: action_failed = True else: msg = _("Items must be selected in order to perform " "actions on them. No items have been changed.") self.message_user(request, msg, messages.WARNING) action_failed = True # Actions with confirmation if (actions and request.method == 'POST' and helpers.ACTION_CHECKBOX_NAME in request.POST and 'index' not in request.POST and '_save' not in request.POST): if selected: response = self.response_action(request, queryset=cl.get_queryset(request)) if response: return response else: action_failed = True if action_failed: # Redirect back to the changelist page to avoid resubmitting the # form if the user refreshes the browser or uses the "No, take # me back" button on the action confirmation page. 
return HttpResponseRedirect(request.get_full_path()) # If we're allowing changelist editing, we need to construct a formset # for the changelist given all the fields to be edited. Then we'll # use the formset to validate/process POSTed data. formset = cl.formset = None # Handle POSTed bulk-edit data. if request.method == 'POST' and cl.list_editable and '_save' in request.POST: if not self.has_change_permission(request): raise PermissionDenied FormSet = self.get_changelist_formset(request) modified_objects = self._get_list_editable_queryset(request, FormSet.get_default_prefix()) formset = cl.formset = FormSet(request.POST, request.FILES, queryset=modified_objects) if formset.is_valid(): changecount = 0 for form in formset.forms: if form.has_changed(): obj = self.save_form(request, form, change=True) self.save_model(request, obj, form, change=True) self.save_related(request, form, formsets=[], change=True) change_msg = self.construct_change_message(request, form, None) self.log_change(request, obj, change_msg) changecount += 1 if changecount: msg = ngettext( "%(count)s %(name)s was changed successfully.", "%(count)s %(name)s were changed successfully.", changecount ) % { 'count': changecount, 'name': model_ngettext(opts, changecount), } self.message_user(request, msg, messages.SUCCESS) return HttpResponseRedirect(request.get_full_path()) # Handle GET -- construct a formset for display. elif cl.list_editable and self.has_change_permission(request): FormSet = self.get_changelist_formset(request) formset = cl.formset = FormSet(queryset=cl.result_list) # Build the list of media to be used by the formset. if formset: media = self.media + formset.media else: media = self.media # Build the action form and populate it with available actions. if actions: action_form = self.action_form(auto_id=None) action_form.fields['action'].choices = self.get_action_choices(request) media += action_form.media else: action_form = None selection_note_all = ngettext( '%(total_count)s selected', 'All %(total_count)s selected', cl.result_count ) context = { **self.admin_site.each_context(request), 'module_name': str(opts.verbose_name_plural), 'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)}, 'selection_note_all': selection_note_all % {'total_count': cl.result_count}, 'title': cl.title, 'is_popup': cl.is_popup, 'to_field': cl.to_field, 'cl': cl, 'media': media, 'has_add_permission': self.has_add_permission(request), 'opts': cl.opts, 'action_form': action_form, 'actions_on_top': self.actions_on_top, 'actions_on_bottom': self.actions_on_bottom, 'actions_selection_counter': self.actions_selection_counter, 'preserved_filters': self.get_preserved_filters(request), **(extra_context or {}), } request.current_app = self.admin_site.name return TemplateResponse(request, self.change_list_template or [ 'admin/%s/%s/change_list.html' % (app_label, opts.model_name), 'admin/%s/change_list.html' % app_label, 'admin/change_list.html' ], context) def get_deleted_objects(self, objs, request): """ Hook for customizing the delete process for the delete view and the "delete selected" action. """ return get_deleted_objects(objs, request, self.admin_site) @csrf_protect_m def delete_view(self, request, object_id, extra_context=None): with transaction.atomic(using=router.db_for_write(self.model)): return self._delete_view(request, object_id, extra_context) def _delete_view(self, request, object_id, extra_context): "The 'delete' admin view for this model." 
opts = self.model._meta app_label = opts.app_label to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR)) if to_field and not self.to_field_allowed(request, to_field): raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field) obj = self.get_object(request, unquote(object_id), to_field) if not self.has_delete_permission(request, obj): raise PermissionDenied if obj is None: return self._get_obj_does_not_exist_redirect(request, opts, object_id) # Populate deleted_objects, a data structure of all related objects that # will also be deleted. deleted_objects, model_count, perms_needed, protected = self.get_deleted_objects([obj], request) if request.POST and not protected: # The user has confirmed the deletion. if perms_needed: raise PermissionDenied obj_display = str(obj) attr = str(to_field) if to_field else opts.pk.attname obj_id = obj.serializable_value(attr) self.log_deletion(request, obj, obj_display) self.delete_model(request, obj) return self.response_delete(request, obj_display, obj_id) object_name = str(opts.verbose_name) if perms_needed or protected: title = _("Cannot delete %(name)s") % {"name": object_name} else: title = _("Are you sure?") context = { **self.admin_site.each_context(request), 'title': title, 'object_name': object_name, 'object': obj, 'deleted_objects': deleted_objects, 'model_count': dict(model_count).items(), 'perms_lacking': perms_needed, 'protected': protected, 'opts': opts, 'app_label': app_label, 'preserved_filters': self.get_preserved_filters(request), 'is_popup': IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET, 'to_field': to_field, **(extra_context or {}), } return self.render_delete_form(request, context) def history_view(self, request, object_id, extra_context=None): "The 'history' admin view for this model." from django.contrib.admin.models import LogEntry # First check if the user can see this history. model = self.model obj = self.get_object(request, unquote(object_id)) if obj is None: return self._get_obj_does_not_exist_redirect(request, model._meta, object_id) if not self.has_view_or_change_permission(request, obj): raise PermissionDenied # Then get the history for this object. opts = model._meta app_label = opts.app_label action_list = LogEntry.objects.filter( object_id=unquote(object_id), content_type=get_content_type_for_model(model) ).select_related().order_by('action_time') context = { **self.admin_site.each_context(request), 'title': _('Change history: %s') % obj, 'action_list': action_list, 'module_name': str(capfirst(opts.verbose_name_plural)), 'object': obj, 'opts': opts, 'preserved_filters': self.get_preserved_filters(request), **(extra_context or {}), } request.current_app = self.admin_site.name return TemplateResponse(request, self.object_history_template or [ "admin/%s/%s/object_history.html" % (app_label, opts.model_name), "admin/%s/object_history.html" % app_label, "admin/object_history.html" ], context) def _create_formsets(self, request, obj, change): "Helper function to generate formsets for add/change_view." 
formsets = [] inline_instances = [] prefixes = {} get_formsets_args = [request] if change: get_formsets_args.append(obj) for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset_params = { 'instance': obj, 'prefix': prefix, 'queryset': inline.get_queryset(request), } if request.method == 'POST': formset_params.update({ 'data': request.POST.copy(), 'files': request.FILES, 'save_as_new': '_saveasnew' in request.POST }) formset = FormSet(**formset_params) def user_deleted_form(request, obj, formset, index): """Return whether or not the user deleted the form.""" return ( inline.has_delete_permission(request, obj) and '{}-{}-DELETE'.format(formset.prefix, index) in request.POST ) # Bypass validation of each view-only inline form (since the form's # data won't be in request.POST), unless the form was deleted. if not inline.has_change_permission(request, obj if change else None): for index, form in enumerate(formset.initial_forms): if user_deleted_form(request, obj, formset, index): continue form._errors = {} form.cleaned_data = form.initial formsets.append(formset) inline_instances.append(inline) return formsets, inline_instances class InlineModelAdmin(BaseModelAdmin): """ Options for inline editing of ``model`` instances. Provide ``fk_name`` to specify the attribute name of the ``ForeignKey`` from ``model`` to its parent. This is required if ``model`` has more than one ``ForeignKey`` to its parent. """ model = None fk_name = None formset = BaseInlineFormSet extra = 3 min_num = None max_num = None template = None verbose_name = None verbose_name_plural = None can_delete = True show_change_link = False checks_class = InlineModelAdminChecks classes = None def __init__(self, parent_model, admin_site): self.admin_site = admin_site self.parent_model = parent_model self.opts = self.model._meta self.has_registered_model = admin_site.is_registered(self.model) super().__init__() if self.verbose_name is None: self.verbose_name = self.model._meta.verbose_name if self.verbose_name_plural is None: self.verbose_name_plural = self.model._meta.verbose_name_plural @property def media(self): extra = '' if settings.DEBUG else '.min' js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js', 'inlines%s.js' % extra] if self.filter_vertical or self.filter_horizontal: js.extend(['SelectBox.js', 'SelectFilter2.js']) if self.classes and 'collapse' in self.classes: js.append('collapse%s.js' % extra) return forms.Media(js=['admin/js/%s' % url for url in js]) def get_extra(self, request, obj=None, **kwargs): """Hook for customizing the number of extra inline forms.""" return self.extra def get_min_num(self, request, obj=None, **kwargs): """Hook for customizing the min number of inline forms.""" return self.min_num def get_max_num(self, request, obj=None, **kwargs): """Hook for customizing the max number of extra inline forms.""" return self.max_num def get_formset(self, request, obj=None, **kwargs): """Return a BaseInlineFormSet class for use in admin add/change views.""" if 'fields' in kwargs: fields = kwargs.pop('fields') else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) excluded = self.get_exclude(request, obj) exclude = [] if excluded is None else list(excluded) exclude.extend(self.get_readonly_fields(request, obj)) if excluded is None and hasattr(self.form, '_meta') and self.form._meta.exclude: # Take the 
custom ModelForm's Meta.exclude into account only if the
            # InlineModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # If exclude is an empty list we use None, since that's the actual
        # default.
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            'form': self.form,
            'formset': self.formset,
            'fk_name': self.fk_name,
            'fields': fields,
            'exclude': exclude,
            'formfield_callback': partial(self.formfield_for_dbfield, request=request),
            'extra': self.get_extra(request, obj, **kwargs),
            'min_num': self.get_min_num(request, obj, **kwargs),
            'max_num': self.get_max_num(request, obj, **kwargs),
            'can_delete': can_delete,
            **kwargs,
        }

        base_model_form = defaults['form']
        can_change = self.has_change_permission(request, obj) if request else True
        can_add = self.has_add_permission(request, obj) if request else True

        class DeleteProtectedModelForm(base_model_form):

            def hand_clean_DELETE(self):
                """
                We don't validate the 'DELETE' field itself because on
                templates it's not rendered using the field information, but
                just using a generic "deletion_field" of the InlineModelAdmin.
                """
                if self.cleaned_data.get(DELETION_FIELD_NAME, False):
                    using = router.db_for_write(self._meta.model)
                    collector = NestedObjects(using=using)
                    if self.instance._state.adding:
                        return
                    collector.collect([self.instance])
                    if collector.protected:
                        objs = []
                        for p in collector.protected:
                            objs.append(
                                # Translators: Model verbose name and instance representation,
                                # suitable to be an item in a list.
                                _('%(class_name)s %(instance)s') % {
                                    'class_name': p._meta.verbose_name,
                                    'instance': p}
                            )
                        params = {
                            'class_name': self._meta.model._meta.verbose_name,
                            'instance': self.instance,
                            'related_objects': get_text_list(objs, _('and')),
                        }
                        msg = _("Deleting %(class_name)s %(instance)s would require "
                                "deleting the following protected related objects: "
                                "%(related_objects)s")
                        raise ValidationError(msg, code='deleting_protected', params=params)

            def is_valid(self):
                result = super().is_valid()
                self.hand_clean_DELETE()
                return result

            def has_changed(self):
                # Protect against unauthorized edits.
                if not can_change and not self.instance._state.adding:
                    return False
                if not can_add and self.instance._state.adding:
                    return False
                return super().has_changed()

        defaults['form'] = DeleteProtectedModelForm

        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = forms.ALL_FIELDS

        return inlineformset_factory(self.parent_model, self.model, **defaults)

    def _get_form_for_get_fields(self, request, obj=None):
        return self.get_formset(request, obj, fields=None).form

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        if not self.has_view_or_change_permission(request):
            queryset = queryset.none()
        return queryset

    def _has_any_perms_for_target_model(self, request, perms):
        """
        This method is called only when the ModelAdmin's model is for a
        ManyToManyField's implicit through model (if self.opts.auto_created).

        Return True if the user has any of the given permissions ('add',
        'change', etc.) for the model that points to the through model.
        """
        opts = self.opts
        # Find the target model of an auto-created many-to-many relationship.
for field in opts.fields: if field.remote_field and field.remote_field.model != self.parent_model: opts = field.remote_field.model._meta break return any( request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename(perm, opts))) for perm in perms ) def has_add_permission(self, request, obj): if self.opts.auto_created: # Auto-created intermediate models don't have their own # permissions. The user needs to have the change permission for the # related model in order to be able to do anything with the # intermediate model. return self._has_any_perms_for_target_model(request, ['change']) return super().has_add_permission(request) def has_change_permission(self, request, obj=None): if self.opts.auto_created: # Same comment as has_add_permission(). return self._has_any_perms_for_target_model(request, ['change']) return super().has_change_permission(request) def has_delete_permission(self, request, obj=None): if self.opts.auto_created: # Same comment as has_add_permission(). return self._has_any_perms_for_target_model(request, ['change']) return super().has_delete_permission(request, obj) def has_view_permission(self, request, obj=None): if self.opts.auto_created: # Same comment as has_add_permission(). The 'change' permission # also implies the 'view' permission. return self._has_any_perms_for_target_model(request, ['view', 'change']) return super().has_view_permission(request) class StackedInline(InlineModelAdmin): template = 'admin/edit_inline/stacked.html' class TabularInline(InlineModelAdmin): template = 'admin/edit_inline/tabular.html'
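# Usage sketch (illustrative, not part of the module above): InlineModelAdmin
# subclasses are attached to a parent ModelAdmin through its `inlines`
# attribute; the formset machinery above (get_formset, get_inline_formsets,
# _create_formsets) then builds and validates the nested forms on the
# parent's add/change views. `Author` and `Book` are assumed example models
# (Book has a ForeignKey to Author); they are not defined in this file.

from django.contrib import admin
from myapp.models import Author, Book  # hypothetical models


class BookInline(admin.TabularInline):
    model = Book              # the ForeignKey to Author is detected automatically
    extra = 1                 # render one blank form instead of the default 3
    show_change_link = True   # link each inline row to the Book change form


@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
    inlines = [BookInline]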
""" Form Widget classes specific to the Django admin site. """ import copy import json from django import forms from django.conf import settings from django.core.exceptions import ValidationError from django.core.validators import URLValidator from django.db.models import CASCADE from django.urls import reverse from django.urls.exceptions import NoReverseMatch from django.utils.html import smart_urlquote from django.utils.safestring import mark_safe from django.utils.text import Truncator from django.utils.translation import get_language, gettext as _ class FilteredSelectMultiple(forms.SelectMultiple): """ A SelectMultiple with a JavaScript filter interface. Note that the resulting JavaScript assumes that the jsi18n catalog has been loaded in the page """ @property def media(self): extra = '' if settings.DEBUG else '.min' js = [ 'vendor/jquery/jquery%s.js' % extra, 'jquery.init.js', 'core.js', 'SelectBox.js', 'SelectFilter2.js', ] return forms.Media(js=["admin/js/%s" % path for path in js]) def __init__(self, verbose_name, is_stacked, attrs=None, choices=()): self.verbose_name = verbose_name self.is_stacked = is_stacked super().__init__(attrs, choices) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['widget']['attrs']['class'] = 'selectfilter' if self.is_stacked: context['widget']['attrs']['class'] += 'stacked' context['widget']['attrs']['data-field-name'] = self.verbose_name context['widget']['attrs']['data-is-stacked'] = int(self.is_stacked) return context class AdminDateWidget(forms.DateInput): class Media: js = [ 'admin/js/calendar.js', 'admin/js/admin/DateTimeShortcuts.js', ] def __init__(self, attrs=None, format=None): attrs = {'class': 'vDateField', 'size': '10', **(attrs or {})} super().__init__(attrs=attrs, format=format) class AdminTimeWidget(forms.TimeInput): class Media: js = [ 'admin/js/calendar.js', 'admin/js/admin/DateTimeShortcuts.js', ] def __init__(self, attrs=None, format=None): attrs = {'class': 'vTimeField', 'size': '8', **(attrs or {})} super().__init__(attrs=attrs, format=format) class AdminSplitDateTime(forms.SplitDateTimeWidget): """ A SplitDateTime Widget that has some admin-specific styling. """ template_name = 'admin/widgets/split_datetime.html' def __init__(self, attrs=None): widgets = [AdminDateWidget, AdminTimeWidget] # Note that we're calling MultiWidget, not SplitDateTimeWidget, because # we want to define widgets. forms.MultiWidget.__init__(self, widgets, attrs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['date_label'] = _('Date:') context['time_label'] = _('Time:') return context class AdminRadioSelect(forms.RadioSelect): template_name = 'admin/widgets/radio.html' class AdminFileWidget(forms.ClearableFileInput): template_name = 'admin/widgets/clearable_file_input.html' def url_params_from_lookup_dict(lookups): """ Convert the type of lookups specified in a ForeignKey limit_choices_to attribute to a dictionary of query parameters """ params = {} if lookups and hasattr(lookups, 'items'): for k, v in lookups.items(): if callable(v): v = v() if isinstance(v, (tuple, list)): v = ','.join(str(x) for x in v) elif isinstance(v, bool): v = ('0', '1')[v] else: v = str(v) params[k] = v return params class ForeignKeyRawIdWidget(forms.TextInput): """ A Widget for displaying ForeignKeys in the "raw_id" interface rather than in a <select> box. 
""" template_name = 'admin/widgets/foreign_key_raw_id.html' def __init__(self, rel, admin_site, attrs=None, using=None): self.rel = rel self.admin_site = admin_site self.db = using super().__init__(attrs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) rel_to = self.rel.model if rel_to in self.admin_site._registry: # The related object is registered with the same AdminSite related_url = reverse( 'admin:%s_%s_changelist' % ( rel_to._meta.app_label, rel_to._meta.model_name, ), current_app=self.admin_site.name, ) params = self.url_parameters() if params: related_url += '?' + '&amp;'.join('%s=%s' % (k, v) for k, v in params.items()) context['related_url'] = mark_safe(related_url) context['link_title'] = _('Lookup') # The JavaScript code looks for this class. context['widget']['attrs'].setdefault('class', 'vForeignKeyRawIdAdminField') else: context['related_url'] = None if context['widget']['value']: context['link_label'], context['link_url'] = self.label_and_url_for_value(value) else: context['link_label'] = None return context def base_url_parameters(self): limit_choices_to = self.rel.limit_choices_to if callable(limit_choices_to): limit_choices_to = limit_choices_to() return url_params_from_lookup_dict(limit_choices_to) def url_parameters(self): from django.contrib.admin.views.main import TO_FIELD_VAR params = self.base_url_parameters() params.update({TO_FIELD_VAR: self.rel.get_related_field().name}) return params def label_and_url_for_value(self, value): key = self.rel.get_related_field().name try: obj = self.rel.model._default_manager.using(self.db).get(**{key: value}) except (ValueError, self.rel.model.DoesNotExist, ValidationError): return '', '' try: url = reverse( '%s:%s_%s_change' % ( self.admin_site.name, obj._meta.app_label, obj._meta.object_name.lower(), ), args=(obj.pk,) ) except NoReverseMatch: url = '' # Admin not registered for target model. return Truncator(obj).words(14), url class ManyToManyRawIdWidget(ForeignKeyRawIdWidget): """ A Widget for displaying ManyToMany ids in the "raw_id" interface rather than in a <select multiple> box. """ template_name = 'admin/widgets/many_to_many_raw_id.html' def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.rel.model in self.admin_site._registry: # The related object is registered with the same AdminSite context['widget']['attrs']['class'] = 'vManyToManyRawIdAdminField' return context def url_parameters(self): return self.base_url_parameters() def label_and_url_for_value(self, value): return '', '' def value_from_datadict(self, data, files, name): value = data.get(name) if value: return value.split(',') def format_value(self, value): return ','.join(str(v) for v in value) if value else '' class RelatedFieldWidgetWrapper(forms.Widget): """ This class is a wrapper to a given widget to add the add icon for the admin interface. """ template_name = 'admin/widgets/related_widget_wrapper.html' def __init__(self, widget, rel, admin_site, can_add_related=None, can_change_related=False, can_delete_related=False, can_view_related=False): self.needs_multipart_form = widget.needs_multipart_form self.attrs = widget.attrs self.choices = widget.choices self.widget = widget self.rel = rel # Backwards compatible check for whether a user can add related # objects. if can_add_related is None: can_add_related = rel.model in admin_site._registry self.can_add_related = can_add_related # XXX: The UX does not support multiple selected values. 
multiple = getattr(widget, 'allow_multiple_selected', False) self.can_change_related = not multiple and can_change_related # XXX: The deletion UX can be confusing when dealing with cascading deletion. cascade = getattr(rel, 'on_delete', None) is CASCADE self.can_delete_related = not multiple and not cascade and can_delete_related self.can_view_related = not multiple and can_view_related # so we can check if the related object is registered with this AdminSite self.admin_site = admin_site def __deepcopy__(self, memo): obj = copy.copy(self) obj.widget = copy.deepcopy(self.widget, memo) obj.attrs = self.widget.attrs memo[id(self)] = obj return obj @property def is_hidden(self): return self.widget.is_hidden @property def media(self): return self.widget.media def get_related_url(self, info, action, *args): return reverse("admin:%s_%s_%s" % (info + (action,)), current_app=self.admin_site.name, args=args) def get_context(self, name, value, attrs): from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR rel_opts = self.rel.model._meta info = (rel_opts.app_label, rel_opts.model_name) self.widget.choices = self.choices url_params = '&'.join("%s=%s" % param for param in [ (TO_FIELD_VAR, self.rel.get_related_field().name), (IS_POPUP_VAR, 1), ]) context = { 'rendered_widget': self.widget.render(name, value, attrs), 'is_hidden': self.is_hidden, 'name': name, 'url_params': url_params, 'model': rel_opts.verbose_name, 'can_add_related': self.can_add_related, 'can_change_related': self.can_change_related, 'can_delete_related': self.can_delete_related, 'can_view_related': self.can_view_related, } if self.can_add_related: context['add_related_url'] = self.get_related_url(info, 'add') if self.can_delete_related: context['delete_related_template_url'] = self.get_related_url(info, 'delete', '__fk__') if self.can_view_related or self.can_change_related: context['change_related_template_url'] = self.get_related_url(info, 'change', '__fk__') return context def value_from_datadict(self, data, files, name): return self.widget.value_from_datadict(data, files, name) def value_omitted_from_data(self, data, files, name): return self.widget.value_omitted_from_data(data, files, name) def id_for_label(self, id_): return self.widget.id_for_label(id_) class AdminTextareaWidget(forms.Textarea): def __init__(self, attrs=None): super().__init__(attrs={'class': 'vLargeTextField', **(attrs or {})}) class AdminTextInputWidget(forms.TextInput): def __init__(self, attrs=None): super().__init__(attrs={'class': 'vTextField', **(attrs or {})}) class AdminEmailInputWidget(forms.EmailInput): def __init__(self, attrs=None): super().__init__(attrs={'class': 'vTextField', **(attrs or {})}) class AdminURLFieldWidget(forms.URLInput): template_name = 'admin/widgets/url.html' def __init__(self, attrs=None, validator_class=URLValidator): super().__init__(attrs={'class': 'vURLField', **(attrs or {})}) self.validator = validator_class() def get_context(self, name, value, attrs): try: self.validator(value if value else '') url_valid = True except ValidationError: url_valid = False context = super().get_context(name, value, attrs) context['current_label'] = _('Currently:') context['change_label'] = _('Change:') context['widget']['href'] = smart_urlquote(context['widget']['value']) if value else '' context['url_valid'] = url_valid return context class AdminIntegerFieldWidget(forms.NumberInput): class_name = 'vIntegerField' def __init__(self, attrs=None): super().__init__(attrs={'class': self.class_name, **(attrs or {})}) class 
AdminBigIntegerFieldWidget(AdminIntegerFieldWidget): class_name = 'vBigIntegerField' class AdminUUIDInputWidget(forms.TextInput): def __init__(self, attrs=None): super().__init__(attrs={'class': 'vUUIDField', **(attrs or {})}) # Mapping of lowercase language codes [returned by Django's get_language()] to # language codes supported by select2. # See django/contrib/admin/static/admin/js/vendor/select2/i18n/* SELECT2_TRANSLATIONS = {x.lower(): x for x in [ 'ar', 'az', 'bg', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'eu', 'fa', 'fi', 'fr', 'gl', 'he', 'hi', 'hr', 'hu', 'id', 'is', 'it', 'ja', 'km', 'ko', 'lt', 'lv', 'mk', 'ms', 'nb', 'nl', 'pl', 'pt-BR', 'pt', 'ro', 'ru', 'sk', 'sr-Cyrl', 'sr', 'sv', 'th', 'tr', 'uk', 'vi', ]} SELECT2_TRANSLATIONS.update({'zh-hans': 'zh-CN', 'zh-hant': 'zh-TW'}) class AutocompleteMixin: """ Select widget mixin that loads options from AutocompleteJsonView via AJAX. Renders the necessary data attributes for select2 and adds the static form media. """ url_name = '%s:%s_%s_autocomplete' def __init__(self, rel, admin_site, attrs=None, choices=(), using=None): self.rel = rel self.admin_site = admin_site self.db = using self.choices = choices self.attrs = {} if attrs is None else attrs.copy() def get_url(self): model = self.rel.model return reverse(self.url_name % (self.admin_site.name, model._meta.app_label, model._meta.model_name)) def build_attrs(self, base_attrs, extra_attrs=None): """ Set select2's AJAX attributes. Attributes can be set using the html5 data attribute. Nested attributes require a double dash as per https://select2.org/configuration/data-attributes#nested-subkey-options """ attrs = super().build_attrs(base_attrs, extra_attrs=extra_attrs) attrs.setdefault('class', '') attrs.update({ 'data-ajax--cache': 'true', 'data-ajax--delay': 250, 'data-ajax--type': 'GET', 'data-ajax--url': self.get_url(), 'data-theme': 'admin-autocomplete', 'data-allow-clear': json.dumps(not self.is_required), 'data-placeholder': '', # Allows clearing of the input. 
'class': attrs['class'] + (' ' if attrs['class'] else '') + 'admin-autocomplete',
        })
        return attrs

    def optgroups(self, name, value, attr=None):
        """Return selected options based on the ModelChoiceIterator."""
        default = (None, [], 0)
        groups = [default]
        has_selected = False
        selected_choices = {
            str(v) for v in value
            if str(v) not in self.choices.field.empty_values
        }
        if not self.is_required and not self.allow_multiple_selected:
            default[1].append(self.create_option(name, '', '', False, 0))
        choices = (
            (obj.pk, self.choices.field.label_from_instance(obj))
            for obj in self.choices.queryset.using(self.db).filter(pk__in=selected_choices)
        )
        for option_value, option_label in choices:
            selected = (
                str(option_value) in value and
                (has_selected is False or self.allow_multiple_selected)
            )
            has_selected |= selected
            index = len(default[1])
            subgroup = default[1]
            # Mark the option as selected based on the flag computed above.
            subgroup.append(self.create_option(name, option_value, option_label, selected, index))
        return groups

    @property
    def media(self):
        extra = '' if settings.DEBUG else '.min'
        i18n_name = SELECT2_TRANSLATIONS.get(get_language())
        i18n_file = ('admin/js/vendor/select2/i18n/%s.js' % i18n_name,) if i18n_name else ()
        return forms.Media(
            js=(
                'admin/js/vendor/jquery/jquery%s.js' % extra,
                'admin/js/vendor/select2/select2.full%s.js' % extra,
            ) + i18n_file + (
                'admin/js/jquery.init.js',
                'admin/js/autocomplete.js',
            ),
            css={
                'screen': (
                    'admin/css/vendor/select2/select2%s.css' % extra,
                    'admin/css/autocomplete.css',
                ),
            },
        )


class AutocompleteSelect(AutocompleteMixin, forms.Select):
    pass


class AutocompleteSelectMultiple(AutocompleteMixin, forms.SelectMultiple):
    pass
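# Usage sketch (illustrative, not part of the module above):
# AutocompleteSelect/AutocompleteSelectMultiple are rarely instantiated by
# hand. ModelAdmin substitutes them for any relation named in
# `autocomplete_fields`, and the related model's admin must define
# `search_fields` because its autocomplete endpoint serves the select2 AJAX
# requests. `Band` and `Song` are assumed example models (Song has a
# ForeignKey to Band).

from django.contrib import admin
from myapp.models import Band, Song  # hypothetical models


@admin.register(Band)
class BandAdmin(admin.ModelAdmin):
    search_fields = ['name']  # required: backs the autocomplete endpoint


@admin.register(Song)
class SongAdmin(admin.ModelAdmin):
    autocomplete_fields = ['band']  # rendered with AutocompleteSelect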
from django.contrib.postgres.fields import ArrayField, JSONField from django.db.models import Aggregate, Value from .mixins import OrderableAggMixin __all__ = [ 'ArrayAgg', 'BitAnd', 'BitOr', 'BoolAnd', 'BoolOr', 'JSONBAgg', 'StringAgg', ] class ArrayAgg(OrderableAggMixin, Aggregate): function = 'ARRAY_AGG' template = '%(function)s(%(distinct)s%(expressions)s %(ordering)s)' allow_distinct = True @property def output_field(self): return ArrayField(self.source_expressions[0].output_field) def convert_value(self, value, expression, connection): if not value: return [] return value class BitAnd(Aggregate): function = 'BIT_AND' class BitOr(Aggregate): function = 'BIT_OR' class BoolAnd(Aggregate): function = 'BOOL_AND' class BoolOr(Aggregate): function = 'BOOL_OR' class JSONBAgg(Aggregate): function = 'JSONB_AGG' output_field = JSONField() def convert_value(self, value, expression, connection): if not value: return [] return value class StringAgg(OrderableAggMixin, Aggregate): function = 'STRING_AGG' template = '%(function)s(%(distinct)s%(expressions)s %(ordering)s)' allow_distinct = True def __init__(self, expression, delimiter, **extra): delimiter_expr = Value(str(delimiter)) super().__init__(expression, delimiter_expr, **extra) def convert_value(self, value, expression, connection): if not value: return '' return value
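# Usage sketch (illustrative, not part of the module above; PostgreSQL
# only). The `Publisher` model with a reverse `book` relation is
# hypothetical. ArrayAgg collects related values into a list (with optional
# `ordering` from OrderableAggMixin) and StringAgg joins them with a
# required delimiter; convert_value() above maps empty results to [] and ''
# respectively.

from django.contrib.postgres.aggregates import ArrayAgg, StringAgg
from myapp.models import Publisher  # hypothetical model

publishers = Publisher.objects.annotate(
    titles=ArrayAgg('book__title', ordering='book__title'),
    title_list=StringAgg('book__title', delimiter='; ', distinct=True),
)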
from datetime import datetime, timedelta from django import forms from django.conf import settings from django.contrib import messages from django.contrib.admin import FieldListFilter from django.contrib.admin.exceptions import ( DisallowedModelAdminLookup, DisallowedModelAdminToField, ) from django.contrib.admin.options import ( IS_POPUP_VAR, TO_FIELD_VAR, IncorrectLookupParameters, ) from django.contrib.admin.utils import ( get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote, ) from django.core.exceptions import ( FieldDoesNotExist, ImproperlyConfigured, SuspiciousOperation, ) from django.core.paginator import InvalidPage from django.db.models import F, Field, ManyToOneRel, OrderBy from django.db.models.expressions import Combinable from django.urls import reverse from django.utils.http import urlencode from django.utils.timezone import make_aware from django.utils.translation import gettext # Changelist settings ALL_VAR = 'all' ORDER_VAR = 'o' ORDER_TYPE_VAR = 'ot' PAGE_VAR = 'p' SEARCH_VAR = 'q' ERROR_FLAG = 'e' IGNORED_PARAMS = ( ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR) class ChangeListSearchForm(forms.Form): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Populate "fields" dynamically because SEARCH_VAR is a variable: self.fields = { SEARCH_VAR: forms.CharField(required=False, strip=False), } class ChangeList: search_form_class = ChangeListSearchForm def __init__(self, request, model, list_display, list_display_links, list_filter, date_hierarchy, search_fields, list_select_related, list_per_page, list_max_show_all, list_editable, model_admin, sortable_by): self.model = model self.opts = model._meta self.lookup_opts = self.opts self.root_queryset = model_admin.get_queryset(request) self.list_display = list_display self.list_display_links = list_display_links self.list_filter = list_filter self.has_filters = None self.date_hierarchy = date_hierarchy self.search_fields = search_fields self.list_select_related = list_select_related self.list_per_page = list_per_page self.list_max_show_all = list_max_show_all self.model_admin = model_admin self.preserved_filters = model_admin.get_preserved_filters(request) self.sortable_by = sortable_by # Get search parameters from the query string. _search_form = self.search_form_class(request.GET) if not _search_form.is_valid(): for error in _search_form.errors.values(): messages.error(request, ', '.join(error)) self.query = _search_form.cleaned_data.get(SEARCH_VAR) or '' try: self.page_num = int(request.GET.get(PAGE_VAR, 0)) except ValueError: self.page_num = 0 self.show_all = ALL_VAR in request.GET self.is_popup = IS_POPUP_VAR in request.GET to_field = request.GET.get(TO_FIELD_VAR) if to_field and not model_admin.to_field_allowed(request, to_field): raise DisallowedModelAdminToField("The field %s cannot be referenced." 
% to_field) self.to_field = to_field self.params = dict(request.GET.items()) if PAGE_VAR in self.params: del self.params[PAGE_VAR] if ERROR_FLAG in self.params: del self.params[ERROR_FLAG] if self.is_popup: self.list_editable = () else: self.list_editable = list_editable self.queryset = self.get_queryset(request) self.get_results(request) if self.is_popup: title = gettext('Select %s') elif self.model_admin.has_change_permission(request): title = gettext('Select %s to change') else: title = gettext('Select %s to view') self.title = title % self.opts.verbose_name self.pk_attname = self.lookup_opts.pk.attname def get_filters_params(self, params=None): """ Return all params except IGNORED_PARAMS. """ params = params or self.params lookup_params = params.copy() # a dictionary of the query string # Remove all the parameters that are globally and systematically # ignored. for ignored in IGNORED_PARAMS: if ignored in lookup_params: del lookup_params[ignored] return lookup_params def get_filters(self, request): lookup_params = self.get_filters_params() use_distinct = False for key, value in lookup_params.items(): if not self.model_admin.lookup_allowed(key, value): raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key) filter_specs = [] for list_filter in self.list_filter: if callable(list_filter): # This is simply a custom list filter class. spec = list_filter(request, lookup_params, self.model, self.model_admin) else: field_path = None if isinstance(list_filter, (tuple, list)): # This is a custom FieldListFilter class for a given field. field, field_list_filter_class = list_filter else: # This is simply a field name, so use the default # FieldListFilter class that has been registered for the # type of the given field. field, field_list_filter_class = list_filter, FieldListFilter.create if not isinstance(field, Field): field_path = field field = get_fields_from_path(self.model, field_path)[-1] lookup_params_count = len(lookup_params) spec = field_list_filter_class( field, request, lookup_params, self.model, self.model_admin, field_path=field_path, ) # field_list_filter_class removes any lookup_params it # processes. If that happened, check if distinct() is needed to # remove duplicate results. if lookup_params_count > len(lookup_params): use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, field_path) if spec and spec.has_output(): filter_specs.append(spec) if self.date_hierarchy: # Create bounded lookup parameters so that the query is more # efficient. year = lookup_params.pop('%s__year' % self.date_hierarchy, None) if year is not None: month = lookup_params.pop('%s__month' % self.date_hierarchy, None) day = lookup_params.pop('%s__day' % self.date_hierarchy, None) try: from_date = datetime( int(year), int(month if month is not None else 1), int(day if day is not None else 1), ) except ValueError as e: raise IncorrectLookupParameters(e) from e if day: to_date = from_date + timedelta(days=1) elif month: # In this branch, from_date will always be the first of a # month, so advancing 32 days gives the next month. 
to_date = (from_date + timedelta(days=32)).replace(day=1) else: to_date = from_date.replace(year=from_date.year + 1) if settings.USE_TZ: from_date = make_aware(from_date) to_date = make_aware(to_date) lookup_params.update({ '%s__gte' % self.date_hierarchy: from_date, '%s__lt' % self.date_hierarchy: to_date, }) # At this point, all the parameters used by the various ListFilters # have been removed from lookup_params, which now only contains other # parameters passed via the query string. We now loop through the # remaining parameters both to ensure that all the parameters are valid # fields and to determine if at least one of them needs distinct(). If # the lookup parameters aren't real fields, then bail out. try: for key, value in lookup_params.items(): lookup_params[key] = prepare_lookup_value(key, value) use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, key) return filter_specs, bool(filter_specs), lookup_params, use_distinct except FieldDoesNotExist as e: raise IncorrectLookupParameters(e) from e def get_query_string(self, new_params=None, remove=None): if new_params is None: new_params = {} if remove is None: remove = [] p = self.params.copy() for r in remove: for k in list(p): if k.startswith(r): del p[k] for k, v in new_params.items(): if v is None: if k in p: del p[k] else: p[k] = v return '?%s' % urlencode(sorted(p.items())) def get_results(self, request): paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page) # Get the number of objects, with admin filters applied. result_count = paginator.count # Get the total number of objects, with no admin filters applied. if self.model_admin.show_full_result_count: full_result_count = self.root_queryset.count() else: full_result_count = None can_show_all = result_count <= self.list_max_show_all multi_page = result_count > self.list_per_page # Get the list of objects to display on this page. if (self.show_all and can_show_all) or not multi_page: result_list = self.queryset._clone() else: try: result_list = paginator.page(self.page_num + 1).object_list except InvalidPage: raise IncorrectLookupParameters self.result_count = result_count self.show_full_result_count = self.model_admin.show_full_result_count # Admin actions are shown if there is at least one entry # or if entries are not counted because show_full_result_count is disabled self.show_admin_actions = not self.show_full_result_count or bool(full_result_count) self.full_result_count = full_result_count self.result_list = result_list self.can_show_all = can_show_all self.multi_page = multi_page self.paginator = paginator def _get_default_ordering(self): ordering = [] if self.model_admin.ordering: ordering = self.model_admin.ordering elif self.lookup_opts.ordering: ordering = self.lookup_opts.ordering return ordering def get_ordering_field(self, field_name): """ Return the proper model field name corresponding to the given field_name to use for ordering. field_name may either be the name of a proper model field or the name of a method (on the admin or model) or a callable with the 'admin_order_field' attribute. Return None if no proper model field name can be matched. """ try: field = self.lookup_opts.get_field(field_name) return field.name except FieldDoesNotExist: # See whether field_name is a name of a non-field # that allows sorting. 
if callable(field_name):
                attr = field_name
            elif hasattr(self.model_admin, field_name):
                attr = getattr(self.model_admin, field_name)
            else:
                attr = getattr(self.model, field_name)
            if isinstance(attr, property) and hasattr(attr, 'fget'):
                attr = attr.fget
            return getattr(attr, 'admin_order_field', None)

    def get_ordering(self, request, queryset):
        """
        Return the list of ordering fields for the change list.
        First check the get_ordering() method in model admin, then check
        the object's default ordering. Then, any manually-specified ordering
        from the query string overrides anything. Finally, a deterministic
        order is guaranteed by calling _get_deterministic_ordering() with the
        constructed ordering.
        """
        params = self.params
        ordering = list(self.model_admin.get_ordering(request) or self._get_default_ordering())
        if ORDER_VAR in params:
            # Clear ordering and used params
            ordering = []
            order_params = params[ORDER_VAR].split('.')
            for p in order_params:
                try:
                    none, pfx, idx = p.rpartition('-')
                    field_name = self.list_display[int(idx)]
                    order_field = self.get_ordering_field(field_name)
                    if not order_field:
                        continue  # No 'admin_order_field', skip it
                    if isinstance(order_field, OrderBy):
                        if pfx == '-':
                            order_field = order_field.copy()
                            order_field.reverse_ordering()
                        ordering.append(order_field)
                    elif hasattr(order_field, 'resolve_expression'):
                        # order_field is an expression.
                        ordering.append(order_field.desc() if pfx == '-' else order_field.asc())
                    # reverse order if order_field has already "-" as prefix
                    elif order_field.startswith('-') and pfx == '-':
                        ordering.append(order_field[1:])
                    else:
                        ordering.append(pfx + order_field)
                except (IndexError, ValueError):
                    continue  # Invalid ordering specified, skip it.

        # Add the given query's ordering fields, if any.
        ordering.extend(queryset.query.order_by)

        return self._get_deterministic_ordering(ordering)

    def _get_deterministic_ordering(self, ordering):
        """
        Ensure a deterministic order across all database backends. Search for a
        single field or unique together set of fields providing a total
        ordering. If these are missing, augment the ordering with a descending
        primary key.
        """
        ordering = list(ordering)
        ordering_fields = set()
        total_ordering_fields = {'pk'} | {
            field.attname for field in self.lookup_opts.fields
            if field.unique and not field.null
        }
        for part in ordering:
            # Search for single field providing a total ordering.
            field_name = None
            if isinstance(part, str):
                field_name = part.lstrip('-')
            elif isinstance(part, F):
                field_name = part.name
            elif isinstance(part, OrderBy) and isinstance(part.expression, F):
                field_name = part.expression.name
            if field_name:
                # Normalize attname references by using get_field().
                try:
                    field = self.lookup_opts.get_field(field_name)
                except FieldDoesNotExist:
                    # Could be "?" for random ordering or a related field
                    # lookup. Skip this part of introspection for now.
                    continue
                # Ordering by a related field name orders by the referenced
                # model's ordering. Skip this part of introspection for now.
                if field.remote_field and field_name == field.name:
                    continue
                if field.attname in total_ordering_fields:
                    break
                ordering_fields.add(field.attname)
        else:
            # No single total ordering field, try unique_together.
            for field_names in self.lookup_opts.unique_together:
                # Normalize attname references by using get_field().
                fields = [self.lookup_opts.get_field(field_name) for field_name in field_names]
                # Composite unique constraints containing a nullable column
                # cannot ensure total ordering.
if any(field.null for field in fields): continue if ordering_fields.issuperset(field.attname for field in fields): break else: # If no set of unique fields is present in the ordering, rely # on the primary key to provide total ordering. ordering.append('-pk') return ordering def get_ordering_field_columns(self): """ Return a dictionary of ordering field column numbers and asc/desc. """ # We must cope with more than one column having the same underlying sort # field, so we base things on column numbers. ordering = self._get_default_ordering() ordering_fields = {} if ORDER_VAR not in self.params: # for ordering specified on ModelAdmin or model Meta, we don't know # the right column numbers absolutely, because there might be more # than one column associated with that ordering, so we guess. for field in ordering: if isinstance(field, (Combinable, OrderBy)): if not isinstance(field, OrderBy): field = field.asc() if isinstance(field.expression, F): order_type = 'desc' if field.descending else 'asc' field = field.expression.name else: continue elif field.startswith('-'): field = field[1:] order_type = 'desc' else: order_type = 'asc' for index, attr in enumerate(self.list_display): if self.get_ordering_field(attr) == field: ordering_fields[index] = order_type break else: for p in self.params[ORDER_VAR].split('.'): none, pfx, idx = p.rpartition('-') try: idx = int(idx) except ValueError: continue # skip it ordering_fields[idx] = 'desc' if pfx == '-' else 'asc' return ordering_fields def get_queryset(self, request): # First, we collect all the declared list filters. (self.filter_specs, self.has_filters, remaining_lookup_params, filters_use_distinct) = self.get_filters(request) # Then, we let every list filter modify the queryset to its liking. qs = self.root_queryset for filter_spec in self.filter_specs: new_qs = filter_spec.queryset(request, qs) if new_qs is not None: qs = new_qs try: # Finally, we apply the remaining lookup parameters from the query # string (i.e. those that haven't already been processed by the # filters). qs = qs.filter(**remaining_lookup_params) except (SuspiciousOperation, ImproperlyConfigured): # Allow certain types of errors to be re-raised as-is so that the # caller can treat them in a special way. raise except Exception as e: # Every other error is caught with a naked except, because we don't # have any other way of validating lookup parameters. They might be # invalid if the keyword arguments are incorrect, or if the values # are not in the correct type, so we might get FieldError, # ValueError, ValidationError, or ?. raise IncorrectLookupParameters(e) if not qs.query.select_related: qs = self.apply_select_related(qs) # Set ordering. 
ordering = self.get_ordering(request, qs) qs = qs.order_by(*ordering) # Apply search results qs, search_use_distinct = self.model_admin.get_search_results(request, qs, self.query) # Remove duplicates from results, if necessary if filters_use_distinct | search_use_distinct: return qs.distinct() else: return qs def apply_select_related(self, qs): if self.list_select_related is True: return qs.select_related() if self.list_select_related is False: if self.has_related_field_in_list_display(): return qs.select_related() if self.list_select_related: return qs.select_related(*self.list_select_related) return qs def has_related_field_in_list_display(self): for field_name in self.list_display: try: field = self.lookup_opts.get_field(field_name) except FieldDoesNotExist: pass else: if isinstance(field.remote_field, ManyToOneRel): # <FK>_id field names don't require a join. if field_name != field.get_attname(): return True return False def url_for_result(self, result): pk = getattr(result, self.pk_attname) return reverse('admin:%s_%s_change' % (self.opts.app_label, self.opts.model_name), args=(quote(pk),), current_app=self.model_admin.admin_site.name)
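# Usage sketch (illustrative, not part of the module above): ChangeList is
# rarely used directly; ModelAdmin.get_changelist() is the supported hook
# for substituting a subclass. A small, hypothetical example that overrides
# url_for_result() so result rows link somewhere other than the default
# change form:

from django.contrib import admin
from django.contrib.admin.views.main import ChangeList


class ReportChangeList(ChangeList):
    def url_for_result(self, result):
        # Hypothetical external detail page keyed by primary key.
        return '/reports/%s/' % result.pk


class ReportAdmin(admin.ModelAdmin):
    def get_changelist(self, request, **kwargs):
        return ReportChangeList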
from django.contrib.gis.db.models.fields import ( ExtentField, GeometryCollectionField, GeometryField, LineStringField, ) from django.db.models import Aggregate from django.utils.functional import cached_property __all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union'] class GeoAggregate(Aggregate): function = None is_extent = False @cached_property def output_field(self): return self.output_field_class(self.source_expressions[0].output_field.srid) def as_sql(self, compiler, connection, function=None, **extra_context): # this will be called again in parent, but it's needed now - before # we get the spatial_aggregate_name connection.ops.check_expression_support(self) return super().as_sql( compiler, connection, function=function or connection.ops.spatial_aggregate_name(self.name), **extra_context ) def as_oracle(self, compiler, connection, **extra_context): tolerance = self.extra.get('tolerance') or getattr(self, 'tolerance', 0.05) template = None if self.is_extent else '%(function)s(SDOAGGRTYPE(%(expressions)s,%(tolerance)s))' return self.as_sql(compiler, connection, template=template, tolerance=tolerance, **extra_context) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) for expr in c.get_source_expressions(): if not hasattr(expr.field, 'geom_type'): raise ValueError('Geospatial aggregates only allowed on geometry fields.') return c class Collect(GeoAggregate): name = 'Collect' output_field_class = GeometryCollectionField class Extent(GeoAggregate): name = 'Extent' is_extent = '2D' def __init__(self, expression, **extra): super().__init__(expression, output_field=ExtentField(), **extra) def convert_value(self, value, expression, connection): return connection.ops.convert_extent(value) class Extent3D(GeoAggregate): name = 'Extent3D' is_extent = '3D' def __init__(self, expression, **extra): super().__init__(expression, output_field=ExtentField(), **extra) def convert_value(self, value, expression, connection): return connection.ops.convert_extent3d(value) class MakeLine(GeoAggregate): name = 'MakeLine' output_field_class = LineStringField class Union(GeoAggregate): name = 'Union' output_field_class = GeometryField
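# Usage sketch (illustrative, not part of the module above; requires a
# spatial backend). The `City` model with a PointField named `point` is
# hypothetical. Each aggregate resolves its output_field from the geometry
# column's SRID, and Extent's convert_value() returns the bounding box as a
# 4-tuple via connection.ops.convert_extent().

from django.contrib.gis.db.models import Extent, Union
from myapp.models import City  # hypothetical model with point = PointField()

result = City.objects.aggregate(
    bbox=Extent('point'),    # (xmin, ymin, xmax, ymax)
    merged=Union('point'),   # single GEOSGeometry combining all points
)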
from decimal import Decimal

from django.contrib.gis.db.models.fields import BaseSpatialField, GeometryField
from django.contrib.gis.db.models.sql import AreaField, DistanceField
from django.contrib.gis.geos import GEOSGeometry
from django.core.exceptions import FieldError
from django.db import NotSupportedError
from django.db.models import (
    BinaryField, BooleanField, FloatField, Func, IntegerField, TextField,
    Transform, Value,
)
from django.db.models.functions import Cast
from django.utils.functional import cached_property

NUMERIC_TYPES = (int, float, Decimal)


class GeoFuncMixin:
    function = None
    geom_param_pos = (0,)

    def __init__(self, *expressions, **extra):
        super().__init__(*expressions, **extra)

        # Ensure that value expressions are geometric.
        for pos in self.geom_param_pos:
            expr = self.source_expressions[pos]
            if not isinstance(expr, Value):
                continue
            try:
                output_field = expr.output_field
            except FieldError:
                output_field = None
            geom = expr.value
            if not isinstance(geom, GEOSGeometry) or output_field and not isinstance(output_field, GeometryField):
                raise TypeError("%s function requires a geometric argument in position %d." % (self.name, pos + 1))
            if not geom.srid and not output_field:
                raise ValueError("SRID is required for all geometries.")
            if not output_field:
                self.source_expressions[pos] = Value(geom, output_field=GeometryField(srid=geom.srid))

    @property
    def name(self):
        return self.__class__.__name__

    @cached_property
    def geo_field(self):
        return self.source_expressions[self.geom_param_pos[0]].field

    def as_sql(self, compiler, connection, function=None, **extra_context):
        if self.function is None and function is None:
            function = connection.ops.spatial_function_name(self.name)
        return super().as_sql(compiler, connection, function=function, **extra_context)

    def resolve_expression(self, *args, **kwargs):
        res = super().resolve_expression(*args, **kwargs)

        # Ensure that expressions are geometric.
        source_fields = res.get_source_fields()
        for pos in self.geom_param_pos:
            field = source_fields[pos]
            if not isinstance(field, GeometryField):
                raise TypeError(
                    "%s function requires a GeometryField in position %s, got %s." % (
                        self.name, pos + 1, type(field).__name__,
                    )
                )

        base_srid = res.geo_field.srid
        for pos in self.geom_param_pos[1:]:
            expr = res.source_expressions[pos]
            expr_srid = expr.output_field.srid
            if expr_srid != base_srid:
                # Automatic SRID conversion so objects are comparable.
                res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
        return res

    def _handle_param(self, value, param_name='', check_types=None):
        if not hasattr(value, 'resolve_expression'):
            if check_types and not isinstance(value, check_types):
                raise TypeError(
                    "The %s parameter has the wrong type: should be %s." % (
                        param_name, check_types)
                )
        return value


class GeoFunc(GeoFuncMixin, Func):
    pass


class GeomOutputGeoFunc(GeoFunc):
    @cached_property
    def output_field(self):
        return GeometryField(srid=self.geo_field.srid)


class SQLiteDecimalToFloatMixin:
    """
    By default, Decimal values are converted to str by the SQLite backend,
    which is not acceptable to the GIS functions expecting numeric values.
    """
    def as_sqlite(self, compiler, connection, **extra_context):
        for expr in self.get_source_expressions():
            if hasattr(expr, 'value') and isinstance(expr.value, Decimal):
                expr.value = float(expr.value)
        return super().as_sql(compiler, connection, **extra_context)


class OracleToleranceMixin:
    tolerance = 0.05

    def as_oracle(self, compiler, connection, **extra_context):
        tol = self.extra.get('tolerance', self.tolerance)
        return self.as_sql(
            compiler, connection,
            template="%%(function)s(%%(expressions)s, %s)" % tol,
            **extra_context
        )


class Area(OracleToleranceMixin, GeoFunc):
    arity = 1

    @cached_property
    def output_field(self):
        return AreaField(self.geo_field)

    def as_sql(self, compiler, connection, **extra_context):
        if not connection.features.supports_area_geodetic and self.geo_field.geodetic(connection):
            raise NotSupportedError('Area on geodetic coordinate systems not supported.')
        return super().as_sql(compiler, connection, **extra_context)

    def as_sqlite(self, compiler, connection, **extra_context):
        if self.geo_field.geodetic(connection):
            extra_context['template'] = '%(function)s(%(expressions)s, %(spheroid)d)'
            extra_context['spheroid'] = True
        return self.as_sql(compiler, connection, **extra_context)


class Azimuth(GeoFunc):
    output_field = FloatField()
    arity = 2
    geom_param_pos = (0, 1)


class AsGeoJSON(GeoFunc):
    output_field = TextField()

    def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
        expressions = [expression]
        if precision is not None:
            expressions.append(self._handle_param(precision, 'precision', int))
        options = 0
        if crs and bbox:
            options = 3
        elif bbox:
            options = 1
        elif crs:
            options = 2
        if options:
            expressions.append(options)
        super().__init__(*expressions, **extra)

    def as_oracle(self, compiler, connection, **extra_context):
        source_expressions = self.get_source_expressions()
        clone = self.copy()
        clone.set_source_expressions(source_expressions[:1])
        return super(AsGeoJSON, clone).as_sql(compiler, connection, **extra_context)


class AsGML(GeoFunc):
    geom_param_pos = (1,)
    output_field = TextField()

    def __init__(self, expression, version=2, precision=8, **extra):
        expressions = [version, expression]
        if precision is not None:
            expressions.append(self._handle_param(precision, 'precision', int))
        super().__init__(*expressions, **extra)

    def as_oracle(self, compiler, connection, **extra_context):
        source_expressions = self.get_source_expressions()
        version = source_expressions[0]
        clone = self.copy()
        clone.set_source_expressions([source_expressions[1]])
        extra_context['function'] = 'SDO_UTIL.TO_GML311GEOMETRY' if version.value == 3 else 'SDO_UTIL.TO_GMLGEOMETRY'
        return super(AsGML, clone).as_sql(compiler, connection, **extra_context)


class AsKML(AsGML):
    def as_sqlite(self, compiler, connection, **extra_context):
        # No version parameter
        clone = self.copy()
        clone.set_source_expressions(self.get_source_expressions()[1:])
        return clone.as_sql(compiler, connection, **extra_context)


class AsSVG(GeoFunc):
    output_field = TextField()

    def __init__(self, expression, relative=False, precision=8, **extra):
        relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
        expressions = [
            expression,
            relative,
            self._handle_param(precision, 'precision', int),
        ]
        super().__init__(*expressions, **extra)


class AsWKB(GeoFunc):
    output_field = BinaryField()
    arity = 1


class AsWKT(GeoFunc):
    output_field = TextField()
    arity = 1


class BoundingCircle(OracleToleranceMixin, GeoFunc):
    def __init__(self, expression, num_seg=48, **extra):
        super().__init__(expression, num_seg, **extra)

    def as_oracle(self, compiler, connection, **extra_context):
        clone = self.copy()
        clone.set_source_expressions([self.get_source_expressions()[0]])
        return super(BoundingCircle, clone).as_oracle(compiler, connection, **extra_context)


class Centroid(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 1


class Difference(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 2
    geom_param_pos = (0, 1)


class DistanceResultMixin:
    @cached_property
    def output_field(self):
        return DistanceField(self.geo_field)

    def source_is_geography(self):
        return self.geo_field.geography and self.geo_field.srid == 4326


class Distance(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
    geom_param_pos = (0, 1)
    spheroid = None

    def __init__(self, expr1, expr2, spheroid=None, **extra):
        expressions = [expr1, expr2]
        if spheroid is not None:
            self.spheroid = self._handle_param(spheroid, 'spheroid', bool)
        super().__init__(*expressions, **extra)

    def as_postgresql(self, compiler, connection, **extra_context):
        clone = self.copy()
        function = None
        expr2 = clone.source_expressions[1]
        geography = self.source_is_geography()
        if expr2.output_field.geography != geography:
            if isinstance(expr2, Value):
                expr2.output_field.geography = geography
            else:
                clone.source_expressions[1] = Cast(
                    expr2,
                    GeometryField(srid=expr2.output_field.srid, geography=geography),
                )

        if not geography and self.geo_field.geodetic(connection):
            # Geometry fields with geodetic (lon/lat) coordinates need special distance functions
            if self.spheroid:
                # DistanceSpheroid is more accurate and resource intensive than DistanceSphere
                function = connection.ops.spatial_function_name('DistanceSpheroid')
                # Replace the boolean param with the real spheroid of the base field
                clone.source_expressions.append(Value(self.geo_field.spheroid(connection)))
            else:
                function = connection.ops.spatial_function_name('DistanceSphere')
        return super(Distance, clone).as_sql(compiler, connection, function=function, **extra_context)

    def as_sqlite(self, compiler, connection, **extra_context):
        if self.geo_field.geodetic(connection):
            # SpatiaLite returns NULL instead of zero on geodetic coordinates
            extra_context['template'] = 'COALESCE(%(function)s(%(expressions)s, %(spheroid)s), 0)'
            extra_context['spheroid'] = int(bool(self.spheroid))
        return super().as_sql(compiler, connection, **extra_context)


class Envelope(GeomOutputGeoFunc):
    arity = 1


class ForcePolygonCW(GeomOutputGeoFunc):
    arity = 1


class GeoHash(GeoFunc):
    output_field = TextField()

    def __init__(self, expression, precision=None, **extra):
        expressions = [expression]
        if precision is not None:
            expressions.append(self._handle_param(precision, 'precision', int))
        super().__init__(*expressions, **extra)

    def as_mysql(self, compiler, connection, **extra_context):
        clone = self.copy()
        # If no precision is provided, set it to the maximum.
        if len(clone.source_expressions) < 2:
            clone.source_expressions.append(Value(100))
        return clone.as_sql(compiler, connection, **extra_context)


class GeometryDistance(GeoFunc):
    output_field = FloatField()
    arity = 2
    function = ''
    arg_joiner = ' <-> '
    geom_param_pos = (0, 1)


class Intersection(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 2
    geom_param_pos = (0, 1)


@BaseSpatialField.register_lookup
class IsValid(OracleToleranceMixin, GeoFuncMixin, Transform):
    lookup_name = 'isvalid'
    output_field = BooleanField()

    def as_oracle(self, compiler, connection, **extra_context):
        sql, params = super().as_oracle(compiler, connection, **extra_context)
        return "CASE %s WHEN 'TRUE' THEN 1 ELSE 0 END" % sql, params


class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
    def __init__(self, expr1, spheroid=True, **extra):
        self.spheroid = spheroid
        super().__init__(expr1, **extra)

    def as_sql(self, compiler, connection, **extra_context):
        if self.geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
            raise NotSupportedError("This backend doesn't support Length on geodetic fields")
        return super().as_sql(compiler, connection, **extra_context)

    def as_postgresql(self, compiler, connection, **extra_context):
        clone = self.copy()
        function = None
        if self.source_is_geography():
            clone.source_expressions.append(Value(self.spheroid))
        elif self.geo_field.geodetic(connection):
            # Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
            function = connection.ops.spatial_function_name('LengthSpheroid')
            clone.source_expressions.append(Value(self.geo_field.spheroid(connection)))
        else:
            dim = min(f.dim for f in self.get_source_fields() if f)
            if dim > 2:
                function = connection.ops.length3d
        return super(Length, clone).as_sql(compiler, connection, function=function, **extra_context)

    def as_sqlite(self, compiler, connection, **extra_context):
        function = None
        if self.geo_field.geodetic(connection):
            function = 'GeodesicLength' if self.spheroid else 'GreatCircleLength'
        return super().as_sql(compiler, connection, function=function, **extra_context)


class LineLocatePoint(GeoFunc):
    output_field = FloatField()
    arity = 2
    geom_param_pos = (0, 1)


class MakeValid(GeomOutputGeoFunc):
    pass


class MemSize(GeoFunc):
    output_field = IntegerField()
    arity = 1


class NumGeometries(GeoFunc):
    output_field = IntegerField()
    arity = 1


class NumPoints(GeoFunc):
    output_field = IntegerField()
    arity = 1


class Perimeter(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
    arity = 1

    def as_postgresql(self, compiler, connection, **extra_context):
        function = None
        if self.geo_field.geodetic(connection) and not self.source_is_geography():
            raise NotSupportedError("ST_Perimeter cannot use a non-projected non-geography field.")
        dim = min(f.dim for f in self.get_source_fields())
        if dim > 2:
            function = connection.ops.perimeter3d
        return super().as_sql(compiler, connection, function=function, **extra_context)

    def as_sqlite(self, compiler, connection, **extra_context):
        if self.geo_field.geodetic(connection):
            raise NotSupportedError("Perimeter cannot use a non-projected field.")
        return super().as_sql(compiler, connection, **extra_context)


class PointOnSurface(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 1


class Reverse(GeoFunc):
    arity = 1


class Scale(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
    def __init__(self, expression, x, y, z=0.0, **extra):
        expressions = [
            expression,
            self._handle_param(x, 'x', NUMERIC_TYPES),
            self._handle_param(y, 'y', NUMERIC_TYPES),
        ]
        if z != 0.0:
            expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
        super().__init__(*expressions, **extra)


class SnapToGrid(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
    def __init__(self, expression, *args, **extra):
        nargs = len(args)
        expressions = [expression]
        if nargs in (1, 2):
            expressions.extend(
                [self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
            )
        elif nargs == 4:
            # Reverse origin and size param ordering
            expressions += [
                *(self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]),
                *(self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]),
            ]
        else:
            raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
        super().__init__(*expressions, **extra)


class SymDifference(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 2
    geom_param_pos = (0, 1)


class Transform(GeomOutputGeoFunc):
    def __init__(self, expression, srid, **extra):
        expressions = [
            expression,
            self._handle_param(srid, 'srid', int),
        ]
        if 'output_field' not in extra:
            extra['output_field'] = GeometryField(srid=srid)
        super().__init__(*expressions, **extra)


class Translate(Scale):
    def as_sqlite(self, compiler, connection, **extra_context):
        clone = self.copy()
        if len(self.source_expressions) < 4:
            # Always provide the z parameter for ST_Translate
            clone.source_expressions.append(Value(0))
        return super(Translate, clone).as_sqlite(compiler, connection, **extra_context)


class Union(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 2
    geom_param_pos = (0, 1)
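

# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the module above).
# Shows how these function expressions compose in an annotation, assuming a
# hypothetical ``City`` model with a PointField named ``location``; the model,
# field name, and the literal point are assumptions.
#
#     from django.contrib.gis.db.models.functions import (
#         AsGeoJSON, Distance, Transform,
#     )
#     from django.contrib.gis.geos import Point
#
#     pnt = Point(0, 0, srid=4326)  # an SRID is required (see GeoFuncMixin)
#     City.objects.annotate(
#         d=Distance('location', pnt),  # the second geometry is automatically
#                                       # Transform-ed to the field's SRID in
#                                       # GeoFuncMixin.resolve_expression()
#         geojson=AsGeoJSON(Transform('location', 3857)),
#     )
# ---------------------------------------------------------------------------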
868065c5313a32a5f97b9fdec2e12deb698802a6b9a397a8dac9c3e0b0f12e62
from unittest import mock

from django.apps.registry import apps as global_apps
from django.db import DatabaseError, connection
from django.db.migrations.exceptions import InvalidMigrationPlan
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from django.test import (
    SimpleTestCase, modify_settings, override_settings, skipUnlessDBFeature,
)

from .test_base import MigrationTestBase


@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
class ExecutorTests(MigrationTestBase):
    """
    Tests the migration executor (full end-to-end running).

    Bear in mind that if these are failing you should fix the other
    test failures first, as they may be propagating into here.
    """
    available_apps = ["migrations", "migrations2", "django.contrib.auth", "django.contrib.contenttypes"]

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_run(self):
        """
        Tests running a simple set of migrations.
        """
        executor = MigrationExecutor(connection)
        # Let's look at the plan first and make sure it's up to scratch
        plan = executor.migration_plan([("migrations", "0002_second")])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
                (executor.loader.graph.nodes["migrations", "0002_second"], False),
            ],
        )
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
        # Alright, let's try running it
        executor.migrate([("migrations", "0002_second")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_book")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Alright, let's undo what we did
        plan = executor.migration_plan([("migrations", None)])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0002_second"], True),
                (executor.loader.graph.nodes["migrations", "0001_initial"], True),
            ],
        )
        executor.migrate([("migrations", None)])
        # Are the tables gone?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_run_with_squashed(self):
        """
        Tests running a squashed migration from zero (should ignore what it replaces)
        """
        executor = MigrationExecutor(connection)
        # Check our leaf node is the squashed one
        leaves = [key for key in executor.loader.graph.leaf_nodes() if key[0] == "migrations"]
        self.assertEqual(leaves, [("migrations", "0001_squashed_0002")])
        # Check the plan
        plan = executor.migration_plan([("migrations", "0001_squashed_0002")])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_squashed_0002"], False),
            ],
        )
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
        # Alright, let's try running it
        executor.migrate([("migrations", "0001_squashed_0002")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_book")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Alright, let's undo what we did. Should also just use squashed.
        plan = executor.migration_plan([("migrations", None)])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_squashed_0002"], True),
            ],
        )
        executor.migrate([("migrations", None)])
        # Are the tables gone?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"})
    def test_non_atomic_migration(self):
        """
        Applying a non-atomic migration works as expected.
        """
        executor = MigrationExecutor(connection)
        with self.assertRaisesMessage(RuntimeError, "Abort migration"):
            executor.migrate([("migrations", "0001_initial")])
        self.assertTableExists("migrations_publisher")
        migrations_apps = executor.loader.project_state(("migrations", "0001_initial")).apps
        Publisher = migrations_apps.get_model("migrations", "Publisher")
        self.assertTrue(Publisher.objects.exists())
        self.assertTableNotExists("migrations_book")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_atomic_operation"})
    def test_atomic_operation_in_non_atomic_migration(self):
        """
        An atomic operation is properly rolled back inside a non-atomic
        migration.
        """
        executor = MigrationExecutor(connection)
        with self.assertRaisesMessage(RuntimeError, "Abort migration"):
            executor.migrate([("migrations", "0001_initial")])
        migrations_apps = executor.loader.project_state(("migrations", "0001_initial")).apps
        Editor = migrations_apps.get_model("migrations", "Editor")
        self.assertFalse(Editor.objects.exists())
        # Record previous migration as successful.
        executor.migrate([("migrations", "0001_initial")], fake=True)
        # Rebuild the graph to reflect the new DB state.
        executor.loader.build_graph()
        # Migrating backwards is also atomic.
        with self.assertRaisesMessage(RuntimeError, "Abort migration"):
            executor.migrate([("migrations", None)])
        self.assertFalse(Editor.objects.exists())

    @override_settings(MIGRATION_MODULES={
        "migrations": "migrations.test_migrations",
        "migrations2": "migrations2.test_migrations_2",
    })
    def test_empty_plan(self):
        """
        Re-planning a full migration of a fully-migrated set doesn't
        perform spurious unmigrations and remigrations.

        There was previously a bug where the executor just always performed the
        backwards plan for applied migrations - which even for the most recent
        migration in an app, might include other, dependent apps, and these
        were being unmigrated.
        """
        # Make the initial plan, check it
        executor = MigrationExecutor(connection)
        plan = executor.migration_plan([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
                (executor.loader.graph.nodes["migrations", "0002_second"], False),
                (executor.loader.graph.nodes["migrations2", "0001_initial"], False),
            ],
        )
        # Fake-apply all migrations
        executor.migrate([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial")
        ], fake=True)
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Now plan a second time and make sure it's empty
        plan = executor.migration_plan([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ])
        self.assertEqual(plan, [])
        # The resulting state should include applied migrations.
        state = executor.migrate([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ])
        self.assertIn(('migrations', 'book'), state.models)
        self.assertIn(('migrations', 'author'), state.models)
        self.assertIn(('migrations2', 'otherauthor'), state.models)
        # Erase all the fake records
        executor.recorder.record_unapplied("migrations2", "0001_initial")
        executor.recorder.record_unapplied("migrations", "0002_second")
        executor.recorder.record_unapplied("migrations", "0001_initial")

    @override_settings(MIGRATION_MODULES={
        "migrations": "migrations.test_migrations",
        "migrations2": "migrations2.test_migrations_2_no_deps",
    })
    def test_mixed_plan_not_supported(self):
        """
        Although the MigrationExecutor interface allows for mixed migration
        plans (combined forwards and backwards migrations), this is not
        supported.
        """
        # Prepare for mixed plan
        executor = MigrationExecutor(connection)
        plan = executor.migration_plan([("migrations", "0002_second")])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
                (executor.loader.graph.nodes["migrations", "0002_second"], False),
            ],
        )
        executor.migrate(None, plan)
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        self.assertIn(('migrations', '0001_initial'), executor.loader.applied_migrations)
        self.assertIn(('migrations', '0002_second'), executor.loader.applied_migrations)
        self.assertNotIn(('migrations2', '0001_initial'), executor.loader.applied_migrations)

        # Generate mixed plan
        plan = executor.migration_plan([
            ("migrations", None),
            ("migrations2", "0001_initial"),
        ])
        msg = (
            'Migration plans with both forwards and backwards migrations are '
            'not supported. Please split your migration process into separate '
            'plans of only forwards OR backwards migrations.'
        )
        with self.assertRaisesMessage(InvalidMigrationPlan, msg) as cm:
            executor.migrate(None, plan)
        self.assertEqual(
            cm.exception.args[1],
            [
                (executor.loader.graph.nodes["migrations", "0002_second"], True),
                (executor.loader.graph.nodes["migrations", "0001_initial"], True),
                (executor.loader.graph.nodes["migrations2", "0001_initial"], False),
            ],
        )
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        executor.migrate([
            ("migrations", None),
            ("migrations2", None),
        ])
        # Are the tables gone?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
        self.assertTableNotExists("migrations2_otherauthor")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_soft_apply(self):
        """
        Tests detection of initial migrations already having been applied.
        """
        state = {"faked": None}

        def fake_storer(phase, migration=None, fake=None):
            state["faked"] = fake
        executor = MigrationExecutor(connection, progress_callback=fake_storer)
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Run it normally
        self.assertEqual(
            executor.migration_plan([("migrations", "0001_initial")]),
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            ],
        )
        executor.migrate([("migrations", "0001_initial")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # We shouldn't have faked that one
        self.assertIs(state["faked"], False)
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Fake-reverse that
        executor.migrate([("migrations", None)], fake=True)
        # Are the tables still there?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Make sure that was faked
        self.assertIs(state["faked"], True)
        # Finally, migrate forwards; this should fake-apply our initial migration
        executor.loader.build_graph()
        self.assertEqual(
            executor.migration_plan([("migrations", "0001_initial")]),
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            ],
        )
        # Applying the migration should raise a database level error
        # because we haven't given the --fake-initial option
        with self.assertRaises(DatabaseError):
            executor.migrate([("migrations", "0001_initial")])
        # Reset the faked state
        state = {"faked": None}
        # Allow faking of initial CreateModel operations
        executor.migrate([("migrations", "0001_initial")], fake_initial=True)
        self.assertIs(state["faked"], True)
        # And migrate back to clean up the database
        executor.loader.build_graph()
        executor.migrate([("migrations", None)])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")

    @override_settings(
        MIGRATION_MODULES={
            "migrations": "migrations.test_migrations_custom_user",
            "django.contrib.auth": "django.contrib.auth.migrations",
        },
        AUTH_USER_MODEL="migrations.Author",
    )
    def test_custom_user(self):
        """
        Regression test for #22325 - references to a custom user model
        defined in the same app are not resolved correctly.
        """
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Migrate forwards
        executor.migrate([("migrations", "0001_initial")])
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Make sure the soft-application detection works (#23093)
        # Change table_names to not return auth_user during this as
        # it wouldn't be there in a normal run, and ensure migrations.Author
        # exists in the global app registry temporarily.
        old_table_names = connection.introspection.table_names
        connection.introspection.table_names = lambda c: [x for x in old_table_names(c) if x != "auth_user"]
        migrations_apps = executor.loader.project_state(("migrations", "0001_initial")).apps
        global_apps.get_app_config("migrations").models["author"] = migrations_apps.get_model("migrations", "author")
        try:
            migration = executor.loader.get_migration("auth", "0001_initial")
            self.assertIs(executor.detect_soft_applied(None, migration)[0], True)
        finally:
            connection.introspection.table_names = old_table_names
            del global_apps.get_app_config("migrations").models["author"]
        # And migrate back to clean up the database
        executor.loader.build_graph()
        executor.migrate([("migrations", None)])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")

    @override_settings(
        MIGRATION_MODULES={
            "migrations": "migrations.test_add_many_to_many_field_initial",
        },
    )
    def test_detect_soft_applied_add_field_manytomanyfield(self):
        """
        executor.detect_soft_applied() detects ManyToManyField tables from an
        AddField operation. This checks the case of AddField in a migration
        with other operations (0001) and the case of AddField in its own
        migration (0002).
        """
        tables = [
            # from 0001
            "migrations_project",
            "migrations_task",
            "migrations_project_tasks",
            # from 0002
            "migrations_task_projects",
        ]
        executor = MigrationExecutor(connection)
        # Create the tables for 0001 but make it look like the migration hasn't
        # been applied.
        executor.migrate([("migrations", "0001_initial")])
        executor.migrate([("migrations", None)], fake=True)
        for table in tables[:3]:
            self.assertTableExists(table)
        # Table detection sees 0001 is applied but not 0002.
        migration = executor.loader.get_migration("migrations", "0001_initial")
        self.assertIs(executor.detect_soft_applied(None, migration)[0], True)
        migration = executor.loader.get_migration("migrations", "0002_initial")
        self.assertIs(executor.detect_soft_applied(None, migration)[0], False)

        # Create the tables for both migrations but make it look like neither
        # has been applied.
        executor.loader.build_graph()
        executor.migrate([("migrations", "0001_initial")], fake=True)
        executor.migrate([("migrations", "0002_initial")])
        executor.loader.build_graph()
        executor.migrate([("migrations", None)], fake=True)
        # Table detection sees 0002 is applied.
        migration = executor.loader.get_migration("migrations", "0002_initial")
        self.assertIs(executor.detect_soft_applied(None, migration)[0], True)

        # Leave the tables for 0001 except the many-to-many table. That missing
        # table should cause detect_soft_applied() to return False.
        with connection.schema_editor() as editor:
            for table in tables[2:]:
                editor.execute(editor.sql_delete_table % {"table": table})
        migration = executor.loader.get_migration("migrations", "0001_initial")
        self.assertIs(executor.detect_soft_applied(None, migration)[0], False)

        # Cleanup by removing the remaining tables.
        with connection.schema_editor() as editor:
            for table in tables[:2]:
                editor.execute(editor.sql_delete_table % {"table": table})
        for table in tables:
            self.assertTableNotExists(table)

    @override_settings(
        INSTALLED_APPS=[
            "migrations.migrations_test_apps.lookuperror_a",
            "migrations.migrations_test_apps.lookuperror_b",
            "migrations.migrations_test_apps.lookuperror_c"
        ]
    )
    def test_unrelated_model_lookups_forwards(self):
        """
        #24123 - All models of apps already applied which are unrelated to the
        first app being applied are part of the initial model state.
        """
        try:
            executor = MigrationExecutor(connection)
            self.assertTableNotExists("lookuperror_a_a1")
            self.assertTableNotExists("lookuperror_b_b1")
            self.assertTableNotExists("lookuperror_c_c1")
            executor.migrate([("lookuperror_b", "0003_b3")])
            self.assertTableExists("lookuperror_b_b3")
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()

            # Migrate forwards -- this used to raise a LookupError because
            # lookuperror_b.B2 is already applied
            executor.migrate([
                ("lookuperror_a", "0004_a4"),
                ("lookuperror_c", "0003_c3"),
            ])
            self.assertTableExists("lookuperror_a_a4")
            self.assertTableExists("lookuperror_c_c3")
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()
        finally:
            # Cleanup
            executor.migrate([
                ("lookuperror_a", None),
                ("lookuperror_b", None),
                ("lookuperror_c", None),
            ])
            self.assertTableNotExists("lookuperror_a_a1")
            self.assertTableNotExists("lookuperror_b_b1")
            self.assertTableNotExists("lookuperror_c_c1")

    @override_settings(
        INSTALLED_APPS=[
            "migrations.migrations_test_apps.lookuperror_a",
            "migrations.migrations_test_apps.lookuperror_b",
            "migrations.migrations_test_apps.lookuperror_c"
        ]
    )
    def test_unrelated_model_lookups_backwards(self):
        """
        #24123 - All models of apps being unapplied which are unrelated to the
        first app being unapplied are part of the initial model state.
        """
        try:
            executor = MigrationExecutor(connection)
            self.assertTableNotExists("lookuperror_a_a1")
            self.assertTableNotExists("lookuperror_b_b1")
            self.assertTableNotExists("lookuperror_c_c1")
            executor.migrate([
                ("lookuperror_a", "0004_a4"),
                ("lookuperror_b", "0003_b3"),
                ("lookuperror_c", "0003_c3"),
            ])
            self.assertTableExists("lookuperror_b_b3")
            self.assertTableExists("lookuperror_a_a4")
            self.assertTableExists("lookuperror_c_c3")
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()

            # Migrate backwards -- this used to raise a LookupError because
            # lookuperror_b.B2 is not in the initial state (unrelated to app c)
            executor.migrate([("lookuperror_a", None)])
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()
        finally:
            # Cleanup
            executor.migrate([
                ("lookuperror_b", None),
                ("lookuperror_c", None)
            ])
            self.assertTableNotExists("lookuperror_a_a1")
            self.assertTableNotExists("lookuperror_b_b1")
            self.assertTableNotExists("lookuperror_c_c1")

    @override_settings(
        INSTALLED_APPS=[
            'migrations.migrations_test_apps.mutate_state_a',
            'migrations.migrations_test_apps.mutate_state_b',
        ]
    )
    def test_unrelated_applied_migrations_mutate_state(self):
        """
        #26647 - Unrelated applied migrations should be part of the final
        state in both directions.
        """
        executor = MigrationExecutor(connection)
        executor.migrate([
            ('mutate_state_b', '0002_add_field'),
        ])
        # Migrate forward.
        executor.loader.build_graph()
        state = executor.migrate([
            ('mutate_state_a', '0001_initial'),
        ])
        self.assertIn('added', dict(state.models['mutate_state_b', 'b'].fields))
        executor.loader.build_graph()
        # Migrate backward.
        state = executor.migrate([
            ('mutate_state_a', None),
        ])
        self.assertIn('added', dict(state.models['mutate_state_b', 'b'].fields))
        executor.migrate([
            ('mutate_state_b', None),
        ])

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_process_callback(self):
        """
        #24129 - Tests the progress callback process.
        """
        call_args_list = []

        def callback(*args):
            call_args_list.append(args)
        executor = MigrationExecutor(connection, progress_callback=callback)
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        executor.migrate([
            ("migrations", "0001_initial"),
            ("migrations", "0002_second"),
        ])
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        executor.migrate([
            ("migrations", None),
            ("migrations", None),
        ])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")

        migrations = executor.loader.graph.nodes
        expected = [
            ("render_start",),
            ("render_success",),
            ("apply_start", migrations['migrations', '0001_initial'], False),
            ("apply_success", migrations['migrations', '0001_initial'], False),
            ("apply_start", migrations['migrations', '0002_second'], False),
            ("apply_success", migrations['migrations', '0002_second'], False),
            ("render_start",),
            ("render_success",),
            ("unapply_start", migrations['migrations', '0002_second'], False),
            ("unapply_success", migrations['migrations', '0002_second'], False),
            ("unapply_start", migrations['migrations', '0001_initial'], False),
            ("unapply_success", migrations['migrations', '0001_initial'], False),
        ]
        self.assertEqual(call_args_list, expected)

    @override_settings(
        INSTALLED_APPS=[
            "migrations.migrations_test_apps.alter_fk.author_app",
            "migrations.migrations_test_apps.alter_fk.book_app",
        ]
    )
    def test_alter_id_type_with_fk(self):
        try:
            executor = MigrationExecutor(connection)
            self.assertTableNotExists("author_app_author")
            self.assertTableNotExists("book_app_book")
            # Apply initial migrations
            executor.migrate([
                ("author_app", "0001_initial"),
                ("book_app", "0001_initial"),
            ])
            self.assertTableExists("author_app_author")
            self.assertTableExists("book_app_book")
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()

            # Apply PK type alteration
            executor.migrate([("author_app", "0002_alter_id")])

            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()
        finally:
            # We can't simply unapply the migrations here because there is no
            # implicit cast from VARCHAR to INT on the database level.
            with connection.schema_editor() as editor:
                editor.execute(editor.sql_delete_table % {"table": "book_app_book"})
                editor.execute(editor.sql_delete_table % {"table": "author_app_author"})
            self.assertTableNotExists("author_app_author")
            self.assertTableNotExists("book_app_book")
            executor.migrate([("author_app", None)], fake=True)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_apply_all_replaced_marks_replacement_as_applied(self):
        """
        Applying all replaced migrations marks replacement as applied (#24628).
        """
        recorder = MigrationRecorder(connection)
        # Place the database in a state where the replaced migrations are
        # partially applied: 0001 is applied, 0002 is not.
        recorder.record_applied("migrations", "0001_initial")
        executor = MigrationExecutor(connection)
        # Use fake because we don't actually have the first migration
        # applied, so the second will fail. And there's no need to actually
        # create/modify tables here, we're just testing the
        # MigrationRecorder, which works the same with or without fake.
        executor.migrate([("migrations", "0002_second")], fake=True)

        # Because we've now applied 0001 and 0002 both, their squashed
        # replacement should be marked as applied.
        self.assertIn(
            ("migrations", "0001_squashed_0002"),
            recorder.applied_migrations(),
        )

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_migrate_marks_replacement_applied_even_if_it_did_nothing(self):
        """
        A new squash migration will be marked as applied even if all its
        replaced migrations were previously already applied (#24628).
        """
        recorder = MigrationRecorder(connection)
        # Record all replaced migrations as applied
        recorder.record_applied("migrations", "0001_initial")
        recorder.record_applied("migrations", "0002_second")
        executor = MigrationExecutor(connection)
        executor.migrate([("migrations", "0001_squashed_0002")])

        # Because 0001 and 0002 are both applied, even though this migrate run
        # didn't apply anything new, their squashed replacement should be
        # marked as applied.
        self.assertIn(
            ("migrations", "0001_squashed_0002"),
            recorder.applied_migrations(),
        )

    # When the feature is False, the operation and the record won't be
    # performed in a transaction and the test will systematically pass.
    @skipUnlessDBFeature('can_rollback_ddl')
    @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'})
    def test_migrations_applied_and_recorded_atomically(self):
        """Migrations are applied and recorded atomically."""
        executor = MigrationExecutor(connection)
        with mock.patch('django.db.migrations.executor.MigrationExecutor.record_migration') as record_migration:
            record_migration.side_effect = RuntimeError('Recording migration failed.')
            with self.assertRaisesMessage(RuntimeError, 'Recording migration failed.'):
                executor.migrate([('migrations', '0001_initial')])
        # The migration isn't recorded as applied since it failed.
        migration_recorder = MigrationRecorder(connection)
        self.assertFalse(migration_recorder.migration_qs.filter(app='migrations', name='0001_initial').exists())
        self.assertTableNotExists('migrations_author')


class FakeLoader:
    def __init__(self, graph, applied):
        self.graph = graph
        self.applied_migrations = applied


class FakeMigration:
    """Really all we need is any object with a debug-useful repr."""
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return 'M<%s>' % self.name


class ExecutorUnitTests(SimpleTestCase):
    """(More) isolated unit tests for executor methods."""
    def test_minimize_rollbacks(self):
        """
        Minimize unnecessary rollbacks in connected apps.

        When you say "./manage.py migrate appA 0001", rather than migrating to
        just after appA-0001 in the linearized migration plan (which could roll
        back migrations in other apps that depend on appA 0001, but don't need
        to be rolled back since we're not rolling back appA 0001), we migrate
        to just before appA-0002.
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(b1, b1_impl)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, a2, a1)
        executor = MigrationExecutor(None)
        executor.loader = FakeLoader(graph, {
            a1: a1_impl,
            b1: b1_impl,
            a2: a2_impl,
        })
        plan = executor.migration_plan({a1})
        self.assertEqual(plan, [(a2_impl, True)])

    def test_minimize_rollbacks_branchy(self):
        r"""
        Minimize rollbacks when target has multiple in-app children.

        a: 1 <---- 3 <--\
              \ \- 2 <--- 4
               \       \
        b:      \- 1 <--- 2
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        a3_impl = FakeMigration('a3')
        a3 = ('a', '3')
        a4_impl = FakeMigration('a4')
        a4 = ('a', '4')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        b2_impl = FakeMigration('b2')
        b2 = ('b', '2')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(a3, a3_impl)
        graph.add_node(a4, a4_impl)
        graph.add_node(b1, b1_impl)
        graph.add_node(b2, b2_impl)
        graph.add_dependency(None, a2, a1)
        graph.add_dependency(None, a3, a1)
        graph.add_dependency(None, a4, a2)
        graph.add_dependency(None, a4, a3)
        graph.add_dependency(None, b2, b1)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, b2, a2)
        executor = MigrationExecutor(None)
        executor.loader = FakeLoader(graph, {
            a1: a1_impl,
            b1: b1_impl,
            a2: a2_impl,
            b2: b2_impl,
            a3: a3_impl,
            a4: a4_impl,
        })
        plan = executor.migration_plan({a1})
        should_be_rolled_back = [b2_impl, a4_impl, a2_impl, a3_impl]
        exp = [(m, True) for m in should_be_rolled_back]
        self.assertEqual(plan, exp)

    def test_backwards_nothing_to_do(self):
        r"""
        If the current state satisfies the given target, do nothing.

        a: 1 <--- 2
        b:      \- 1
        c:      \- 1

        If a1 is applied already and a2 is not, and we're asked to migrate to
        a1, don't apply or unapply b1 or c1, regardless of their current state.
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        c1_impl = FakeMigration('c1')
        c1 = ('c', '1')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(b1, b1_impl)
        graph.add_node(c1, c1_impl)
        graph.add_dependency(None, a2, a1)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, c1, a1)
        executor = MigrationExecutor(None)
        executor.loader = FakeLoader(graph, {
            a1: a1_impl,
            b1: b1_impl,
        })
        plan = executor.migration_plan({a1})
        self.assertEqual(plan, [])
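

# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the test module above).
# The FakeLoader/FakeMigration pattern above can be reused to probe a plan
# without touching a database; the graph shape below is an assumption chosen
# for illustration.
#
#     graph = MigrationGraph()
#     a1, a2 = ('a', '1'), ('a', '2')
#     graph.add_node(a1, FakeMigration('a1'))
#     graph.add_node(a2, FakeMigration('a2'))
#     graph.add_dependency(None, a2, a1)
#     executor = MigrationExecutor(None)
#     executor.loader = FakeLoader(graph, {})  # nothing applied yet
#     executor.migration_plan({a2})
#     # -> [(M<a1>, False), (M<a2>, False)]: apply a1, then a2, forwards
# ---------------------------------------------------------------------------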
75a122019f71bcaa1b59949cd6069e166951fdd09fa46c9bd0ec696d1abde29b
import datetime import importlib import io import os import sys from unittest import mock from django.apps import apps from django.core.management import CommandError, call_command from django.db import ( ConnectionHandler, DatabaseError, connection, connections, models, ) from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.utils import truncate_name from django.db.migrations.exceptions import InconsistentMigrationHistory from django.db.migrations.recorder import MigrationRecorder from django.test import TestCase, override_settings, skipUnlessDBFeature from .models import UnicodeModel, UnserializableModel from .routers import TestRouter from .test_base import MigrationTestBase class MigrateTests(MigrationTestBase): """ Tests running the migrate command. """ databases = {'default', 'other'} @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_migrate(self): """ Tests basic usage of the migrate command. """ # No tables are created self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableNotExists("migrations_book") # Run the migrations to 0001 only stdout = io.StringIO() call_command('migrate', 'migrations', '0001', verbosity=1, stdout=stdout, no_color=True) stdout = stdout.getvalue() self.assertIn('Target specific migration: 0001_initial, from migrations', stdout) self.assertIn('Applying migrations.0001_initial... OK', stdout) # The correct tables exist self.assertTableExists("migrations_author") self.assertTableExists("migrations_tribble") self.assertTableNotExists("migrations_book") # Run migrations all the way call_command("migrate", verbosity=0) # The correct tables exist self.assertTableExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableExists("migrations_book") # Unmigrate everything stdout = io.StringIO() call_command('migrate', 'migrations', 'zero', verbosity=1, stdout=stdout, no_color=True) stdout = stdout.getvalue() self.assertIn('Unapply all migrations: migrations', stdout) self.assertIn('Unapplying migrations.0002_second... OK', stdout) # Tables are gone self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableNotExists("migrations_book") @override_settings(INSTALLED_APPS=[ 'django.contrib.auth', 'django.contrib.contenttypes', 'migrations.migrations_test_apps.migrated_app', ]) def test_migrate_with_system_checks(self): out = io.StringIO() call_command('migrate', skip_checks=False, no_color=True, stdout=out) self.assertIn('Apply all migrations: migrated_app', out.getvalue()) @override_settings(INSTALLED_APPS=['migrations', 'migrations.migrations_test_apps.unmigrated_app_syncdb']) def test_app_without_migrations(self): msg = "App 'unmigrated_app_syncdb' does not have migrations." with self.assertRaisesMessage(CommandError, msg): call_command('migrate', app_label='unmigrated_app_syncdb') @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_clashing_prefix'}) def test_ambiguous_prefix(self): msg = ( "More than one migration matches 'a' in app 'migrations'. Please " "be more specific." ) with self.assertRaisesMessage(CommandError, msg): call_command('migrate', app_label='migrations', migration_name='a') @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_unknown_prefix(self): msg = "Cannot find a migration matching 'nonexistent' from app 'migrations'." 
with self.assertRaisesMessage(CommandError, msg): call_command('migrate', app_label='migrations', migration_name='nonexistent') @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_initial_false"}) def test_migrate_initial_false(self): """ `Migration.initial = False` skips fake-initial detection. """ # Make sure no tables are created self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") # Run the migrations to 0001 only call_command("migrate", "migrations", "0001", verbosity=0) # Fake rollback call_command("migrate", "migrations", "zero", fake=True, verbosity=0) # Make sure fake-initial detection does not run with self.assertRaises(DatabaseError): call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0) call_command("migrate", "migrations", "0001", fake=True, verbosity=0) # Real rollback call_command("migrate", "migrations", "zero", verbosity=0) # Make sure it's all gone self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableNotExists("migrations_book") @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations"}, DATABASE_ROUTERS=['migrations.routers.TestRouter'], ) def test_migrate_fake_initial(self): """ --fake-initial only works if all tables created in the initial migration of an app exists. Database routers must be obeyed when doing that check. """ # Make sure no tables are created for db in self.databases: self.assertTableNotExists("migrations_author", using=db) self.assertTableNotExists("migrations_tribble", using=db) # Run the migrations to 0001 only call_command("migrate", "migrations", "0001", verbosity=0) call_command("migrate", "migrations", "0001", verbosity=0, database="other") # Make sure the right tables exist self.assertTableExists("migrations_author") self.assertTableNotExists("migrations_tribble") # Also check the "other" database self.assertTableNotExists("migrations_author", using="other") self.assertTableExists("migrations_tribble", using="other") # Fake a roll-back call_command("migrate", "migrations", "zero", fake=True, verbosity=0) call_command("migrate", "migrations", "zero", fake=True, verbosity=0, database="other") # Make sure the tables still exist self.assertTableExists("migrations_author") self.assertTableExists("migrations_tribble", using="other") # Try to run initial migration with self.assertRaises(DatabaseError): call_command("migrate", "migrations", "0001", verbosity=0) # Run initial migration with an explicit --fake-initial out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: False): call_command("migrate", "migrations", "0001", fake_initial=True, stdout=out, verbosity=1) call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0, database="other") self.assertIn( "migrations.0001_initial... 
faked", out.getvalue().lower() ) # Run migrations all the way call_command("migrate", verbosity=0) call_command("migrate", verbosity=0, database="other") # Make sure the right tables exist self.assertTableExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableExists("migrations_book") self.assertTableNotExists("migrations_author", using="other") self.assertTableNotExists("migrations_tribble", using="other") self.assertTableNotExists("migrations_book", using="other") # Fake a roll-back call_command("migrate", "migrations", "zero", fake=True, verbosity=0) call_command("migrate", "migrations", "zero", fake=True, verbosity=0, database="other") # Make sure the tables still exist self.assertTableExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableExists("migrations_book") # Try to run initial migration with self.assertRaises(DatabaseError): call_command("migrate", "migrations", verbosity=0) # Run initial migration with an explicit --fake-initial with self.assertRaises(DatabaseError): # Fails because "migrations_tribble" does not exist but needs to in # order to make --fake-initial work. call_command("migrate", "migrations", fake_initial=True, verbosity=0) # Fake an apply call_command("migrate", "migrations", fake=True, verbosity=0) call_command("migrate", "migrations", fake=True, verbosity=0, database="other") # Unmigrate everything call_command("migrate", "migrations", "zero", verbosity=0) call_command("migrate", "migrations", "zero", verbosity=0, database="other") # Make sure it's all gone for db in self.databases: self.assertTableNotExists("migrations_author", using=db) self.assertTableNotExists("migrations_tribble", using=db) self.assertTableNotExists("migrations_book", using=db) @skipUnlessDBFeature('ignores_table_name_case') def test_migrate_fake_initial_case_insensitive(self): with override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_fake_initial_case_insensitive.initial', }): call_command('migrate', 'migrations', '0001', verbosity=0) call_command('migrate', 'migrations', 'zero', fake=True, verbosity=0) with override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_fake_initial_case_insensitive.fake_initial', }): out = io.StringIO() call_command( 'migrate', 'migrations', '0001', fake_initial=True, stdout=out, verbosity=1, no_color=True, ) self.assertIn( 'migrations.0001_initial... faked', out.getvalue().lower(), ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_fake_split_initial"}) def test_migrate_fake_split_initial(self): """ Split initial migrations can be faked with --fake-initial. """ call_command("migrate", "migrations", "0002", verbosity=0) call_command("migrate", "migrations", "zero", fake=True, verbosity=0) out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: False): call_command("migrate", "migrations", "0002", fake_initial=True, stdout=out, verbosity=1) value = out.getvalue().lower() self.assertIn("migrations.0001_initial... faked", value) self.assertIn("migrations.0002_second... faked", value) # Fake an apply call_command("migrate", "migrations", fake=True, verbosity=0) # Unmigrate everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"}) def test_migrate_conflict_exit(self): """ migrate exits if it detects a conflict. 
""" with self.assertRaisesMessage(CommandError, "Conflicting migrations detected"): call_command("migrate", "migrations") @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_showmigrations_list(self): """ showmigrations --list displays migrations and whether or not they're applied. """ out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: True): call_command("showmigrations", format='list', stdout=out, verbosity=0, no_color=False) self.assertEqual( '\x1b[1mmigrations\n\x1b[0m' ' [ ] 0001_initial\n' ' [ ] 0002_second\n', out.getvalue().lower() ) call_command("migrate", "migrations", "0001", verbosity=0) out = io.StringIO() # Giving the explicit app_label tests for selective `show_list` in the command call_command("showmigrations", "migrations", format='list', stdout=out, verbosity=0, no_color=True) self.assertEqual( 'migrations\n' ' [x] 0001_initial\n' ' [ ] 0002_second\n', out.getvalue().lower() ) out = io.StringIO() # Applied datetimes are displayed at verbosity 2+. call_command('showmigrations', 'migrations', stdout=out, verbosity=2, no_color=True) migration1 = MigrationRecorder(connection).migration_qs.get(app='migrations', name='0001_initial') self.assertEqual( 'migrations\n' ' [x] 0001_initial (applied at %s)\n' ' [ ] 0002_second\n' % migration1.applied.strftime('%Y-%m-%d %H:%M:%S'), out.getvalue().lower() ) # Cleanup by unmigrating everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"}) def test_showmigrations_plan(self): """ Tests --plan output of showmigrations command """ out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[ ] migrations.0001_initial\n" "[ ] migrations.0003_third\n" "[ ] migrations.0002_second\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[ ] migrations.0001_initial\n" "[ ] migrations.0003_third ... (migrations.0001_initial)\n" "[ ] migrations.0002_second ... (migrations.0001_initial, migrations.0003_third)\n", out.getvalue().lower() ) call_command("migrate", "migrations", "0003", verbosity=0) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[x] migrations.0001_initial\n" "[x] migrations.0003_third\n" "[ ] migrations.0002_second\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[x] migrations.0001_initial\n" "[x] migrations.0003_third ... (migrations.0001_initial)\n" "[ ] migrations.0002_second ... (migrations.0001_initial, migrations.0003_third)\n", out.getvalue().lower() ) # Cleanup by unmigrating everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_plan'}) def test_migrate_plan(self): """Tests migrate --plan output.""" out = io.StringIO() # Show the plan up to the third migration. 
call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0001_initial\n' ' Create model Salamander\n' ' Raw Python operation -> Grow salamander tail.\n' 'migrations.0002_second\n' ' Create model Book\n' " Raw SQL operation -> ['SELECT * FROM migrations_book']\n" 'migrations.0003_third\n' ' Create model Author\n' " Raw SQL operation -> ['SELECT * FROM migrations_author']\n", out.getvalue() ) try: # Migrate to the third migration. call_command('migrate', 'migrations', '0003', verbosity=0) out = io.StringIO() # Show the plan for when there is nothing to apply. call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' ' No planned migration operations.\n', out.getvalue() ) out = io.StringIO() # Show the plan for reverse migration back to 0001. call_command('migrate', 'migrations', '0001', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0003_third\n' ' Undo Create model Author\n' " Raw SQL operation -> ['SELECT * FROM migrations_book']\n" 'migrations.0002_second\n' ' Undo Create model Book\n' " Raw SQL operation -> ['SELECT * FROM migrations_salamand…\n", out.getvalue() ) out = io.StringIO() # Show the migration plan to fourth, with truncated details. call_command('migrate', 'migrations', '0004', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0004_fourth\n' ' Raw SQL operation -> SELECT * FROM migrations_author WHE…\n', out.getvalue() ) # Show the plan when an operation is irreversible. # Migrate to the fourth migration. call_command('migrate', 'migrations', '0004', verbosity=0) out = io.StringIO() call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0004_fourth\n' ' Raw SQL operation -> IRREVERSIBLE\n', out.getvalue() ) out = io.StringIO() call_command('migrate', 'migrations', '0005', plan=True, stdout=out, no_color=True) # Operation is marked as irreversible only in the revert plan. self.assertEqual( 'Planned operations:\n' 'migrations.0005_fifth\n' ' Raw Python operation\n' ' Raw Python operation\n' ' Raw Python operation -> Feed salamander.\n', out.getvalue() ) call_command('migrate', 'migrations', '0005', verbosity=0) out = io.StringIO() call_command('migrate', 'migrations', '0004', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0005_fifth\n' ' Raw Python operation -> IRREVERSIBLE\n' ' Raw Python operation -> IRREVERSIBLE\n' ' Raw Python operation\n', out.getvalue() ) finally: # Cleanup by unmigrating everything: fake the irreversible, then # migrate all to zero. 
call_command('migrate', 'migrations', '0003', fake=True, verbosity=0) call_command('migrate', 'migrations', 'zero', verbosity=0) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_empty'}) def test_showmigrations_no_migrations(self): out = io.StringIO() call_command('showmigrations', stdout=out, no_color=True) self.assertEqual('migrations\n (no migrations)\n', out.getvalue().lower()) @override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app']) def test_showmigrations_unmigrated_app(self): out = io.StringIO() call_command('showmigrations', 'unmigrated_app', stdout=out, no_color=True) self.assertEqual('unmigrated_app\n (no migrations)\n', out.getvalue().lower()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"}) def test_showmigrations_plan_no_migrations(self): """ Tests --plan output of showmigrations command without migrations """ out = io.StringIO() call_command('showmigrations', format='plan', stdout=out, no_color=True) self.assertEqual('(no migrations)\n', out.getvalue().lower()) out = io.StringIO() call_command('showmigrations', format='plan', stdout=out, verbosity=2, no_color=True) self.assertEqual('(no migrations)\n', out.getvalue().lower()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"}) def test_showmigrations_plan_squashed(self): """ Tests --plan output of showmigrations command with squashed migrations. """ out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[ ] migrations.1_auto\n" "[ ] migrations.2_auto\n" "[ ] migrations.3_squashed_5\n" "[ ] migrations.6_auto\n" "[ ] migrations.7_auto\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[ ] migrations.1_auto\n" "[ ] migrations.2_auto ... (migrations.1_auto)\n" "[ ] migrations.3_squashed_5 ... (migrations.2_auto)\n" "[ ] migrations.6_auto ... (migrations.3_squashed_5)\n" "[ ] migrations.7_auto ... (migrations.6_auto)\n", out.getvalue().lower() ) call_command("migrate", "migrations", "3_squashed_5", verbosity=0) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[x] migrations.1_auto\n" "[x] migrations.2_auto\n" "[x] migrations.3_squashed_5\n" "[ ] migrations.6_auto\n" "[ ] migrations.7_auto\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[x] migrations.1_auto\n" "[x] migrations.2_auto ... (migrations.1_auto)\n" "[x] migrations.3_squashed_5 ... (migrations.2_auto)\n" "[ ] migrations.6_auto ... (migrations.3_squashed_5)\n" "[ ] migrations.7_auto ... (migrations.6_auto)\n", out.getvalue().lower() ) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.mutate_state_b', 'migrations.migrations_test_apps.alter_fk.author_app', 'migrations.migrations_test_apps.alter_fk.book_app', ]) def test_showmigrations_plan_single_app_label(self): """ `showmigrations --plan app_label` output with a single app_label. """ # Single app with no dependencies on other apps. out = io.StringIO() call_command('showmigrations', 'mutate_state_b', format='plan', stdout=out) self.assertEqual( '[ ] mutate_state_b.0001_initial\n' '[ ] mutate_state_b.0002_add_field\n', out.getvalue() ) # Single app with dependencies. 
out = io.StringIO() call_command('showmigrations', 'author_app', format='plan', stdout=out) self.assertEqual( '[ ] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n', out.getvalue() ) # Some migrations already applied. call_command('migrate', 'author_app', '0001', verbosity=0) out = io.StringIO() call_command('showmigrations', 'author_app', format='plan', stdout=out) self.assertEqual( '[X] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n', out.getvalue() ) # Cleanup by unmigrating author_app. call_command('migrate', 'author_app', 'zero', verbosity=0) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.mutate_state_b', 'migrations.migrations_test_apps.alter_fk.author_app', 'migrations.migrations_test_apps.alter_fk.book_app', ]) def test_showmigrations_plan_multiple_app_labels(self): """ `showmigrations --plan app_label` output with multiple app_labels. """ # Multiple apps: author_app depends on book_app; mutate_state_b doesn't # depend on other apps. out = io.StringIO() call_command('showmigrations', 'mutate_state_b', 'author_app', format='plan', stdout=out) self.assertEqual( '[ ] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n' '[ ] mutate_state_b.0001_initial\n' '[ ] mutate_state_b.0002_add_field\n', out.getvalue() ) # Multiple apps: args order shouldn't matter (the same result is # expected as above). out = io.StringIO() call_command('showmigrations', 'author_app', 'mutate_state_b', format='plan', stdout=out) self.assertEqual( '[ ] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n' '[ ] mutate_state_b.0001_initial\n' '[ ] mutate_state_b.0002_add_field\n', out.getvalue() ) @override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app']) def test_showmigrations_plan_app_label_no_migrations(self): out = io.StringIO() call_command('showmigrations', 'unmigrated_app', format='plan', stdout=out, no_color=True) self.assertEqual('(no migrations)\n', out.getvalue()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_sqlmigrate_forwards(self): """ sqlmigrate outputs forward looking SQL. 
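
        Only the relative ordering of the transaction markers and the
        per-operation SQL comments is asserted, since the exact statements
        are backend-specific.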
""" out = io.StringIO() call_command("sqlmigrate", "migrations", "0001", stdout=out) output = out.getvalue().lower() index_tx_start = output.find(connection.ops.start_transaction_sql().lower()) index_op_desc_author = output.find('-- create model author') index_create_table = output.find('create table') index_op_desc_tribble = output.find('-- create model tribble') index_op_desc_unique_together = output.find('-- alter unique_together') index_tx_end = output.find(connection.ops.end_transaction_sql().lower()) if connection.features.can_rollback_ddl: self.assertGreater(index_tx_start, -1, "Transaction start not found") self.assertGreater( index_tx_end, index_op_desc_unique_together, "Transaction end not found or found before operation description (unique_together)" ) self.assertGreater( index_op_desc_author, index_tx_start, "Operation description (author) not found or found before transaction start" ) self.assertGreater( index_create_table, index_op_desc_author, "CREATE TABLE not found or found before operation description (author)" ) self.assertGreater( index_op_desc_tribble, index_create_table, "Operation description (tribble) not found or found before CREATE TABLE (author)" ) self.assertGreater( index_op_desc_unique_together, index_op_desc_tribble, "Operation description (unique_together) not found or found before operation description (tribble)" ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_sqlmigrate_backwards(self): """ sqlmigrate outputs reverse looking SQL. """ # Cannot generate the reverse SQL unless we've applied the migration. call_command("migrate", "migrations", verbosity=0) out = io.StringIO() call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True) output = out.getvalue().lower() index_tx_start = output.find(connection.ops.start_transaction_sql().lower()) index_op_desc_unique_together = output.find('-- alter unique_together') index_op_desc_tribble = output.find('-- create model tribble') index_op_desc_author = output.find('-- create model author') index_drop_table = output.rfind('drop table') index_tx_end = output.find(connection.ops.end_transaction_sql().lower()) if connection.features.can_rollback_ddl: self.assertGreater(index_tx_start, -1, "Transaction start not found") self.assertGreater( index_tx_end, index_op_desc_unique_together, "Transaction end not found or found before DROP TABLE" ) self.assertGreater( index_op_desc_unique_together, index_tx_start, "Operation description (unique_together) not found or found before transaction start" ) self.assertGreater( index_op_desc_tribble, index_op_desc_unique_together, "Operation description (tribble) not found or found before operation description (unique_together)" ) self.assertGreater( index_op_desc_author, index_op_desc_tribble, "Operation description (author) not found or found before operation description (tribble)" ) self.assertGreater( index_drop_table, index_op_desc_author, "DROP TABLE not found or found before operation description (author)" ) # Cleanup by unmigrating everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"}) def test_sqlmigrate_for_non_atomic_migration(self): """ Transaction wrappers aren't shown for non-atomic migrations. 
""" out = io.StringIO() call_command("sqlmigrate", "migrations", "0001", stdout=out) output = out.getvalue().lower() queries = [q.strip() for q in output.splitlines()] if connection.ops.start_transaction_sql(): self.assertNotIn(connection.ops.start_transaction_sql().lower(), queries) self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_sqlmigrate_for_non_transactional_databases(self): """ Transaction wrappers aren't shown for databases that don't support transactional DDL. """ out = io.StringIO() with mock.patch.object(connection.features, 'can_rollback_ddl', False): call_command('sqlmigrate', 'migrations', '0001', stdout=out) output = out.getvalue().lower() queries = [q.strip() for q in output.splitlines()] start_transaction_sql = connection.ops.start_transaction_sql() if start_transaction_sql: self.assertNotIn(start_transaction_sql.lower(), queries) self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_no_operations'}) def test_migrations_no_operations(self): err = io.StringIO() call_command('sqlmigrate', 'migrations', '0001_initial', stderr=err) self.assertEqual(err.getvalue(), 'No operations found.\n') @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.migrated_app", "migrations.migrations_test_apps.migrated_unapplied_app", "migrations.migrations_test_apps.unmigrated_app", ], ) def test_regression_22823_unmigrated_fk_to_migrated_model(self): """ Assuming you have 3 apps, `A`, `B`, and `C`, such that: * `A` has migrations * `B` has a migration we want to apply * `C` has no migrations, but has an FK to `A` When we try to migrate "B", an exception occurs because the "B" was not included in the ProjectState that is used to detect soft-applied migrations (#22823). """ call_command("migrate", "migrated_unapplied_app", stdout=io.StringIO()) # unmigrated_app.SillyModel has a foreign key to 'migrations.Tribble', # but that model is only defined in a migration, so the global app # registry never sees it and the reference is left dangling. Remove it # to avoid problems in subsequent tests. del apps._pending_operations[('migrations', 'tribble')] @override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app_syncdb']) def test_migrate_syncdb_deferred_sql_executed_with_schemaeditor(self): """ For an app without migrations, editor.execute() is used for executing the syncdb deferred SQL. """ stdout = io.StringIO() with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute: call_command('migrate', run_syncdb=True, verbosity=1, stdout=stdout, no_color=True) create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)]) self.assertEqual(create_table_count, 2) # There's at least one deferred SQL for creating the foreign key # index. self.assertGreater(len(execute.mock_calls), 2) stdout = stdout.getvalue() self.assertIn('Synchronize unmigrated apps: unmigrated_app_syncdb', stdout) self.assertIn('Creating tables...', stdout) table_name = truncate_name('unmigrated_app_syncdb_classroom', connection.ops.max_name_length()) self.assertIn('Creating table %s' % table_name, stdout) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_migrate_syncdb_app_with_migrations(self): msg = "Can't use run_syncdb with app 'migrations' as it has migrations." 
with self.assertRaisesMessage(CommandError, msg): call_command('migrate', 'migrations', run_syncdb=True, verbosity=0) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.unmigrated_app_syncdb', 'migrations.migrations_test_apps.unmigrated_app_simple', ]) def test_migrate_syncdb_app_label(self): """ Running migrate --run-syncdb with an app_label only creates tables for the specified app. """ stdout = io.StringIO() with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute: call_command('migrate', 'unmigrated_app_syncdb', run_syncdb=True, stdout=stdout) create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)]) self.assertEqual(create_table_count, 2) self.assertGreater(len(execute.mock_calls), 2) self.assertIn('Synchronize unmigrated app: unmigrated_app_syncdb', stdout.getvalue()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_migrate_record_replaced(self): """ Running a single squashed migration should record all of the original replaced migrations as run. """ recorder = MigrationRecorder(connection) out = io.StringIO() call_command("migrate", "migrations", verbosity=0) call_command("showmigrations", "migrations", stdout=out, no_color=True) self.assertEqual( 'migrations\n' ' [x] 0001_squashed_0002 (2 squashed migrations)\n', out.getvalue().lower() ) applied_migrations = recorder.applied_migrations() self.assertIn(("migrations", "0001_initial"), applied_migrations) self.assertIn(("migrations", "0002_second"), applied_migrations) self.assertIn(("migrations", "0001_squashed_0002"), applied_migrations) # Rollback changes call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_migrate_record_squashed(self): """ Running migrate for a squashed migration should record as run if all of the replaced migrations have been run (#25231). """ recorder = MigrationRecorder(connection) recorder.record_applied("migrations", "0001_initial") recorder.record_applied("migrations", "0002_second") out = io.StringIO() call_command("migrate", "migrations", verbosity=0) call_command("showmigrations", "migrations", stdout=out, no_color=True) self.assertEqual( 'migrations\n' ' [x] 0001_squashed_0002 (2 squashed migrations)\n', out.getvalue().lower() ) self.assertIn( ("migrations", "0001_squashed_0002"), recorder.applied_migrations() ) # No changes were actually applied so there is nothing to rollback @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_migrate_inconsistent_history(self): """ Running migrate with some migrations applied before their dependencies should not be allowed. """ recorder = MigrationRecorder(connection) recorder.record_applied("migrations", "0002_second") msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial" with self.assertRaisesMessage(InconsistentMigrationHistory, msg): call_command("migrate") applied_migrations = recorder.applied_migrations() self.assertNotIn(("migrations", "0001_initial"), applied_migrations) class MakeMigrationsTests(MigrationTestBase): """ Tests running the makemigrations command. 
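
    Most tests generate their files inside a temporary migration module (see
    temporary_migration_module()) so the source tree is left untouched.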
""" def setUp(self): super().setUp() self._old_models = apps.app_configs['migrations'].models.copy() def tearDown(self): apps.app_configs['migrations'].models = self._old_models apps.all_models['migrations'] = self._old_models apps.clear_cache() super().tearDown() def test_files_content(self): self.assertTableNotExists("migrations_unicodemodel") apps.register_model('migrations', UnicodeModel) with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", verbosity=0) # Check for empty __init__.py file in migrations folder init_file = os.path.join(migration_dir, "__init__.py") self.assertTrue(os.path.exists(init_file)) with open(init_file) as fp: content = fp.read() self.assertEqual(content, '') # Check for existing 0001_initial.py file in migration folder initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) with open(initial_file, encoding='utf-8') as fp: content = fp.read() self.assertIn('migrations.CreateModel', content) self.assertIn('initial = True', content) self.assertIn('úñí©óðé µóðéø', content) # Meta.verbose_name self.assertIn('úñí©óðé µóðéøß', content) # Meta.verbose_name_plural self.assertIn('ÚÑÍ¢ÓÐÉ', content) # title.verbose_name self.assertIn('“Ðjáñgó”', content) # title.default def test_makemigrations_order(self): """ makemigrations should recognize number-only migrations (0001.py). """ module = 'migrations.test_migrations_order' with self.temporary_migration_module(module=module) as migration_dir: if hasattr(importlib, 'invalidate_caches'): # importlib caches os.listdir() on some platforms like macOS # (#23850). importlib.invalidate_caches() call_command('makemigrations', 'migrations', '--empty', '-n', 'a', '-v', '0') self.assertTrue(os.path.exists(os.path.join(migration_dir, '0002_a.py'))) def test_makemigrations_empty_connections(self): empty_connections = ConnectionHandler({'default': {}}) with mock.patch('django.core.management.commands.makemigrations.connections', new=empty_connections): # with no apps out = io.StringIO() call_command('makemigrations', stdout=out) self.assertIn('No changes detected', out.getvalue()) # with an app with self.temporary_migration_module() as migration_dir: call_command('makemigrations', 'migrations', verbosity=0) init_file = os.path.join(migration_dir, '__init__.py') self.assertTrue(os.path.exists(init_file)) @override_settings(INSTALLED_APPS=['migrations', 'migrations2']) def test_makemigrations_consistency_checks_respect_routers(self): """ The history consistency checks in makemigrations respect settings.DATABASE_ROUTERS. """ def patched_has_table(migration_recorder): if migration_recorder.connection is connections['other']: raise Exception('Other connection') else: return mock.DEFAULT self.assertTableNotExists('migrations_unicodemodel') apps.register_model('migrations', UnicodeModel) with mock.patch.object( MigrationRecorder, 'has_table', autospec=True, side_effect=patched_has_table) as has_table: with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", verbosity=0) initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) self.assertEqual(has_table.call_count, 1) # 'default' is checked # Router says not to migrate 'other' so consistency shouldn't # be checked. 
with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']): call_command('makemigrations', 'migrations', verbosity=0) self.assertEqual(has_table.call_count, 2) # 'default' again # With a router that doesn't prohibit migrating 'other', # consistency is checked. with self.settings(DATABASE_ROUTERS=['migrations.routers.DefaultOtherRouter']): with self.assertRaisesMessage(Exception, 'Other connection'): call_command('makemigrations', 'migrations', verbosity=0) self.assertEqual(has_table.call_count, 4) # 'default' and 'other' # With a router that doesn't allow migrating on any database, # no consistency checks are made. with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']): with mock.patch.object(TestRouter, 'allow_migrate', return_value=False) as allow_migrate: call_command('makemigrations', 'migrations', verbosity=0) allow_migrate.assert_any_call('other', 'migrations', model_name='UnicodeModel') # allow_migrate() is called with the correct arguments. self.assertGreater(len(allow_migrate.mock_calls), 0) called_aliases = set() for mock_call in allow_migrate.mock_calls: _, call_args, call_kwargs = mock_call connection_alias, app_name = call_args called_aliases.add(connection_alias) # Raises an error if invalid app_name/model_name occurs. apps.get_app_config(app_name).get_model(call_kwargs['model_name']) self.assertEqual(called_aliases, set(connections)) self.assertEqual(has_table.call_count, 4) def test_failing_migration(self): # If a migration fails to serialize, it shouldn't generate an empty file. #21280 apps.register_model('migrations', UnserializableModel) with self.temporary_migration_module() as migration_dir: with self.assertRaisesMessage(ValueError, 'Cannot serialize'): call_command("makemigrations", "migrations", verbosity=0) initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertFalse(os.path.exists(initial_file)) def test_makemigrations_conflict_exit(self): """ makemigrations exits if it detects a conflict. """ with self.temporary_migration_module(module="migrations.test_migrations_conflict"): with self.assertRaises(CommandError) as context: call_command("makemigrations") exception_message = str(context.exception) self.assertIn( 'Conflicting migrations detected; multiple leaf nodes ' 'in the migration graph:', exception_message ) self.assertIn('0002_second', exception_message) self.assertIn('0002_conflicting_second', exception_message) self.assertIn('in migrations', exception_message) self.assertIn("To fix them run 'python manage.py makemigrations --merge'", exception_message) def test_makemigrations_merge_no_conflict(self): """ makemigrations exits if in merge mode with no conflicts. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("makemigrations", merge=True, stdout=out) self.assertIn("No conflicts detected to merge.", out.getvalue()) def test_makemigrations_empty_no_app_specified(self): """ makemigrations exits if no app is specified with 'empty' mode. """ msg = 'You must supply at least one app label when using --empty.' with self.assertRaisesMessage(CommandError, msg): call_command("makemigrations", empty=True) def test_makemigrations_empty_migration(self): """ makemigrations properly constructs an empty migration. 
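
        The generated file should still define a Migration class, just with
        empty dependencies and operations lists.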
""" with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", empty=True, verbosity=0) # Check for existing 0001_initial.py file in migration folder initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) with open(initial_file, encoding='utf-8') as fp: content = fp.read() # Remove all whitespace to check for empty dependencies and operations content = content.replace(' ', '') self.assertIn('dependencies=[\n]', content) self.assertIn('operations=[\n]', content) @override_settings(MIGRATION_MODULES={"migrations": None}) def test_makemigrations_disabled_migrations_for_app(self): """ makemigrations raises a nice error when migrations are disabled for an app. """ msg = ( "Django can't create migrations for app 'migrations' because migrations " "have been disabled via the MIGRATION_MODULES setting." ) with self.assertRaisesMessage(ValueError, msg): call_command("makemigrations", "migrations", empty=True, verbosity=0) def test_makemigrations_no_changes_no_apps(self): """ makemigrations exits when there are no changes and no apps are specified. """ out = io.StringIO() call_command("makemigrations", stdout=out) self.assertIn("No changes detected", out.getvalue()) def test_makemigrations_no_changes(self): """ makemigrations exits when there are no changes to an app. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): call_command("makemigrations", "migrations", stdout=out) self.assertIn("No changes detected in app 'migrations'", out.getvalue()) def test_makemigrations_no_apps_initial(self): """ makemigrations should detect initial is needed on empty migration modules if no app provided. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_empty"): call_command("makemigrations", stdout=out) self.assertIn("0001_initial.py", out.getvalue()) def test_makemigrations_no_init(self): """Migration directories without an __init__.py file are allowed.""" out = io.StringIO() with self.temporary_migration_module(module='migrations.test_migrations_no_init'): call_command('makemigrations', stdout=out) self.assertIn('0001_initial.py', out.getvalue()) def test_makemigrations_migrations_announce(self): """ makemigrations announces the migration at the default verbosity level. """ out = io.StringIO() with self.temporary_migration_module(): call_command("makemigrations", "migrations", stdout=out) self.assertIn("Migrations for 'migrations'", out.getvalue()) def test_makemigrations_no_common_ancestor(self): """ makemigrations fails to merge migrations with no common ancestor. """ with self.assertRaises(ValueError) as context: with self.temporary_migration_module(module="migrations.test_migrations_no_ancestor"): call_command("makemigrations", "migrations", merge=True) exception_message = str(context.exception) self.assertIn("Could not find common ancestor of", exception_message) self.assertIn("0002_second", exception_message) self.assertIn("0002_conflicting_second", exception_message) def test_makemigrations_interactive_reject(self): """ makemigrations enters and exits interactive mode properly. 
""" # Monkeypatch interactive questioner to auto reject with mock.patch('builtins.input', mock.Mock(return_value='N')): with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, verbosity=0) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) def test_makemigrations_interactive_accept(self): """ makemigrations enters interactive mode and merges properly. """ # Monkeypatch interactive questioner to auto accept with mock.patch('builtins.input', mock.Mock(return_value='y')): out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertTrue(os.path.exists(merge_file)) self.assertIn("Created new merge migration", out.getvalue()) @mock.patch('django.db.migrations.utils.datetime') def test_makemigrations_default_merge_name(self, mock_datetime): mock_datetime.datetime.now.return_value = datetime.datetime(2016, 1, 2, 3, 4) with mock.patch('builtins.input', mock.Mock(return_value='y')): out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", merge=True, interactive=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge_20160102_0304.py') self.assertTrue(os.path.exists(merge_file)) self.assertIn("Created new merge migration", out.getvalue()) def test_makemigrations_non_interactive_not_null_addition(self): """ Non-interactive makemigrations fails when a default is missing on a new not-null field. """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) silly_int = models.IntegerField() class Meta: app_label = "migrations" out = io.StringIO() with self.assertRaises(SystemExit): with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", interactive=False, stdout=out) def test_makemigrations_non_interactive_not_null_alteration(self): """ Non-interactive makemigrations fails when a default is missing on a field changed to not-null. """ class Author(models.Model): name = models.CharField(max_length=255) slug = models.SlugField() age = models.IntegerField(default=0) class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("makemigrations", "migrations", interactive=False, stdout=out) self.assertIn("Alter field slug on author", out.getvalue()) def test_makemigrations_non_interactive_no_model_rename(self): """ makemigrations adds and removes a possible model rename in non-interactive mode. """ class RenamedModel(models.Model): silly_field = models.BooleanField(default=False) class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", interactive=False, stdout=out) self.assertIn("Delete model SillyModel", out.getvalue()) self.assertIn("Create model RenamedModel", out.getvalue()) def test_makemigrations_non_interactive_no_field_rename(self): """ makemigrations adds and removes a possible field rename in non-interactive mode. 
""" class SillyModel(models.Model): silly_rename = models.BooleanField(default=False) class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", interactive=False, stdout=out) self.assertIn("Remove field silly_field from sillymodel", out.getvalue()) self.assertIn("Add field silly_rename to sillymodel", out.getvalue()) def test_makemigrations_handle_merge(self): """ makemigrations properly merges the conflicting migrations with --noinput. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, interactive=False, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertTrue(os.path.exists(merge_file)) output = out.getvalue() self.assertIn("Merging migrations", output) self.assertIn("Branch 0002_second", output) self.assertIn("Branch 0002_conflicting_second", output) self.assertIn("Created new merge migration", output) def test_makemigration_merge_dry_run(self): """ makemigrations respects --dry-run option when fixing migration conflicts (#24427). """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command( "makemigrations", "migrations", name="merge", dry_run=True, merge=True, interactive=False, stdout=out, ) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) output = out.getvalue() self.assertIn("Merging migrations", output) self.assertIn("Branch 0002_second", output) self.assertIn("Branch 0002_conflicting_second", output) self.assertNotIn("Created new merge migration", output) def test_makemigration_merge_dry_run_verbosity_3(self): """ `makemigrations --merge --dry-run` writes the merge migration file to stdout with `verbosity == 3` (#24427). """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command( "makemigrations", "migrations", name="merge", dry_run=True, merge=True, interactive=False, stdout=out, verbosity=3, ) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) output = out.getvalue() self.assertIn("Merging migrations", output) self.assertIn("Branch 0002_second", output) self.assertIn("Branch 0002_conflicting_second", output) self.assertNotIn("Created new merge migration", output) # Additional output caused by verbosity 3 # The complete merge migration file that would be written self.assertIn("class Migration(migrations.Migration):", output) self.assertIn("dependencies = [", output) self.assertIn("('migrations', '0002_second')", output) self.assertIn("('migrations', '0002_conflicting_second')", output) self.assertIn("operations = [", output) self.assertIn("]", output) def test_makemigrations_dry_run(self): """ `makemigrations --dry-run` should not ask for defaults. 
""" class SillyModel(models.Model): silly_field = models.BooleanField(default=False) silly_date = models.DateField() # Added field without a default class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", dry_run=True, stdout=out) # Output the expected changes directly, without asking for defaults self.assertIn("Add field silly_date to sillymodel", out.getvalue()) def test_makemigrations_dry_run_verbosity_3(self): """ Allow `makemigrations --dry-run` to output the migrations file to stdout (with verbosity == 3). """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) silly_char = models.CharField(default="") class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3) # Normal --dry-run output self.assertIn("- Add field silly_char to sillymodel", out.getvalue()) # Additional output caused by verbosity 3 # The complete migrations file that would be written self.assertIn("class Migration(migrations.Migration):", out.getvalue()) self.assertIn("dependencies = [", out.getvalue()) self.assertIn("('migrations', '0001_initial'),", out.getvalue()) self.assertIn("migrations.AddField(", out.getvalue()) self.assertIn("model_name='sillymodel',", out.getvalue()) self.assertIn("name='silly_char',", out.getvalue()) def test_makemigrations_migrations_modules_path_not_exist(self): """ makemigrations creates migrations when specifying a custom location for migration files using MIGRATION_MODULES if the custom path doesn't already exist. """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) class Meta: app_label = "migrations" out = io.StringIO() migration_module = "migrations.test_migrations_path_doesnt_exist.foo.bar" with self.temporary_migration_module(module=migration_module) as migration_dir: call_command("makemigrations", "migrations", stdout=out) # Migrations file is actually created in the expected path. initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) # Command output indicates the migration is created. self.assertIn(" - Create model SillyModel", out.getvalue()) @override_settings(MIGRATION_MODULES={'migrations': 'some.nonexistent.path'}) def test_makemigrations_migrations_modules_nonexistent_toplevel_package(self): msg = ( 'Could not locate an appropriate location to create migrations ' 'package some.nonexistent.path. Make sure the toplevel package ' 'exists and can be imported.' ) with self.assertRaisesMessage(ValueError, msg): call_command('makemigrations', 'migrations', empty=True, verbosity=0) def test_makemigrations_interactive_by_default(self): """ The user is prompted to merge by default if there are conflicts and merge is True. Answer negative to differentiate it from behavior when --noinput is specified. 
""" # Monkeypatch interactive questioner to auto reject out = io.StringIO() with mock.patch('builtins.input', mock.Mock(return_value='N')): with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') # This will fail if interactive is False by default self.assertFalse(os.path.exists(merge_file)) self.assertNotIn("Created new merge migration", out.getvalue()) @override_settings( INSTALLED_APPS=[ "migrations", "migrations.migrations_test_apps.unspecified_app_with_conflict"]) def test_makemigrations_unspecified_app_with_conflict_no_merge(self): """ makemigrations does not raise a CommandError when an unspecified app has conflicting migrations. """ with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): call_command("makemigrations", "migrations", merge=False, verbosity=0) @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.migrated_app", "migrations.migrations_test_apps.unspecified_app_with_conflict"]) def test_makemigrations_unspecified_app_with_conflict_merge(self): """ makemigrations does not create a merge for an unspecified app even if it has conflicting migrations. """ # Monkeypatch interactive questioner to auto accept with mock.patch('builtins.input', mock.Mock(return_value='y')): out = io.StringIO() with self.temporary_migration_module(app_label="migrated_app") as migration_dir: call_command("makemigrations", "migrated_app", name="merge", merge=True, interactive=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) self.assertIn("No conflicts detected to merge.", out.getvalue()) @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.migrated_app", "migrations.migrations_test_apps.conflicting_app_with_dependencies"]) def test_makemigrations_merge_dont_output_dependency_operations(self): """ makemigrations --merge does not output any operations from apps that don't belong to a given app. """ # Monkeypatch interactive questioner to auto accept with mock.patch('builtins.input', mock.Mock(return_value='N')): out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: False): call_command( "makemigrations", "conflicting_app_with_dependencies", merge=True, interactive=True, stdout=out ) val = out.getvalue().lower() self.assertIn('merging conflicting_app_with_dependencies\n', val) self.assertIn( ' branch 0002_conflicting_second\n' ' - create model something\n', val ) self.assertIn( ' branch 0002_second\n' ' - delete model tribble\n' ' - remove field silly_field from author\n' ' - add field rating to author\n' ' - create model book\n', val ) def test_makemigrations_with_custom_name(self): """ makemigrations --name generate a custom migration name. 
""" with self.temporary_migration_module() as migration_dir: def cmd(migration_count, migration_name, *args): call_command("makemigrations", "migrations", "--verbosity", "0", "--name", migration_name, *args) migration_file = os.path.join(migration_dir, "%s_%s.py" % (migration_count, migration_name)) # Check for existing migration file in migration folder self.assertTrue(os.path.exists(migration_file)) with open(migration_file, encoding='utf-8') as fp: content = fp.read() content = content.replace(" ", "") return content # generate an initial migration migration_name_0001 = "my_initial_migration" content = cmd("0001", migration_name_0001) self.assertIn("dependencies=[\n]", content) # importlib caches os.listdir() on some platforms like macOS # (#23850). if hasattr(importlib, 'invalidate_caches'): importlib.invalidate_caches() # generate an empty migration migration_name_0002 = "my_custom_migration" content = cmd("0002", migration_name_0002, "--empty") self.assertIn("dependencies=[\n('migrations','0001_%s'),\n]" % migration_name_0001, content) self.assertIn("operations=[\n]", content) def test_makemigrations_with_invalid_custom_name(self): msg = 'The migration name must be a valid Python identifier.' with self.assertRaisesMessage(CommandError, msg): call_command('makemigrations', 'migrations', '--name', 'invalid name', '--empty') def test_makemigrations_check(self): """ makemigrations --check should exit with a non-zero status when there are changes to an app requiring migrations. """ with self.temporary_migration_module(): with self.assertRaises(SystemExit): call_command("makemigrations", "--check", "migrations", verbosity=0) with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): call_command("makemigrations", "--check", "migrations", verbosity=0) def test_makemigrations_migration_path_output(self): """ makemigrations should print the relative paths to the migrations unless they are outside of the current tree, in which case the absolute path should be shown. """ out = io.StringIO() apps.register_model('migrations', UnicodeModel) with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", stdout=out) self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue()) def test_makemigrations_migration_path_output_valueerror(self): """ makemigrations prints the absolute path if os.path.relpath() raises a ValueError when it's impossible to obtain a relative path, e.g. on Windows if Django is installed on a different drive than where the migration files are created. """ out = io.StringIO() with self.temporary_migration_module() as migration_dir: with mock.patch('os.path.relpath', side_effect=ValueError): call_command('makemigrations', 'migrations', stdout=out) self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue()) def test_makemigrations_inconsistent_history(self): """ makemigrations should raise InconsistentMigrationHistory exception if there are some migrations applied before their dependencies. 
""" recorder = MigrationRecorder(connection) recorder.record_applied('migrations', '0002_second') msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial" with self.temporary_migration_module(module="migrations.test_migrations"): with self.assertRaisesMessage(InconsistentMigrationHistory, msg): call_command("makemigrations") @mock.patch('builtins.input', return_value='1') @mock.patch('django.db.migrations.questioner.sys.stdin', mock.MagicMock(encoding=sys.getdefaultencoding())) def test_makemigrations_auto_now_add_interactive(self, *args): """ makemigrations prompts the user when adding auto_now_add to an existing model. """ class Entry(models.Model): title = models.CharField(max_length=255) creation_date = models.DateTimeField(auto_now_add=True) class Meta: app_label = 'migrations' # Monkeypatch interactive questioner to auto accept with mock.patch('django.db.migrations.questioner.sys.stdout', new_callable=io.StringIO) as prompt_stdout: out = io.StringIO() with self.temporary_migration_module(module='migrations.test_auto_now_add'): call_command('makemigrations', 'migrations', interactive=True, stdout=out) output = out.getvalue() prompt_output = prompt_stdout.getvalue() self.assertIn("You can accept the default 'timezone.now' by pressing 'Enter'", prompt_output) self.assertIn("Add field creation_date to entry", output) class SquashMigrationsTests(MigrationTestBase): """ Tests running the squashmigrations command. """ def test_squashmigrations_squashes(self): """ squashmigrations squashes migrations. """ with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir: call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0) squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py") self.assertTrue(os.path.exists(squashed_migration_file)) def test_squashmigrations_initial_attribute(self): with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir: call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0) squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py") with open(squashed_migration_file, encoding='utf-8') as fp: content = fp.read() self.assertIn("initial = True", content) def test_squashmigrations_optimizes(self): """ squashmigrations optimizes operations. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, stdout=out) self.assertIn("Optimized from 8 operations to 2 operations.", out.getvalue()) def test_ticket_23799_squashmigrations_no_optimize(self): """ squashmigrations --no-optimize doesn't optimize operations. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, no_optimize=True, stdout=out) self.assertIn("Skipping optimization", out.getvalue()) def test_squashmigrations_valid_start(self): """ squashmigrations accepts a starting migration. 
""" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_changes") as migration_dir: call_command("squashmigrations", "migrations", "0002", "0003", interactive=False, verbosity=1, stdout=out) squashed_migration_file = os.path.join(migration_dir, "0002_second_squashed_0003_third.py") with open(squashed_migration_file, encoding='utf-8') as fp: content = fp.read() self.assertIn(" ('migrations', '0001_initial')", content) self.assertNotIn("initial = True", content) out = out.getvalue() self.assertNotIn(" - 0001_initial", out) self.assertIn(" - 0002_second", out) self.assertIn(" - 0003_third", out) def test_squashmigrations_invalid_start(self): """ squashmigrations doesn't accept a starting migration after the ending migration. """ with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): msg = ( "The migration 'migrations.0003_third' cannot be found. Maybe " "it comes after the migration 'migrations.0002_second'" ) with self.assertRaisesMessage(CommandError, msg): call_command("squashmigrations", "migrations", "0003", "0002", interactive=False, verbosity=0) def test_squashed_name_with_start_migration_name(self): """--squashed-name specifies the new migration's name.""" squashed_name = 'squashed_name' with self.temporary_migration_module(module='migrations.test_migrations') as migration_dir: call_command( 'squashmigrations', 'migrations', '0001', '0002', squashed_name=squashed_name, interactive=False, verbosity=0, ) squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name) self.assertTrue(os.path.exists(squashed_migration_file)) def test_squashed_name_without_start_migration_name(self): """--squashed-name also works if a start migration is omitted.""" squashed_name = 'squashed_name' with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir: call_command( 'squashmigrations', 'migrations', '0001', squashed_name=squashed_name, interactive=False, verbosity=0, ) squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name) self.assertTrue(os.path.exists(squashed_migration_file)) class AppLabelErrorTests(TestCase): """ This class inherits TestCase because MigrationTestBase uses `available_apps = ['migrations']` which means that it's the only installed app. 'django.contrib.auth' must be in INSTALLED_APPS for some of these tests. """ nonexistent_app_error = "No installed app with label 'nonexistent_app'." did_you_mean_auth_error = ( "No installed app with label 'django.contrib.auth'. Did you mean " "'auth'?" 
) def test_makemigrations_nonexistent_app_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('makemigrations', 'nonexistent_app', stderr=err) self.assertIn(self.nonexistent_app_error, err.getvalue()) def test_makemigrations_app_name_specified_as_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('makemigrations', 'django.contrib.auth', stderr=err) self.assertIn(self.did_you_mean_auth_error, err.getvalue()) def test_migrate_nonexistent_app_label(self): with self.assertRaisesMessage(CommandError, self.nonexistent_app_error): call_command('migrate', 'nonexistent_app') def test_migrate_app_name_specified_as_label(self): with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error): call_command('migrate', 'django.contrib.auth') def test_showmigrations_nonexistent_app_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('showmigrations', 'nonexistent_app', stderr=err) self.assertIn(self.nonexistent_app_error, err.getvalue()) def test_showmigrations_app_name_specified_as_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('showmigrations', 'django.contrib.auth', stderr=err) self.assertIn(self.did_you_mean_auth_error, err.getvalue()) def test_sqlmigrate_nonexistent_app_label(self): with self.assertRaisesMessage(CommandError, self.nonexistent_app_error): call_command('sqlmigrate', 'nonexistent_app', '0002') def test_sqlmigrate_app_name_specified_as_label(self): with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error): call_command('sqlmigrate', 'django.contrib.auth', '0002') def test_squashmigrations_nonexistent_app_label(self): with self.assertRaisesMessage(CommandError, self.nonexistent_app_error): call_command('squashmigrations', 'nonexistent_app', '0002') def test_squashmigrations_app_name_specified_as_label(self): with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error): call_command('squashmigrations', 'django.contrib.auth', '0002')
from django.core.exceptions import FieldDoesNotExist from django.db import ( IntegrityError, connection, migrations, models, transaction, ) from django.db.migrations.migration import Migration from django.db.migrations.operations import CreateModel from django.db.migrations.operations.fields import FieldOperation from django.db.migrations.state import ModelState, ProjectState from django.db.transaction import atomic from django.test import SimpleTestCase, override_settings, skipUnlessDBFeature from .models import FoodManager, FoodQuerySet, UnicodeModel from .test_base import OperationTestBase class Mixin: pass class OperationTests(OperationTestBase): """ Tests running the operations and making sure they do what they say they do. Each test looks at their state changing, and then their database operation - both forwards and backwards. """ def test_create_model(self): """ Tests the CreateModel operation. Most other tests use this operation as part of setup, so check failures here first. """ operation = migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.IntegerField(default=1)), ], ) self.assertEqual(operation.describe(), "Create model Pony") # Test the state alteration project_state = ProjectState() new_state = project_state.clone() operation.state_forwards("test_crmo", new_state) self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony") self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2) # Test the database alteration self.assertTableNotExists("test_crmo_pony") with connection.schema_editor() as editor: operation.database_forwards("test_crmo", editor, project_state, new_state) self.assertTableExists("test_crmo_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crmo", editor, new_state, project_state) self.assertTableNotExists("test_crmo_pony") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "CreateModel") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["fields", "name"]) # And default manager not in set operation = migrations.CreateModel("Foo", fields=[], managers=[("objects", models.Manager())]) definition = operation.deconstruct() self.assertNotIn('managers', definition[2]) def test_create_model_with_duplicate_field_name(self): with self.assertRaisesMessage(ValueError, 'Found duplicate value pink in CreateModel fields argument.'): migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.TextField()), ("pink", models.IntegerField(default=1)), ], ) def test_create_model_with_duplicate_base(self): message = 'Found duplicate value test_crmo.pony in CreateModel bases argument.' with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=("test_crmo.Pony", "test_crmo.Pony",), ) with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=("test_crmo.Pony", "test_crmo.pony",), ) message = 'Found duplicate value migrations.unicodemodel in CreateModel bases argument.' 
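        # Duplicate detection resolves model classes and
        # "app_label.ModelName" strings (case-insensitively) to the same
        # reference.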
with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=(UnicodeModel, UnicodeModel,), ) with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=(UnicodeModel, 'migrations.unicodemodel',), ) with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=(UnicodeModel, 'migrations.UnicodeModel',), ) message = "Found duplicate value <class 'django.db.models.base.Model'> in CreateModel bases argument." with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=(models.Model, models.Model,), ) message = "Found duplicate value <class 'migrations.test_operations.Mixin'> in CreateModel bases argument." with self.assertRaisesMessage(ValueError, message): migrations.CreateModel( "Pony", fields=[], bases=(Mixin, Mixin,), ) def test_create_model_with_duplicate_manager_name(self): with self.assertRaisesMessage(ValueError, 'Found duplicate value objects in CreateModel managers argument.'): migrations.CreateModel( "Pony", fields=[], managers=[ ("objects", models.Manager()), ("objects", models.Manager()), ], ) def test_create_model_with_unique_after(self): """ Tests the CreateModel operation directly followed by an AlterUniqueTogether (bug #22844 - sqlite remake issues) """ operation1 = migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.IntegerField(default=1)), ], ) operation2 = migrations.CreateModel( "Rider", [ ("id", models.AutoField(primary_key=True)), ("number", models.IntegerField(default=1)), ("pony", models.ForeignKey("test_crmoua.Pony", models.CASCADE)), ], ) operation3 = migrations.AlterUniqueTogether( "Rider", [ ("number", "pony"), ], ) # Test the database alteration project_state = ProjectState() self.assertTableNotExists("test_crmoua_pony") self.assertTableNotExists("test_crmoua_rider") with connection.schema_editor() as editor: new_state = project_state.clone() operation1.state_forwards("test_crmoua", new_state) operation1.database_forwards("test_crmoua", editor, project_state, new_state) project_state, new_state = new_state, new_state.clone() operation2.state_forwards("test_crmoua", new_state) operation2.database_forwards("test_crmoua", editor, project_state, new_state) project_state, new_state = new_state, new_state.clone() operation3.state_forwards("test_crmoua", new_state) operation3.database_forwards("test_crmoua", editor, project_state, new_state) self.assertTableExists("test_crmoua_pony") self.assertTableExists("test_crmoua_rider") def test_create_model_m2m(self): """ Test the creation of a model with a ManyToMany field and the auto-created "through" model. 
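
        The auto-created through table must be created and dropped alongside
        the model; the model's own table must not gain a "ponies" column.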
""" project_state = self.set_up_test_model("test_crmomm") operation = migrations.CreateModel( "Stable", [ ("id", models.AutoField(primary_key=True)), ("ponies", models.ManyToManyField("Pony", related_name="stables")) ] ) # Test the state alteration new_state = project_state.clone() operation.state_forwards("test_crmomm", new_state) # Test the database alteration self.assertTableNotExists("test_crmomm_stable_ponies") with connection.schema_editor() as editor: operation.database_forwards("test_crmomm", editor, project_state, new_state) self.assertTableExists("test_crmomm_stable") self.assertTableExists("test_crmomm_stable_ponies") self.assertColumnNotExists("test_crmomm_stable", "ponies") # Make sure the M2M field actually works with atomic(): Pony = new_state.apps.get_model("test_crmomm", "Pony") Stable = new_state.apps.get_model("test_crmomm", "Stable") stable = Stable.objects.create() p1 = Pony.objects.create(pink=False, weight=4.55) p2 = Pony.objects.create(pink=True, weight=5.43) stable.ponies.add(p1, p2) self.assertEqual(stable.ponies.count(), 2) stable.ponies.all().delete() # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crmomm", editor, new_state, project_state) self.assertTableNotExists("test_crmomm_stable") self.assertTableNotExists("test_crmomm_stable_ponies") def test_create_model_inheritance(self): """ Tests the CreateModel operation on a multi-table inheritance setup. """ project_state = self.set_up_test_model("test_crmoih") # Test the state alteration operation = migrations.CreateModel( "ShetlandPony", [ ('pony_ptr', models.OneToOneField( 'test_crmoih.Pony', models.CASCADE, auto_created=True, primary_key=True, to_field='id', serialize=False, )), ("cuteness", models.IntegerField(default=1)), ], ) new_state = project_state.clone() operation.state_forwards("test_crmoih", new_state) self.assertIn(("test_crmoih", "shetlandpony"), new_state.models) # Test the database alteration self.assertTableNotExists("test_crmoih_shetlandpony") with connection.schema_editor() as editor: operation.database_forwards("test_crmoih", editor, project_state, new_state) self.assertTableExists("test_crmoih_shetlandpony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crmoih", editor, new_state, project_state) self.assertTableNotExists("test_crmoih_shetlandpony") def test_create_proxy_model(self): """ CreateModel ignores proxy models. 
""" project_state = self.set_up_test_model("test_crprmo") # Test the state alteration operation = migrations.CreateModel( "ProxyPony", [], options={"proxy": True}, bases=("test_crprmo.Pony",), ) self.assertEqual(operation.describe(), "Create proxy model ProxyPony") new_state = project_state.clone() operation.state_forwards("test_crprmo", new_state) self.assertIn(("test_crprmo", "proxypony"), new_state.models) # Test the database alteration self.assertTableNotExists("test_crprmo_proxypony") self.assertTableExists("test_crprmo_pony") with connection.schema_editor() as editor: operation.database_forwards("test_crprmo", editor, project_state, new_state) self.assertTableNotExists("test_crprmo_proxypony") self.assertTableExists("test_crprmo_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crprmo", editor, new_state, project_state) self.assertTableNotExists("test_crprmo_proxypony") self.assertTableExists("test_crprmo_pony") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "CreateModel") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["bases", "fields", "name", "options"]) def test_create_unmanaged_model(self): """ CreateModel ignores unmanaged models. """ project_state = self.set_up_test_model("test_crummo") # Test the state alteration operation = migrations.CreateModel( "UnmanagedPony", [], options={"proxy": True}, bases=("test_crummo.Pony",), ) self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony") new_state = project_state.clone() operation.state_forwards("test_crummo", new_state) self.assertIn(("test_crummo", "unmanagedpony"), new_state.models) # Test the database alteration self.assertTableNotExists("test_crummo_unmanagedpony") self.assertTableExists("test_crummo_pony") with connection.schema_editor() as editor: operation.database_forwards("test_crummo", editor, project_state, new_state) self.assertTableNotExists("test_crummo_unmanagedpony") self.assertTableExists("test_crummo_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crummo", editor, new_state, project_state) self.assertTableNotExists("test_crummo_unmanagedpony") self.assertTableExists("test_crummo_pony") @skipUnlessDBFeature('supports_table_check_constraints') def test_create_model_with_constraint(self): where = models.Q(pink__gt=2) check_constraint = models.CheckConstraint(check=where, name='test_constraint_pony_pink_gt_2') operation = migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.IntegerField(default=3)), ], options={'constraints': [check_constraint]}, ) # Test the state alteration project_state = ProjectState() new_state = project_state.clone() operation.state_forwards("test_crmo", new_state) self.assertEqual(len(new_state.models['test_crmo', 'pony'].options['constraints']), 1) # Test database alteration self.assertTableNotExists("test_crmo_pony") with connection.schema_editor() as editor: operation.database_forwards("test_crmo", editor, project_state, new_state) self.assertTableExists("test_crmo_pony") with connection.cursor() as cursor: with self.assertRaises(IntegrityError): cursor.execute("INSERT INTO test_crmo_pony (id, pink) VALUES (1, 1)") # Test reversal with connection.schema_editor() as editor: operation.database_backwards("test_crmo", editor, new_state, project_state) self.assertTableNotExists("test_crmo_pony") # Test deconstruction definition = operation.deconstruct() 
self.assertEqual(definition[0], "CreateModel") self.assertEqual(definition[1], []) self.assertEqual(definition[2]['options']['constraints'], [check_constraint]) def test_create_model_with_partial_unique_constraint(self): partial_unique_constraint = models.UniqueConstraint( fields=['pink'], condition=models.Q(weight__gt=5), name='test_constraint_pony_pink_for_weight_gt_5_uniq', ) operation = migrations.CreateModel( 'Pony', [ ('id', models.AutoField(primary_key=True)), ('pink', models.IntegerField(default=3)), ('weight', models.FloatField()), ], options={'constraints': [partial_unique_constraint]}, ) # Test the state alteration project_state = ProjectState() new_state = project_state.clone() operation.state_forwards('test_crmo', new_state) self.assertEqual(len(new_state.models['test_crmo', 'pony'].options['constraints']), 1) # Test database alteration self.assertTableNotExists('test_crmo_pony') with connection.schema_editor() as editor: operation.database_forwards('test_crmo', editor, project_state, new_state) self.assertTableExists('test_crmo_pony') # Test constraint works Pony = new_state.apps.get_model('test_crmo', 'Pony') Pony.objects.create(pink=1, weight=4.0) Pony.objects.create(pink=1, weight=4.0) Pony.objects.create(pink=1, weight=6.0) if connection.features.supports_partial_indexes: with self.assertRaises(IntegrityError): Pony.objects.create(pink=1, weight=7.0) else: Pony.objects.create(pink=1, weight=7.0) # Test reversal with connection.schema_editor() as editor: operation.database_backwards('test_crmo', editor, new_state, project_state) self.assertTableNotExists('test_crmo_pony') # Test deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], 'CreateModel') self.assertEqual(definition[1], []) self.assertEqual(definition[2]['options']['constraints'], [partial_unique_constraint]) def test_create_model_managers(self): """ The managers on a model are set. """ project_state = self.set_up_test_model("test_cmoma") # Test the state alteration operation = migrations.CreateModel( "Food", fields=[ ("id", models.AutoField(primary_key=True)), ], managers=[ ("food_qs", FoodQuerySet.as_manager()), ("food_mgr", FoodManager("a", "b")), ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)), ] ) self.assertEqual(operation.describe(), "Create model Food") new_state = project_state.clone() operation.state_forwards("test_cmoma", new_state) self.assertIn(("test_cmoma", "food"), new_state.models) managers = new_state.models["test_cmoma", "food"].managers self.assertEqual(managers[0][0], "food_qs") self.assertIsInstance(managers[0][1], models.Manager) self.assertEqual(managers[1][0], "food_mgr") self.assertIsInstance(managers[1][1], FoodManager) self.assertEqual(managers[1][1].args, ("a", "b", 1, 2)) self.assertEqual(managers[2][0], "food_mgr_kwargs") self.assertIsInstance(managers[2][1], FoodManager) self.assertEqual(managers[2][1].args, ("x", "y", 3, 4)) def test_delete_model(self): """ Tests the DeleteModel operation. 
""" project_state = self.set_up_test_model("test_dlmo") # Test the state alteration operation = migrations.DeleteModel("Pony") self.assertEqual(operation.describe(), "Delete model Pony") new_state = project_state.clone() operation.state_forwards("test_dlmo", new_state) self.assertNotIn(("test_dlmo", "pony"), new_state.models) # Test the database alteration self.assertTableExists("test_dlmo_pony") with connection.schema_editor() as editor: operation.database_forwards("test_dlmo", editor, project_state, new_state) self.assertTableNotExists("test_dlmo_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_dlmo", editor, new_state, project_state) self.assertTableExists("test_dlmo_pony") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "DeleteModel") self.assertEqual(definition[1], []) self.assertEqual(list(definition[2]), ["name"]) def test_delete_proxy_model(self): """ Tests the DeleteModel operation ignores proxy models. """ project_state = self.set_up_test_model("test_dlprmo", proxy_model=True) # Test the state alteration operation = migrations.DeleteModel("ProxyPony") new_state = project_state.clone() operation.state_forwards("test_dlprmo", new_state) self.assertIn(("test_dlprmo", "proxypony"), project_state.models) self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models) # Test the database alteration self.assertTableExists("test_dlprmo_pony") self.assertTableNotExists("test_dlprmo_proxypony") with connection.schema_editor() as editor: operation.database_forwards("test_dlprmo", editor, project_state, new_state) self.assertTableExists("test_dlprmo_pony") self.assertTableNotExists("test_dlprmo_proxypony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_dlprmo", editor, new_state, project_state) self.assertTableExists("test_dlprmo_pony") self.assertTableNotExists("test_dlprmo_proxypony") def test_delete_mti_model(self): project_state = self.set_up_test_model('test_dlmtimo', mti_model=True) # Test the state alteration operation = migrations.DeleteModel('ShetlandPony') new_state = project_state.clone() operation.state_forwards('test_dlmtimo', new_state) self.assertIn(('test_dlmtimo', 'shetlandpony'), project_state.models) self.assertNotIn(('test_dlmtimo', 'shetlandpony'), new_state.models) # Test the database alteration self.assertTableExists('test_dlmtimo_pony') self.assertTableExists('test_dlmtimo_shetlandpony') self.assertColumnExists('test_dlmtimo_shetlandpony', 'pony_ptr_id') with connection.schema_editor() as editor: operation.database_forwards('test_dlmtimo', editor, project_state, new_state) self.assertTableExists('test_dlmtimo_pony') self.assertTableNotExists('test_dlmtimo_shetlandpony') # And test reversal with connection.schema_editor() as editor: operation.database_backwards('test_dlmtimo', editor, new_state, project_state) self.assertTableExists('test_dlmtimo_pony') self.assertTableExists('test_dlmtimo_shetlandpony') self.assertColumnExists('test_dlmtimo_shetlandpony', 'pony_ptr_id') def test_rename_model(self): """ Tests the RenameModel operation. 
""" project_state = self.set_up_test_model("test_rnmo", related_model=True) # Test the state alteration operation = migrations.RenameModel("Pony", "Horse") self.assertEqual(operation.describe(), "Rename model Pony to Horse") # Test initial state and database self.assertIn(("test_rnmo", "pony"), project_state.models) self.assertNotIn(("test_rnmo", "horse"), project_state.models) self.assertTableExists("test_rnmo_pony") self.assertTableNotExists("test_rnmo_horse") if connection.features.supports_foreign_keys: self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")) self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")) # Migrate forwards new_state = project_state.clone() atomic_rename = connection.features.supports_atomic_references_rename new_state = self.apply_operations("test_rnmo", new_state, [operation], atomic=atomic_rename) # Test new state and database self.assertNotIn(("test_rnmo", "pony"), new_state.models) self.assertIn(("test_rnmo", "horse"), new_state.models) # RenameModel also repoints all incoming FKs and M2Ms self.assertEqual("test_rnmo.Horse", new_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model) self.assertTableNotExists("test_rnmo_pony") self.assertTableExists("test_rnmo_horse") if connection.features.supports_foreign_keys: self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")) self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")) # Migrate backwards original_state = self.unapply_operations("test_rnmo", project_state, [operation], atomic=atomic_rename) # Test original state and database self.assertIn(("test_rnmo", "pony"), original_state.models) self.assertNotIn(("test_rnmo", "horse"), original_state.models) self.assertEqual("Pony", original_state.models["test_rnmo", "rider"].fields[1][1].remote_field.model) self.assertTableExists("test_rnmo_pony") self.assertTableNotExists("test_rnmo_horse") if connection.features.supports_foreign_keys: self.assertFKExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")) self.assertFKNotExists("test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RenameModel") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'old_name': "Pony", 'new_name': "Horse"}) def test_rename_model_state_forwards(self): """ RenameModel operations shouldn't trigger the caching of rendered apps on state without prior apps. """ state = ProjectState() state.add_model(ModelState('migrations', 'Foo', [])) operation = migrations.RenameModel('Foo', 'Bar') operation.state_forwards('migrations', state) self.assertNotIn('apps', state.__dict__) self.assertNotIn(('migrations', 'foo'), state.models) self.assertIn(('migrations', 'bar'), state.models) # Now with apps cached. apps = state.apps operation = migrations.RenameModel('Bar', 'Foo') operation.state_forwards('migrations', state) self.assertIs(state.apps, apps) self.assertNotIn(('migrations', 'bar'), state.models) self.assertIn(('migrations', 'foo'), state.models) def test_rename_model_with_self_referential_fk(self): """ Tests the RenameModel operation on model with self referential FK. 
""" project_state = self.set_up_test_model("test_rmwsrf", related_model=True) # Test the state alteration operation = migrations.RenameModel("Rider", "HorseRider") self.assertEqual(operation.describe(), "Rename model Rider to HorseRider") new_state = project_state.clone() operation.state_forwards("test_rmwsrf", new_state) self.assertNotIn(("test_rmwsrf", "rider"), new_state.models) self.assertIn(("test_rmwsrf", "horserider"), new_state.models) # Remember, RenameModel also repoints all incoming FKs and M2Ms self.assertEqual( 'self', new_state.models["test_rmwsrf", "horserider"].fields[2][1].remote_field.model ) HorseRider = new_state.apps.get_model('test_rmwsrf', 'horserider') self.assertIs(HorseRider._meta.get_field('horserider').remote_field.model, HorseRider) # Test the database alteration self.assertTableExists("test_rmwsrf_rider") self.assertTableNotExists("test_rmwsrf_horserider") if connection.features.supports_foreign_keys: self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")) self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")) atomic_rename = connection.features.supports_atomic_references_rename with connection.schema_editor(atomic=atomic_rename) as editor: operation.database_forwards("test_rmwsrf", editor, project_state, new_state) self.assertTableNotExists("test_rmwsrf_rider") self.assertTableExists("test_rmwsrf_horserider") if connection.features.supports_foreign_keys: self.assertFKNotExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id")) self.assertFKExists("test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_horserider", "id")) # And test reversal with connection.schema_editor(atomic=atomic_rename) as editor: operation.database_backwards("test_rmwsrf", editor, new_state, project_state) self.assertTableExists("test_rmwsrf_rider") self.assertTableNotExists("test_rmwsrf_horserider") if connection.features.supports_foreign_keys: self.assertFKExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")) self.assertFKNotExists("test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")) def test_rename_model_with_superclass_fk(self): """ Tests the RenameModel operation on a model which has a superclass that has a foreign key. 
""" project_state = self.set_up_test_model("test_rmwsc", related_model=True, mti_model=True) # Test the state alteration operation = migrations.RenameModel("ShetlandPony", "LittleHorse") self.assertEqual(operation.describe(), "Rename model ShetlandPony to LittleHorse") new_state = project_state.clone() operation.state_forwards("test_rmwsc", new_state) self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models) self.assertIn(("test_rmwsc", "littlehorse"), new_state.models) # RenameModel shouldn't repoint the superclass's relations, only local ones self.assertEqual( project_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model, new_state.models["test_rmwsc", "rider"].fields[1][1].remote_field.model ) # Before running the migration we have a table for Shetland Pony, not Little Horse self.assertTableExists("test_rmwsc_shetlandpony") self.assertTableNotExists("test_rmwsc_littlehorse") if connection.features.supports_foreign_keys: # and the foreign key on rider points to pony, not shetland pony self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")) self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id")) with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor: operation.database_forwards("test_rmwsc", editor, project_state, new_state) # Now we have a little horse table, not shetland pony self.assertTableNotExists("test_rmwsc_shetlandpony") self.assertTableExists("test_rmwsc_littlehorse") if connection.features.supports_foreign_keys: # but the Foreign keys still point at pony, not little horse self.assertFKExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")) self.assertFKNotExists("test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id")) def test_rename_model_with_self_referential_m2m(self): app_label = "test_rename_model_with_self_referential_m2m" project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel("ReflexivePony", fields=[ ("id", models.AutoField(primary_key=True)), ("ponies", models.ManyToManyField("self")), ]), ]) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.RenameModel("ReflexivePony", "ReflexivePony2"), ], atomic=connection.features.supports_atomic_references_rename) Pony = project_state.apps.get_model(app_label, "ReflexivePony2") pony = Pony.objects.create() pony.ponies.add(pony) def test_rename_model_with_m2m(self): app_label = "test_rename_model_with_m2m" project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel("Rider", fields=[ ("id", models.AutoField(primary_key=True)), ]), migrations.CreateModel("Pony", fields=[ ("id", models.AutoField(primary_key=True)), ("riders", models.ManyToManyField("Rider")), ]), ]) Pony = project_state.apps.get_model(app_label, "Pony") Rider = project_state.apps.get_model(app_label, "Rider") pony = Pony.objects.create() rider = Rider.objects.create() pony.riders.add(rider) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.RenameModel("Pony", "Pony2"), ], atomic=connection.features.supports_atomic_references_rename) Pony = project_state.apps.get_model(app_label, "Pony2") Rider = project_state.apps.get_model(app_label, "Rider") pony = Pony.objects.create() rider = Rider.objects.create() pony.riders.add(rider) self.assertEqual(Pony.objects.count(), 2) self.assertEqual(Rider.objects.count(), 2) 
self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2) def test_rename_m2m_target_model(self): app_label = "test_rename_m2m_target_model" project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel("Rider", fields=[ ("id", models.AutoField(primary_key=True)), ]), migrations.CreateModel("Pony", fields=[ ("id", models.AutoField(primary_key=True)), ("riders", models.ManyToManyField("Rider")), ]), ]) Pony = project_state.apps.get_model(app_label, "Pony") Rider = project_state.apps.get_model(app_label, "Rider") pony = Pony.objects.create() rider = Rider.objects.create() pony.riders.add(rider) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.RenameModel("Rider", "Rider2"), ], atomic=connection.features.supports_atomic_references_rename) Pony = project_state.apps.get_model(app_label, "Pony") Rider = project_state.apps.get_model(app_label, "Rider2") pony = Pony.objects.create() rider = Rider.objects.create() pony.riders.add(rider) self.assertEqual(Pony.objects.count(), 2) self.assertEqual(Rider.objects.count(), 2) self.assertEqual(Pony._meta.get_field('riders').remote_field.through.objects.count(), 2) def test_rename_m2m_through_model(self): app_label = "test_rename_through" project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel("Rider", fields=[ ("id", models.AutoField(primary_key=True)), ]), migrations.CreateModel("Pony", fields=[ ("id", models.AutoField(primary_key=True)), ]), migrations.CreateModel("PonyRider", fields=[ ("id", models.AutoField(primary_key=True)), ("rider", models.ForeignKey("test_rename_through.Rider", models.CASCADE)), ("pony", models.ForeignKey("test_rename_through.Pony", models.CASCADE)), ]), migrations.AddField( "Pony", "riders", models.ManyToManyField("test_rename_through.Rider", through="test_rename_through.PonyRider"), ), ]) Pony = project_state.apps.get_model(app_label, "Pony") Rider = project_state.apps.get_model(app_label, "Rider") PonyRider = project_state.apps.get_model(app_label, "PonyRider") pony = Pony.objects.create() rider = Rider.objects.create() PonyRider.objects.create(pony=pony, rider=rider) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.RenameModel("PonyRider", "PonyRider2"), ]) Pony = project_state.apps.get_model(app_label, "Pony") Rider = project_state.apps.get_model(app_label, "Rider") PonyRider = project_state.apps.get_model(app_label, "PonyRider2") pony = Pony.objects.first() rider = Rider.objects.create() PonyRider.objects.create(pony=pony, rider=rider) self.assertEqual(Pony.objects.count(), 1) self.assertEqual(Rider.objects.count(), 2) self.assertEqual(PonyRider.objects.count(), 2) self.assertEqual(pony.riders.count(), 2) def test_rename_m2m_model_after_rename_field(self): """RenameModel renames a many-to-many column after a RenameField.""" app_label = 'test_rename_multiple' project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel('Pony', fields=[ ('id', models.AutoField(primary_key=True)), ('name', models.CharField(max_length=20)), ]), migrations.CreateModel('Rider', fields=[ ('id', models.AutoField(primary_key=True)), ('pony', models.ForeignKey('test_rename_multiple.Pony', models.CASCADE)), ]), migrations.CreateModel('PonyRider', fields=[ ('id', models.AutoField(primary_key=True)), ('riders', models.ManyToManyField('Rider')), ]), migrations.RenameField(model_name='pony', old_name='name', 
                new_name='fancy_name'),
            migrations.RenameModel(old_name='Rider', new_name='Jockey'),
        ], atomic=connection.features.supports_atomic_references_rename)
        Pony = project_state.apps.get_model(app_label, 'Pony')
        Jockey = project_state.apps.get_model(app_label, 'Jockey')
        PonyRider = project_state.apps.get_model(app_label, 'PonyRider')
        # No "no such column" error means the column was renamed correctly.
        pony = Pony.objects.create(fancy_name='a good name')
        jockey = Jockey.objects.create(pony=pony)
        ponyrider = PonyRider.objects.create()
        ponyrider.riders.add(jockey)

    def test_add_field(self):
        """
        Tests the AddField operation.
        """
        # Test the state alteration
        operation = migrations.AddField(
            "Pony",
            "height",
            models.FloatField(null=True, default=5),
        )
        self.assertEqual(operation.describe(), "Add field height to Pony")
        project_state, new_state = self.make_test_state("test_adfl", operation)
        self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 4)
        field = [
            f for n, f in new_state.models["test_adfl", "pony"].fields
            if n == "height"
        ][0]
        self.assertEqual(field.default, 5)
        # Test the database alteration
        self.assertColumnNotExists("test_adfl_pony", "height")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_adfl", editor, project_state, new_state)
        self.assertColumnExists("test_adfl_pony", "height")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_adfl", editor, new_state, project_state)
        self.assertColumnNotExists("test_adfl_pony", "height")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddField")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])

    def test_add_charfield(self):
        """
        Tests the AddField operation on CharField.
        """
        project_state = self.set_up_test_model("test_adchfl")

        Pony = project_state.apps.get_model("test_adchfl", "Pony")
        pony = Pony.objects.create(weight=42)

        new_state = self.apply_operations("test_adchfl", project_state, [
            migrations.AddField(
                "Pony",
                "text",
                models.CharField(max_length=10, default="some text"),
            ),
            migrations.AddField(
                "Pony",
                "empty",
                models.CharField(max_length=10, default=""),
            ),
            # If not properly quoted, digits would be interpreted as an int.
            migrations.AddField(
                "Pony",
                "digits",
                models.CharField(max_length=10, default="42"),
            ),
            # Manual quoting is fragile and could trip on quotes. Refs #xyz.
            migrations.AddField(
                "Pony",
                "quotes",
                models.CharField(max_length=10, default='"\'"'),
            ),
        ])

        Pony = new_state.apps.get_model("test_adchfl", "Pony")
        pony = Pony.objects.get(pk=pony.pk)
        self.assertEqual(pony.text, "some text")
        self.assertEqual(pony.empty, "")
        self.assertEqual(pony.digits, "42")
        self.assertEqual(pony.quotes, '"\'"')

    def test_add_textfield(self):
        """
        Tests the AddField operation on TextField.
        """
        project_state = self.set_up_test_model("test_adtxtfl")

        Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
        pony = Pony.objects.create(weight=42)

        new_state = self.apply_operations("test_adtxtfl", project_state, [
            migrations.AddField(
                "Pony",
                "text",
                models.TextField(default="some text"),
            ),
            migrations.AddField(
                "Pony",
                "empty",
                models.TextField(default=""),
            ),
            # If not properly quoted, digits would be interpreted as an int.
            migrations.AddField(
                "Pony",
                "digits",
                models.TextField(default="42"),
            ),
            # Manual quoting is fragile and could trip on quotes. Refs #xyz.
            migrations.AddField(
                "Pony",
                "quotes",
                models.TextField(default='"\'"'),
            ),
        ])

        Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
        pony = Pony.objects.get(pk=pony.pk)
        self.assertEqual(pony.text, "some text")
        self.assertEqual(pony.empty, "")
        self.assertEqual(pony.digits, "42")
        self.assertEqual(pony.quotes, '"\'"')

    def test_add_binaryfield(self):
        """
        Tests the AddField operation on BinaryField.
        """
        project_state = self.set_up_test_model("test_adbinfl")

        Pony = project_state.apps.get_model("test_adbinfl", "Pony")
        pony = Pony.objects.create(weight=42)

        new_state = self.apply_operations("test_adbinfl", project_state, [
            migrations.AddField(
                "Pony",
                "blob",
                models.BinaryField(default=b"some text"),
            ),
            migrations.AddField(
                "Pony",
                "empty",
                models.BinaryField(default=b""),
            ),
            # If not properly quoted, digits would be interpreted as an int.
            migrations.AddField(
                "Pony",
                "digits",
                models.BinaryField(default=b"42"),
            ),
            # Manual quoting is fragile and could trip on quotes. Refs #xyz.
            migrations.AddField(
                "Pony",
                "quotes",
                models.BinaryField(default=b'"\'"'),
            ),
        ])

        Pony = new_state.apps.get_model("test_adbinfl", "Pony")
        pony = Pony.objects.get(pk=pony.pk)
        # SQLite returns buffer/memoryview, cast to bytes for checking.
        self.assertEqual(bytes(pony.blob), b"some text")
        self.assertEqual(bytes(pony.empty), b"")
        self.assertEqual(bytes(pony.digits), b"42")
        self.assertEqual(bytes(pony.quotes), b'"\'"')

    def test_column_name_quoting(self):
        """
        Column names that are SQL keywords shouldn't cause problems when used
        in migrations (#22168).
        """
        project_state = self.set_up_test_model("test_regr22168")
        operation = migrations.AddField(
            "Pony",
            "order",
            models.IntegerField(default=0),
        )
        new_state = project_state.clone()
        operation.state_forwards("test_regr22168", new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards("test_regr22168", editor, project_state, new_state)
        self.assertColumnExists("test_regr22168_pony", "order")

    def test_add_field_preserve_default(self):
        """
        Tests the AddField operation's state alteration
        when preserve_default = False.
        """
        project_state = self.set_up_test_model("test_adflpd")
        # Test the state alteration
        operation = migrations.AddField(
            "Pony",
            "height",
            models.FloatField(null=True, default=4),
            preserve_default=False,
        )
        new_state = project_state.clone()
        operation.state_forwards("test_adflpd", new_state)
        self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 4)
        field = [
            f for n, f in new_state.models["test_adflpd", "pony"].fields
            if n == "height"
        ][0]
        self.assertEqual(field.default, models.NOT_PROVIDED)
        # Test the database alteration
        project_state.apps.get_model("test_adflpd", "pony").objects.create(
            weight=4,
        )
        self.assertColumnNotExists("test_adflpd_pony", "height")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_adflpd", editor, project_state, new_state)
        self.assertColumnExists("test_adflpd_pony", "height")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddField")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["field", "model_name", "name", "preserve_default"])

    def test_add_field_m2m(self):
        """
        Tests the AddField operation with a ManyToManyField.
""" project_state = self.set_up_test_model("test_adflmm", second_model=True) # Test the state alteration operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")) new_state = project_state.clone() operation.state_forwards("test_adflmm", new_state) self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 4) # Test the database alteration self.assertTableNotExists("test_adflmm_pony_stables") with connection.schema_editor() as editor: operation.database_forwards("test_adflmm", editor, project_state, new_state) self.assertTableExists("test_adflmm_pony_stables") self.assertColumnNotExists("test_adflmm_pony", "stables") # Make sure the M2M field actually works with atomic(): Pony = new_state.apps.get_model("test_adflmm", "Pony") p = Pony.objects.create(pink=False, weight=4.55) p.stables.create() self.assertEqual(p.stables.count(), 1) p.stables.all().delete() # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_adflmm", editor, new_state, project_state) self.assertTableNotExists("test_adflmm_pony_stables") def test_alter_field_m2m(self): project_state = self.set_up_test_model("test_alflmm", second_model=True) project_state = self.apply_operations("test_alflmm", project_state, operations=[ migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")) ]) Pony = project_state.apps.get_model("test_alflmm", "Pony") self.assertFalse(Pony._meta.get_field('stables').blank) project_state = self.apply_operations("test_alflmm", project_state, operations=[ migrations.AlterField( "Pony", "stables", models.ManyToManyField(to="Stable", related_name="ponies", blank=True) ) ]) Pony = project_state.apps.get_model("test_alflmm", "Pony") self.assertTrue(Pony._meta.get_field('stables').blank) def test_repoint_field_m2m(self): project_state = self.set_up_test_model("test_alflmm", second_model=True, third_model=True) project_state = self.apply_operations("test_alflmm", project_state, operations=[ migrations.AddField("Pony", "places", models.ManyToManyField("Stable", related_name="ponies")) ]) Pony = project_state.apps.get_model("test_alflmm", "Pony") project_state = self.apply_operations("test_alflmm", project_state, operations=[ migrations.AlterField("Pony", "places", models.ManyToManyField(to="Van", related_name="ponies")) ]) # Ensure the new field actually works Pony = project_state.apps.get_model("test_alflmm", "Pony") p = Pony.objects.create(pink=False, weight=4.55) p.places.create() self.assertEqual(p.places.count(), 1) p.places.all().delete() def test_remove_field_m2m(self): project_state = self.set_up_test_model("test_rmflmm", second_model=True) project_state = self.apply_operations("test_rmflmm", project_state, operations=[ migrations.AddField("Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")) ]) self.assertTableExists("test_rmflmm_pony_stables") with_field_state = project_state.clone() operations = [migrations.RemoveField("Pony", "stables")] project_state = self.apply_operations("test_rmflmm", project_state, operations=operations) self.assertTableNotExists("test_rmflmm_pony_stables") # And test reversal self.unapply_operations("test_rmflmm", with_field_state, operations=operations) self.assertTableExists("test_rmflmm_pony_stables") def test_remove_field_m2m_with_through(self): project_state = self.set_up_test_model("test_rmflmmwt", second_model=True) self.assertTableNotExists("test_rmflmmwt_ponystables") project_state = 
self.apply_operations("test_rmflmmwt", project_state, operations=[ migrations.CreateModel("PonyStables", fields=[ ("pony", models.ForeignKey('test_rmflmmwt.Pony', models.CASCADE)), ("stable", models.ForeignKey('test_rmflmmwt.Stable', models.CASCADE)), ]), migrations.AddField( "Pony", "stables", models.ManyToManyField("Stable", related_name="ponies", through='test_rmflmmwt.PonyStables') ) ]) self.assertTableExists("test_rmflmmwt_ponystables") operations = [migrations.RemoveField("Pony", "stables"), migrations.DeleteModel("PonyStables")] self.apply_operations("test_rmflmmwt", project_state, operations=operations) def test_remove_field(self): """ Tests the RemoveField operation. """ project_state = self.set_up_test_model("test_rmfl") # Test the state alteration operation = migrations.RemoveField("Pony", "pink") self.assertEqual(operation.describe(), "Remove field pink from Pony") new_state = project_state.clone() operation.state_forwards("test_rmfl", new_state) self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 2) # Test the database alteration self.assertColumnExists("test_rmfl_pony", "pink") with connection.schema_editor() as editor: operation.database_forwards("test_rmfl", editor, project_state, new_state) self.assertColumnNotExists("test_rmfl_pony", "pink") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_rmfl", editor, new_state, project_state) self.assertColumnExists("test_rmfl_pony", "pink") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RemoveField") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': "Pony", 'name': 'pink'}) def test_remove_fk(self): """ Tests the RemoveField operation on a foreign key. """ project_state = self.set_up_test_model("test_rfk", related_model=True) self.assertColumnExists("test_rfk_rider", "pony_id") operation = migrations.RemoveField("Rider", "pony") new_state = project_state.clone() operation.state_forwards("test_rfk", new_state) with connection.schema_editor() as editor: operation.database_forwards("test_rfk", editor, project_state, new_state) self.assertColumnNotExists("test_rfk_rider", "pony_id") with connection.schema_editor() as editor: operation.database_backwards("test_rfk", editor, new_state, project_state) self.assertColumnExists("test_rfk_rider", "pony_id") def test_alter_model_table(self): """ Tests the AlterModelTable operation. 
""" project_state = self.set_up_test_model("test_almota") # Test the state alteration operation = migrations.AlterModelTable("Pony", "test_almota_pony_2") self.assertEqual(operation.describe(), "Rename table for Pony to test_almota_pony_2") new_state = project_state.clone() operation.state_forwards("test_almota", new_state) self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony_2") # Test the database alteration self.assertTableExists("test_almota_pony") self.assertTableNotExists("test_almota_pony_2") with connection.schema_editor() as editor: operation.database_forwards("test_almota", editor, project_state, new_state) self.assertTableNotExists("test_almota_pony") self.assertTableExists("test_almota_pony_2") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_almota", editor, new_state, project_state) self.assertTableExists("test_almota_pony") self.assertTableNotExists("test_almota_pony_2") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterModelTable") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'name': "Pony", 'table': "test_almota_pony_2"}) def test_alter_model_table_none(self): """ Tests the AlterModelTable operation if the table name is set to None. """ operation = migrations.AlterModelTable("Pony", None) self.assertEqual(operation.describe(), "Rename table for Pony to (default)") def test_alter_model_table_noop(self): """ Tests the AlterModelTable operation if the table name is not changed. """ project_state = self.set_up_test_model("test_almota") # Test the state alteration operation = migrations.AlterModelTable("Pony", "test_almota_pony") new_state = project_state.clone() operation.state_forwards("test_almota", new_state) self.assertEqual(new_state.models["test_almota", "pony"].options["db_table"], "test_almota_pony") # Test the database alteration self.assertTableExists("test_almota_pony") with connection.schema_editor() as editor: operation.database_forwards("test_almota", editor, project_state, new_state) self.assertTableExists("test_almota_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_almota", editor, new_state, project_state) self.assertTableExists("test_almota_pony") def test_alter_model_table_m2m(self): """ AlterModelTable should rename auto-generated M2M tables. """ app_label = "test_talflmltlm2m" pony_db_table = 'pony_foo' project_state = self.set_up_test_model(app_label, second_model=True, db_table=pony_db_table) # Add the M2M field first_state = project_state.clone() operation = migrations.AddField("Pony", "stables", models.ManyToManyField("Stable")) operation.state_forwards(app_label, first_state) with connection.schema_editor() as editor: operation.database_forwards(app_label, editor, project_state, first_state) original_m2m_table = "%s_%s" % (pony_db_table, "stables") new_m2m_table = "%s_%s" % (app_label, "pony_stables") self.assertTableExists(original_m2m_table) self.assertTableNotExists(new_m2m_table) # Rename the Pony db_table which should also rename the m2m table. 
second_state = first_state.clone() operation = migrations.AlterModelTable(name='pony', table=None) operation.state_forwards(app_label, second_state) atomic_rename = connection.features.supports_atomic_references_rename with connection.schema_editor(atomic=atomic_rename) as editor: operation.database_forwards(app_label, editor, first_state, second_state) self.assertTableExists(new_m2m_table) self.assertTableNotExists(original_m2m_table) # And test reversal with connection.schema_editor(atomic=atomic_rename) as editor: operation.database_backwards(app_label, editor, second_state, first_state) self.assertTableExists(original_m2m_table) self.assertTableNotExists(new_m2m_table) def test_alter_field(self): """ Tests the AlterField operation. """ project_state = self.set_up_test_model("test_alfl") # Test the state alteration operation = migrations.AlterField("Pony", "pink", models.IntegerField(null=True)) self.assertEqual(operation.describe(), "Alter field pink on Pony") new_state = project_state.clone() operation.state_forwards("test_alfl", new_state) self.assertIs(project_state.models["test_alfl", "pony"].get_field_by_name("pink").null, False) self.assertIs(new_state.models["test_alfl", "pony"].get_field_by_name("pink").null, True) # Test the database alteration self.assertColumnNotNull("test_alfl_pony", "pink") with connection.schema_editor() as editor: operation.database_forwards("test_alfl", editor, project_state, new_state) self.assertColumnNull("test_alfl_pony", "pink") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alfl", editor, new_state, project_state) self.assertColumnNotNull("test_alfl_pony", "pink") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterField") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"]) def test_alter_field_pk(self): """ Tests the AlterField operation on primary keys (for things like PostgreSQL's SERIAL weirdness) """ project_state = self.set_up_test_model("test_alflpk") # Test the state alteration operation = migrations.AlterField("Pony", "id", models.IntegerField(primary_key=True)) new_state = project_state.clone() operation.state_forwards("test_alflpk", new_state) self.assertIsInstance(project_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.AutoField) self.assertIsInstance(new_state.models["test_alflpk", "pony"].get_field_by_name("id"), models.IntegerField) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_alflpk", editor, project_state, new_state) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alflpk", editor, new_state, project_state) @skipUnlessDBFeature('supports_foreign_keys') def test_alter_field_pk_fk(self): """ Tests the AlterField operation on primary keys changes any FKs pointing to it. 
""" project_state = self.set_up_test_model("test_alflpkfk", related_model=True) # Test the state alteration operation = migrations.AlterField("Pony", "id", models.FloatField(primary_key=True)) new_state = project_state.clone() operation.state_forwards("test_alflpkfk", new_state) self.assertIsInstance(project_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.AutoField) self.assertIsInstance(new_state.models["test_alflpkfk", "pony"].get_field_by_name("id"), models.FloatField) def assertIdTypeEqualsFkType(): with connection.cursor() as cursor: id_type, id_null = [ (c.type_code, c.null_ok) for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_pony") if c.name == "id" ][0] fk_type, fk_null = [ (c.type_code, c.null_ok) for c in connection.introspection.get_table_description(cursor, "test_alflpkfk_rider") if c.name == "pony_id" ][0] self.assertEqual(id_type, fk_type) self.assertEqual(id_null, fk_null) assertIdTypeEqualsFkType() # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_alflpkfk", editor, project_state, new_state) assertIdTypeEqualsFkType() # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alflpkfk", editor, new_state, project_state) assertIdTypeEqualsFkType() @skipUnlessDBFeature('supports_foreign_keys') def test_alter_field_reloads_state_on_fk_with_to_field_target_type_change(self): app_label = 'test_alflrsfkwtflttc' project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel('Rider', fields=[ ('id', models.AutoField(primary_key=True)), ('code', models.PositiveIntegerField(unique=True)), ]), migrations.CreateModel('Pony', fields=[ ('id', models.AutoField(primary_key=True)), ('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE, to_field='code')), ]), ]) operation = migrations.AlterField( 'Rider', 'code', models.CharField(max_length=100, unique=True), ) self.apply_operations(app_label, project_state, operations=[operation]) @skipUnlessDBFeature('supports_foreign_keys') def test_alter_field_reloads_state_on_fk_with_to_field_related_name_target_type_change(self): app_label = 'test_alflrsfkwtflrnttc' project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel('Rider', fields=[ ('id', models.AutoField(primary_key=True)), ('code', models.PositiveIntegerField(unique=True)), ]), migrations.CreateModel('Pony', fields=[ ('id', models.AutoField(primary_key=True)), ('rider', models.ForeignKey( '%s.Rider' % app_label, models.CASCADE, to_field='code', related_name='+', )), ]), ]) operation = migrations.AlterField( 'Rider', 'code', models.CharField(max_length=100, unique=True), ) self.apply_operations(app_label, project_state, operations=[operation]) def test_alter_field_reloads_state_on_fk_target_changes(self): """ If AlterField doesn't reload state appropriately, the second AlterField crashes on MySQL due to not dropping the PonyRider.pony foreign key constraint before modifying the column. 
""" app_label = 'alter_alter_field_reloads_state_on_fk_target_changes' project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel('Rider', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ]), migrations.CreateModel('Pony', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE)), ]), migrations.CreateModel('PonyRider', fields=[ ('id', models.AutoField(primary_key=True)), ('pony', models.ForeignKey('%s.Pony' % app_label, models.CASCADE)), ]), ]) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.AlterField('Rider', 'id', models.CharField(primary_key=True, max_length=99)), migrations.AlterField('Pony', 'id', models.CharField(primary_key=True, max_length=99)), ]) def test_alter_field_reloads_state_on_fk_with_to_field_target_changes(self): """ If AlterField doesn't reload state appropriately, the second AlterField crashes on MySQL due to not dropping the PonyRider.pony foreign key constraint before modifying the column. """ app_label = 'alter_alter_field_reloads_state_on_fk_with_to_field_target_changes' project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel('Rider', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ('slug', models.CharField(unique=True, max_length=100)), ]), migrations.CreateModel('Pony', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE, to_field='slug')), ('slug', models.CharField(unique=True, max_length=100)), ]), migrations.CreateModel('PonyRider', fields=[ ('id', models.AutoField(primary_key=True)), ('pony', models.ForeignKey('%s.Pony' % app_label, models.CASCADE, to_field='slug')), ]), ]) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.AlterField('Rider', 'slug', models.CharField(unique=True, max_length=99)), migrations.AlterField('Pony', 'slug', models.CharField(unique=True, max_length=99)), ]) def test_rename_field_reloads_state_on_fk_target_changes(self): """ If RenameField doesn't reload state appropriately, the AlterField crashes on MySQL due to not dropping the PonyRider.pony foreign key constraint before modifying the column. """ app_label = 'alter_rename_field_reloads_state_on_fk_target_changes' project_state = self.apply_operations(app_label, ProjectState(), operations=[ migrations.CreateModel('Rider', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ]), migrations.CreateModel('Pony', fields=[ ('id', models.CharField(primary_key=True, max_length=100)), ('rider', models.ForeignKey('%s.Rider' % app_label, models.CASCADE)), ]), migrations.CreateModel('PonyRider', fields=[ ('id', models.AutoField(primary_key=True)), ('pony', models.ForeignKey('%s.Pony' % app_label, models.CASCADE)), ]), ]) project_state = self.apply_operations(app_label, project_state, operations=[ migrations.RenameField('Rider', 'id', 'id2'), migrations.AlterField('Pony', 'id', models.CharField(primary_key=True, max_length=99)), ], atomic=connection.features.supports_atomic_references_rename) def test_rename_field(self): """ Tests the RenameField operation. 
""" project_state = self.set_up_test_model("test_rnfl", unique_together=True, index_together=True) # Test the state alteration operation = migrations.RenameField("Pony", "pink", "blue") self.assertEqual(operation.describe(), "Rename field pink on Pony to blue") new_state = project_state.clone() operation.state_forwards("test_rnfl", new_state) self.assertIn("blue", [n for n, f in new_state.models["test_rnfl", "pony"].fields]) self.assertNotIn("pink", [n for n, f in new_state.models["test_rnfl", "pony"].fields]) # Make sure the unique_together has the renamed column too self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['unique_together'][0]) self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['unique_together'][0]) # Make sure the index_together has the renamed column too self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['index_together'][0]) self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['index_together'][0]) # Test the database alteration self.assertColumnExists("test_rnfl_pony", "pink") self.assertColumnNotExists("test_rnfl_pony", "blue") with connection.schema_editor() as editor: operation.database_forwards("test_rnfl", editor, project_state, new_state) self.assertColumnExists("test_rnfl_pony", "blue") self.assertColumnNotExists("test_rnfl_pony", "pink") # Ensure the unique constraint has been ported over with connection.cursor() as cursor: cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)") with self.assertRaises(IntegrityError): with atomic(): cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)") cursor.execute("DELETE FROM test_rnfl_pony") # Ensure the index constraint has been ported over self.assertIndexExists("test_rnfl_pony", ["weight", "blue"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_rnfl", editor, new_state, project_state) self.assertColumnExists("test_rnfl_pony", "pink") self.assertColumnNotExists("test_rnfl_pony", "blue") # Ensure the index constraint has been reset self.assertIndexExists("test_rnfl_pony", ["weight", "pink"]) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RenameField") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': "Pony", 'old_name': "pink", 'new_name': "blue"}) def test_rename_missing_field(self): state = ProjectState() state.add_model(ModelState('app', 'model', [])) with self.assertRaisesMessage(FieldDoesNotExist, "app.model has no field named 'field'"): migrations.RenameField('model', 'field', 'new_field').state_forwards('app', state) def test_rename_referenced_field_state_forward(self): state = ProjectState() state.add_model(ModelState('app', 'Model', [ ('id', models.AutoField(primary_key=True)), ('field', models.IntegerField(unique=True)), ])) state.add_model(ModelState('app', 'OtherModel', [ ('id', models.AutoField(primary_key=True)), ('fk', models.ForeignKey('Model', models.CASCADE, to_field='field')), ('fo', models.ForeignObject('Model', models.CASCADE, from_fields=('fk',), to_fields=('field',))), ])) operation = migrations.RenameField('Model', 'field', 'renamed') new_state = state.clone() operation.state_forwards('app', new_state) self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].remote_field.field_name, 'renamed') self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].from_fields, ['self']) self.assertEqual(new_state.models['app', 
'othermodel'].fields[1][1].to_fields, ('renamed',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].from_fields, ('fk',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].to_fields, ('renamed',)) operation = migrations.RenameField('OtherModel', 'fk', 'renamed_fk') new_state = state.clone() operation.state_forwards('app', new_state) self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].remote_field.field_name, 'renamed') self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].from_fields, ('self',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[1][1].to_fields, ('renamed',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].from_fields, ('renamed_fk',)) self.assertEqual(new_state.models['app', 'othermodel'].fields[2][1].to_fields, ('renamed',)) def test_alter_unique_together(self): """ Tests the AlterUniqueTogether operation. """ project_state = self.set_up_test_model("test_alunto") # Test the state alteration operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")]) self.assertEqual(operation.describe(), "Alter unique_together for Pony (1 constraint(s))") new_state = project_state.clone() operation.state_forwards("test_alunto", new_state) self.assertEqual(len(project_state.models["test_alunto", "pony"].options.get("unique_together", set())), 0) self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1) # Make sure we can insert duplicate rows with connection.cursor() as cursor: cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("DELETE FROM test_alunto_pony") # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_alunto", editor, project_state, new_state) cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") with self.assertRaises(IntegrityError): with atomic(): cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("DELETE FROM test_alunto_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alunto", editor, new_state, project_state) cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)") cursor.execute("DELETE FROM test_alunto_pony") # Test flat unique_together operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight")) operation.state_forwards("test_alunto", new_state) self.assertEqual(len(new_state.models["test_alunto", "pony"].options.get("unique_together", set())), 1) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterUniqueTogether") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'name': "Pony", 'unique_together': {("pink", "weight")}}) def test_alter_unique_together_remove(self): operation = migrations.AlterUniqueTogether("Pony", None) self.assertEqual(operation.describe(), "Alter unique_together for Pony (0 constraint(s))") def test_add_index(self): """ Test the AddIndex operation. """ project_state = self.set_up_test_model("test_adin") msg = ( "Indexes passed to AddIndex operations require a name argument. " "<Index: fields='pink'> doesn't have one." 
) with self.assertRaisesMessage(ValueError, msg): migrations.AddIndex("Pony", models.Index(fields=["pink"])) index = models.Index(fields=["pink"], name="test_adin_pony_pink_idx") operation = migrations.AddIndex("Pony", index) self.assertEqual(operation.describe(), "Create index test_adin_pony_pink_idx on field(s) pink of model Pony") new_state = project_state.clone() operation.state_forwards("test_adin", new_state) # Test the database alteration self.assertEqual(len(new_state.models["test_adin", "pony"].options['indexes']), 1) self.assertIndexNotExists("test_adin_pony", ["pink"]) with connection.schema_editor() as editor: operation.database_forwards("test_adin", editor, project_state, new_state) self.assertIndexExists("test_adin_pony", ["pink"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_adin", editor, new_state, project_state) self.assertIndexNotExists("test_adin_pony", ["pink"]) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AddIndex") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': "Pony", 'index': index}) def test_remove_index(self): """ Test the RemoveIndex operation. """ project_state = self.set_up_test_model("test_rmin", multicol_index=True) self.assertTableExists("test_rmin_pony") self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) operation = migrations.RemoveIndex("Pony", "pony_test_idx") self.assertEqual(operation.describe(), "Remove index pony_test_idx from Pony") new_state = project_state.clone() operation.state_forwards("test_rmin", new_state) # Test the state alteration self.assertEqual(len(new_state.models["test_rmin", "pony"].options['indexes']), 0) self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_rmin", editor, project_state, new_state) self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_rmin", editor, new_state, project_state) self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RemoveIndex") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'model_name': "Pony", 'name': "pony_test_idx"}) # Also test a field dropped with index - sqlite remake issue operations = [ migrations.RemoveIndex("Pony", "pony_test_idx"), migrations.RemoveField("Pony", "pink"), ] self.assertColumnExists("test_rmin_pony", "pink") self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) # Test database alteration new_state = project_state.clone() self.apply_operations('test_rmin', new_state, operations=operations) self.assertColumnNotExists("test_rmin_pony", "pink") self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"]) # And test reversal self.unapply_operations("test_rmin", project_state, operations=operations) self.assertIndexExists("test_rmin_pony", ["pink", "weight"]) def test_add_index_state_forwards(self): project_state = self.set_up_test_model('test_adinsf') index = models.Index(fields=['pink'], name='test_adinsf_pony_pink_idx') old_model = project_state.apps.get_model('test_adinsf', 'Pony') new_state = project_state.clone() operation = migrations.AddIndex('Pony', index) operation.state_forwards('test_adinsf', new_state) new_model = new_state.apps.get_model('test_adinsf', 'Pony') 
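        # A distinct rendered model class is evidence that state_forwards()
        # cloned the model state rather than mutating the old state in place.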
self.assertIsNot(old_model, new_model) def test_remove_index_state_forwards(self): project_state = self.set_up_test_model('test_rminsf') index = models.Index(fields=['pink'], name='test_rminsf_pony_pink_idx') migrations.AddIndex('Pony', index).state_forwards('test_rminsf', project_state) old_model = project_state.apps.get_model('test_rminsf', 'Pony') new_state = project_state.clone() operation = migrations.RemoveIndex('Pony', 'test_rminsf_pony_pink_idx') operation.state_forwards('test_rminsf', new_state) new_model = new_state.apps.get_model('test_rminsf', 'Pony') self.assertIsNot(old_model, new_model) def test_alter_field_with_index(self): """ Test AlterField operation with an index to ensure indexes created via Meta.indexes don't get dropped with sqlite3 remake. """ project_state = self.set_up_test_model("test_alflin", index=True) operation = migrations.AlterField("Pony", "pink", models.IntegerField(null=True)) new_state = project_state.clone() operation.state_forwards("test_alflin", new_state) # Test the database alteration self.assertColumnNotNull("test_alflin_pony", "pink") with connection.schema_editor() as editor: operation.database_forwards("test_alflin", editor, project_state, new_state) # Index hasn't been dropped self.assertIndexExists("test_alflin_pony", ["pink"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alflin", editor, new_state, project_state) # Ensure the index is still there self.assertIndexExists("test_alflin_pony", ["pink"]) def test_alter_index_together(self): """ Tests the AlterIndexTogether operation. """ project_state = self.set_up_test_model("test_alinto") # Test the state alteration operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")]) self.assertEqual(operation.describe(), "Alter index_together for Pony (1 constraint(s))") new_state = project_state.clone() operation.state_forwards("test_alinto", new_state) self.assertEqual(len(project_state.models["test_alinto", "pony"].options.get("index_together", set())), 0) self.assertEqual(len(new_state.models["test_alinto", "pony"].options.get("index_together", set())), 1) # Make sure there's no matching index self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"]) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_alinto", editor, project_state, new_state) self.assertIndexExists("test_alinto_pony", ["pink", "weight"]) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_alinto", editor, new_state, project_state) self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"]) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "AlterIndexTogether") self.assertEqual(definition[1], []) self.assertEqual(definition[2], {'name': "Pony", 'index_together': {("pink", "weight")}}) def test_alter_index_together_remove(self): operation = migrations.AlterIndexTogether("Pony", None) self.assertEqual(operation.describe(), "Alter index_together for Pony (0 constraint(s))") @skipUnlessDBFeature('supports_table_check_constraints') def test_add_constraint(self): project_state = self.set_up_test_model("test_addconstraint") gt_check = models.Q(pink__gt=2) gt_constraint = models.CheckConstraint(check=gt_check, name="test_add_constraint_pony_pink_gt_2") gt_operation = migrations.AddConstraint("Pony", gt_constraint) self.assertEqual( gt_operation.describe(), "Create constraint test_add_constraint_pony_pink_gt_2 on model Pony" 
        )
        # Test the state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_addconstraint", new_state)
        self.assertEqual(len(new_state.models["test_addconstraint", "pony"].options["constraints"]), 1)
        Pony = new_state.apps.get_model("test_addconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test the database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards("test_addconstraint", editor, project_state, new_state)
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=1, weight=1.0)
        # Add another one.
        lt_check = models.Q(pink__lt=100)
        lt_constraint = models.CheckConstraint(check=lt_check, name="test_add_constraint_pony_pink_lt_100")
        lt_operation = migrations.AddConstraint("Pony", lt_constraint)
        lt_operation.state_forwards("test_addconstraint", new_state)
        self.assertEqual(len(new_state.models["test_addconstraint", "pony"].options["constraints"]), 2)
        Pony = new_state.apps.get_model("test_addconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 2)
        with connection.schema_editor() as editor:
            lt_operation.database_forwards("test_addconstraint", editor, project_state, new_state)
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=100, weight=1.0)
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards("test_addconstraint", editor, new_state, project_state)
        Pony.objects.create(pink=1, weight=1.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {'model_name': "Pony", 'constraint': gt_constraint})

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_add_constraint_percent_escaping(self):
        app_label = 'add_constraint_string_quoting'
        operations = [
            CreateModel(
                'Author',
                fields=[
                    ('id', models.AutoField(primary_key=True)),
                    ('name', models.CharField(max_length=100)),
                    ('rebate', models.CharField(max_length=100)),
                ],
            ),
        ]
        from_state = self.apply_operations(app_label, ProjectState(), operations)
        # "%" generated in startswith lookup should be escaped in a way that is
        # considered a leading wildcard.
        check = models.Q(name__startswith='Albert')
        constraint = models.CheckConstraint(check=check, name='name_constraint')
        operation = migrations.AddConstraint('Author', constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Author = to_state.apps.get_model(app_label, 'Author')
        with self.assertRaises(IntegrityError), transaction.atomic():
            Author.objects.create(name='Artur')
        # Literal "%" should be escaped in a way that is not considered a
        # wildcard.
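        # (The escaping matters because schema editors send this DDL through
        # the DB-API layer, where a bare "%" can be mistaken for a parameter
        # placeholder by some drivers; the exact escaping, e.g. doubling to
        # "%%", is backend-specific, so this test only checks the observable
        # constraint behavior.)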
        check = models.Q(rebate__endswith='%')
        constraint = models.CheckConstraint(check=check, name='rebate_constraint')
        operation = migrations.AddConstraint('Author', constraint)
        from_state = to_state
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        Author = to_state.apps.get_model(app_label, 'Author')
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Author = to_state.apps.get_model(app_label, 'Author')
        with self.assertRaises(IntegrityError), transaction.atomic():
            Author.objects.create(name='Albert', rebate='10$')
        author = Author.objects.create(name='Albert', rebate='10%')
        self.assertEqual(Author.objects.get(), author)

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_add_or_constraint(self):
        app_label = 'test_addorconstraint'
        constraint_name = 'add_constraint_or'
        from_state = self.set_up_test_model(app_label)
        check = models.Q(pink__gt=2, weight__gt=2) | models.Q(weight__lt=0)
        constraint = models.CheckConstraint(check=check, name=constraint_name)
        operation = migrations.AddConstraint('Pony', constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Pony = to_state.apps.get_model(app_label, 'Pony')
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=2, weight=3.0)
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=3, weight=1.0)
        Pony.objects.bulk_create([
            Pony(pink=3, weight=-1.0),
            Pony(pink=1, weight=-1.0),
            Pony(pink=3, weight=3.0),
        ])

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_add_constraint_combinable(self):
        app_label = 'test_addconstraint_combinable'
        operations = [
            CreateModel(
                'Book',
                fields=[
                    ('id', models.AutoField(primary_key=True)),
                    ('read', models.PositiveIntegerField()),
                    ('unread', models.PositiveIntegerField()),
                ],
            ),
        ]
        from_state = self.apply_operations(app_label, ProjectState(), operations)
        constraint = models.CheckConstraint(
            check=models.Q(read=(100 - models.F('unread'))),
            name='test_addconstraint_combinable_sum_100',
        )
        operation = migrations.AddConstraint('Book', constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Book = to_state.apps.get_model(app_label, 'Book')
        with self.assertRaises(IntegrityError), transaction.atomic():
            Book.objects.create(read=70, unread=10)
        Book.objects.create(read=70, unread=30)

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_remove_constraint(self):
        project_state = self.set_up_test_model("test_removeconstraint", constraints=[
            models.CheckConstraint(check=models.Q(pink__gt=2), name="test_remove_constraint_pony_pink_gt_2"),
            models.CheckConstraint(check=models.Q(pink__lt=100), name="test_remove_constraint_pony_pink_lt_100"),
        ])
        gt_operation = migrations.RemoveConstraint("Pony", "test_remove_constraint_pony_pink_gt_2")
        self.assertEqual(
            gt_operation.describe(), "Remove constraint test_remove_constraint_pony_pink_gt_2 from model Pony"
        )
        # Test state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_removeconstraint", new_state)
        self.assertEqual(len(new_state.models["test_removeconstraint", "pony"].options['constraints']), 1)
        Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards("test_removeconstraint", editor, project_state, new_state)
        Pony.objects.create(pink=1, weight=1.0).delete()
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=100, weight=1.0)
        # Remove the other one.
        lt_operation = migrations.RemoveConstraint("Pony", "test_remove_constraint_pony_pink_lt_100")
        lt_operation.state_forwards("test_removeconstraint", new_state)
        self.assertEqual(len(new_state.models["test_removeconstraint", "pony"].options['constraints']), 0)
        Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        with connection.schema_editor() as editor:
            lt_operation.database_forwards("test_removeconstraint", editor, project_state, new_state)
        Pony.objects.create(pink=100, weight=1.0).delete()
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards("test_removeconstraint", editor, new_state, project_state)
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=1, weight=1.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {'model_name': "Pony", 'name': "test_remove_constraint_pony_pink_gt_2"})

    def test_add_partial_unique_constraint(self):
        project_state = self.set_up_test_model('test_addpartialuniqueconstraint')
        partial_unique_constraint = models.UniqueConstraint(
            fields=['pink'],
            condition=models.Q(weight__gt=5),
            name='test_constraint_pony_pink_for_weight_gt_5_uniq',
        )
        operation = migrations.AddConstraint('Pony', partial_unique_constraint)
        self.assertEqual(
            operation.describe(),
            'Create constraint test_constraint_pony_pink_for_weight_gt_5_uniq '
            'on model Pony'
        )
        # Test the state alteration
        new_state = project_state.clone()
        operation.state_forwards('test_addpartialuniqueconstraint', new_state)
        self.assertEqual(len(new_state.models['test_addpartialuniqueconstraint', 'pony'].options['constraints']), 1)
        Pony = new_state.apps.get_model('test_addpartialuniqueconstraint', 'Pony')
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards('test_addpartialuniqueconstraint', editor, project_state, new_state)
        # Test constraint works
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=6.0)
        if connection.features.supports_partial_indexes:
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=1, weight=7.0)
        else:
            Pony.objects.create(pink=1, weight=7.0)
        # Test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards('test_addpartialuniqueconstraint', editor, new_state, project_state)
        # Test constraint doesn't work
        Pony.objects.create(pink=1, weight=7.0)
        # Test deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], 'AddConstraint')
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {'model_name': 'Pony', 'constraint': partial_unique_constraint})

    def test_remove_partial_unique_constraint(self):
        project_state = self.set_up_test_model('test_removepartialuniqueconstraint', constraints=[
            models.UniqueConstraint(
                fields=['pink'],
                condition=models.Q(weight__gt=5),
                name='test_constraint_pony_pink_for_weight_gt_5_uniq',
            ),
        ])
        gt_operation = migrations.RemoveConstraint('Pony', 'test_constraint_pony_pink_for_weight_gt_5_uniq')
        self.assertEqual(
            gt_operation.describe(),
            'Remove constraint test_constraint_pony_pink_for_weight_gt_5_uniq from model Pony'
        )
        # Test state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards('test_removepartialuniqueconstraint', new_state)
        self.assertEqual(len(new_state.models['test_removepartialuniqueconstraint', 'pony'].options['constraints']), 0)
        Pony = new_state.apps.get_model('test_removepartialuniqueconstraint', 'Pony')
        self.assertEqual(len(Pony._meta.constraints), 0)
        # Test database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards('test_removepartialuniqueconstraint', editor, project_state, new_state)
        # Test constraint doesn't work
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=6.0)
        Pony.objects.create(pink=1, weight=7.0).delete()
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards('test_removepartialuniqueconstraint', editor, new_state, project_state)
        # Test constraint works
        if connection.features.supports_partial_indexes:
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=1, weight=7.0)
        else:
            Pony.objects.create(pink=1, weight=7.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], 'RemoveConstraint')
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {
            'model_name': 'Pony',
            'name': 'test_constraint_pony_pink_for_weight_gt_5_uniq',
        })

    def test_alter_model_options(self):
        """
        Tests the AlterModelOptions operation.
        """
        project_state = self.set_up_test_model("test_almoop")
        # Test the state alteration (no DB alteration to test)
        operation = migrations.AlterModelOptions("Pony", {"permissions": [("can_groom", "Can groom")]})
        self.assertEqual(operation.describe(), "Change Meta options on Pony")
        new_state = project_state.clone()
        operation.state_forwards("test_almoop", new_state)
        self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 0)
        self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 1)
        self.assertEqual(new_state.models["test_almoop", "pony"].options["permissions"][0][0], "can_groom")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterModelOptions")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {'name': "Pony", 'options': {"permissions": [("can_groom", "Can groom")]}})

    def test_alter_model_options_emptying(self):
        """
        The AlterModelOptions operation removes keys from the dict (#23121)
        """
        project_state = self.set_up_test_model("test_almoop", options=True)
        # Test the state alteration (no DB alteration to test)
        operation = migrations.AlterModelOptions("Pony", {})
        self.assertEqual(operation.describe(), "Change Meta options on Pony")
        new_state = project_state.clone()
        operation.state_forwards("test_almoop", new_state)
        self.assertEqual(len(project_state.models["test_almoop", "pony"].options.get("permissions", [])), 1)
        self.assertEqual(len(new_state.models["test_almoop", "pony"].options.get("permissions", [])), 0)
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterModelOptions")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {'name': "Pony", 'options': {}})

    def test_alter_order_with_respect_to(self):
        """
        Tests the AlterOrderWithRespectTo operation.
        """
        project_state = self.set_up_test_model("test_alorwrtto", related_model=True)
        # Test the state alteration
        operation = migrations.AlterOrderWithRespectTo("Rider", "pony")
        self.assertEqual(operation.describe(), "Set order_with_respect_to on Rider to pony")
        new_state = project_state.clone()
        operation.state_forwards("test_alorwrtto", new_state)
        self.assertIsNone(
            project_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None)
        )
        self.assertEqual(
            new_state.models["test_alorwrtto", "rider"].options.get("order_with_respect_to", None),
            "pony"
        )
        # Make sure there's no matching index
        self.assertColumnNotExists("test_alorwrtto_rider", "_order")
        # Create some rows before alteration
        rendered_state = project_state.apps
        pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(weight=50)
        rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=1)
        rendered_state.get_model("test_alorwrtto", "Rider").objects.create(pony=pony, friend_id=2)
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_alorwrtto", editor, project_state, new_state)
        self.assertColumnExists("test_alorwrtto_rider", "_order")
        # Check for correct value in rows
        updated_riders = new_state.apps.get_model("test_alorwrtto", "Rider").objects.all()
        self.assertEqual(updated_riders[0]._order, 0)
        self.assertEqual(updated_riders[1]._order, 0)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_alorwrtto", editor, new_state, project_state)
        self.assertColumnNotExists("test_alorwrtto_rider", "_order")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterOrderWithRespectTo")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {'name': "Rider", 'order_with_respect_to': "pony"})

    def test_alter_model_managers(self):
        """
        The managers on a model are set.
        """
        project_state = self.set_up_test_model("test_almoma")
        # Test the state alteration
        operation = migrations.AlterModelManagers(
            "Pony",
            managers=[
                ("food_qs", FoodQuerySet.as_manager()),
                ("food_mgr", FoodManager("a", "b")),
                ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
            ]
        )
        self.assertEqual(operation.describe(), "Change managers on Pony")
        managers = project_state.models["test_almoma", "pony"].managers
        self.assertEqual(managers, [])

        new_state = project_state.clone()
        operation.state_forwards("test_almoma", new_state)
        self.assertIn(("test_almoma", "pony"), new_state.models)
        managers = new_state.models["test_almoma", "pony"].managers
        self.assertEqual(managers[0][0], "food_qs")
        self.assertIsInstance(managers[0][1], models.Manager)
        self.assertEqual(managers[1][0], "food_mgr")
        self.assertIsInstance(managers[1][1], FoodManager)
        self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
        self.assertEqual(managers[2][0], "food_mgr_kwargs")
        self.assertIsInstance(managers[2][1], FoodManager)
        self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
        rendered_state = new_state.apps
        model = rendered_state.get_model('test_almoma', 'pony')
        self.assertIsInstance(model.food_qs, models.Manager)
        self.assertIsInstance(model.food_mgr, FoodManager)
        self.assertIsInstance(model.food_mgr_kwargs, FoodManager)

    def test_alter_model_managers_emptying(self):
        """
        The managers on a model are set.
        """
""" project_state = self.set_up_test_model("test_almomae", manager_model=True) # Test the state alteration operation = migrations.AlterModelManagers("Food", managers=[]) self.assertEqual(operation.describe(), "Change managers on Food") self.assertIn(("test_almomae", "food"), project_state.models) managers = project_state.models["test_almomae", "food"].managers self.assertEqual(managers[0][0], "food_qs") self.assertIsInstance(managers[0][1], models.Manager) self.assertEqual(managers[1][0], "food_mgr") self.assertIsInstance(managers[1][1], FoodManager) self.assertEqual(managers[1][1].args, ("a", "b", 1, 2)) self.assertEqual(managers[2][0], "food_mgr_kwargs") self.assertIsInstance(managers[2][1], FoodManager) self.assertEqual(managers[2][1].args, ("x", "y", 3, 4)) new_state = project_state.clone() operation.state_forwards("test_almomae", new_state) managers = new_state.models["test_almomae", "food"].managers self.assertEqual(managers, []) def test_alter_fk(self): """ Creating and then altering an FK works correctly and deals with the pending SQL (#23091) """ project_state = self.set_up_test_model("test_alfk") # Test adding and then altering the FK in one go create_operation = migrations.CreateModel( name="Rider", fields=[ ("id", models.AutoField(primary_key=True)), ("pony", models.ForeignKey("Pony", models.CASCADE)), ], ) create_state = project_state.clone() create_operation.state_forwards("test_alfk", create_state) alter_operation = migrations.AlterField( model_name='Rider', name='pony', field=models.ForeignKey("Pony", models.CASCADE, editable=False), ) alter_state = create_state.clone() alter_operation.state_forwards("test_alfk", alter_state) with connection.schema_editor() as editor: create_operation.database_forwards("test_alfk", editor, project_state, create_state) alter_operation.database_forwards("test_alfk", editor, create_state, alter_state) def test_alter_fk_non_fk(self): """ Altering an FK to a non-FK works (#23244) """ # Test the state alteration operation = migrations.AlterField( model_name="Rider", name="pony", field=models.FloatField(), ) project_state, new_state = self.make_test_state("test_afknfk", operation, related_model=True) # Test the database alteration self.assertColumnExists("test_afknfk_rider", "pony_id") self.assertColumnNotExists("test_afknfk_rider", "pony") with connection.schema_editor() as editor: operation.database_forwards("test_afknfk", editor, project_state, new_state) self.assertColumnExists("test_afknfk_rider", "pony") self.assertColumnNotExists("test_afknfk_rider", "pony_id") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_afknfk", editor, new_state, project_state) self.assertColumnExists("test_afknfk_rider", "pony_id") self.assertColumnNotExists("test_afknfk_rider", "pony") def test_run_sql(self): """ Tests the RunSQL operation. 
""" project_state = self.set_up_test_model("test_runsql") # Create the operation operation = migrations.RunSQL( # Use a multi-line string with a comment to test splitting on SQLite and MySQL respectively "CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n" "INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'i love ponies'); -- this is magic!\n" "INSERT INTO i_love_ponies (id, special_thing) VALUES (2, 'i love django');\n" "UPDATE i_love_ponies SET special_thing = 'Ponies' WHERE special_thing LIKE '%%ponies';" "UPDATE i_love_ponies SET special_thing = 'Django' WHERE special_thing LIKE '%django';", # Run delete queries to test for parameter substitution failure # reported in #23426 "DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';" "DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';" "DROP TABLE i_love_ponies", state_operations=[migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))])], ) self.assertEqual(operation.describe(), "Raw SQL operation") # Test the state alteration new_state = project_state.clone() operation.state_forwards("test_runsql", new_state) self.assertEqual(len(new_state.models["test_runsql", "somethingelse"].fields), 1) # Make sure there's no table self.assertTableNotExists("i_love_ponies") # Test SQL collection with connection.schema_editor(collect_sql=True) as editor: operation.database_forwards("test_runsql", editor, project_state, new_state) self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql)) operation.database_backwards("test_runsql", editor, project_state, new_state) self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql)) # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_runsql", editor, project_state, new_state) self.assertTableExists("i_love_ponies") # Make sure all the SQL was processed with connection.cursor() as cursor: cursor.execute("SELECT COUNT(*) FROM i_love_ponies") self.assertEqual(cursor.fetchall()[0][0], 2) cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'") self.assertEqual(cursor.fetchall()[0][0], 1) cursor.execute("SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'") self.assertEqual(cursor.fetchall()[0][0], 1) # And test reversal self.assertTrue(operation.reversible) with connection.schema_editor() as editor: operation.database_backwards("test_runsql", editor, new_state, project_state) self.assertTableNotExists("i_love_ponies") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RunSQL") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["reverse_sql", "sql", "state_operations"]) # And elidable reduction self.assertIs(False, operation.reduce(operation, [])) elidable_operation = migrations.RunSQL('SELECT 1 FROM void;', elidable=True) self.assertEqual(elidable_operation.reduce(operation, []), [operation]) def test_run_sql_params(self): """ #23426 - RunSQL should accept parameters. 
""" project_state = self.set_up_test_model("test_runsql") # Create the operation operation = migrations.RunSQL( ["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"], ["DROP TABLE i_love_ponies"], ) param_operation = migrations.RunSQL( # forwards ( "INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');", ["INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);", ['Ponies']], ("INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);", (3, 'Python',)), ), # backwards [ "DELETE FROM i_love_ponies WHERE special_thing = 'Django';", ["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None], ("DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;", [3, 'Python']), ] ) # Make sure there's no table self.assertTableNotExists("i_love_ponies") new_state = project_state.clone() # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_runsql", editor, project_state, new_state) # Test parameter passing with connection.schema_editor() as editor: param_operation.database_forwards("test_runsql", editor, project_state, new_state) # Make sure all the SQL was processed with connection.cursor() as cursor: cursor.execute("SELECT COUNT(*) FROM i_love_ponies") self.assertEqual(cursor.fetchall()[0][0], 3) with connection.schema_editor() as editor: param_operation.database_backwards("test_runsql", editor, new_state, project_state) with connection.cursor() as cursor: cursor.execute("SELECT COUNT(*) FROM i_love_ponies") self.assertEqual(cursor.fetchall()[0][0], 0) # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_runsql", editor, new_state, project_state) self.assertTableNotExists("i_love_ponies") def test_run_sql_params_invalid(self): """ #23426 - RunSQL should fail when a list of statements with an incorrect number of tuples is given. """ project_state = self.set_up_test_model("test_runsql") new_state = project_state.clone() operation = migrations.RunSQL( # forwards [ ["INSERT INTO foo (bar) VALUES ('buz');"] ], # backwards ( ("DELETE FROM foo WHERE bar = 'buz';", 'invalid', 'parameter count'), ), ) with connection.schema_editor() as editor: with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 1"): operation.database_forwards("test_runsql", editor, project_state, new_state) with connection.schema_editor() as editor: with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 3"): operation.database_backwards("test_runsql", editor, new_state, project_state) def test_run_sql_noop(self): """ #24098 - Tests no-op RunSQL operations. 
""" operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop) with connection.schema_editor() as editor: operation.database_forwards("test_runsql", editor, None, None) operation.database_backwards("test_runsql", editor, None, None) def test_run_python(self): """ Tests the RunPython operation """ project_state = self.set_up_test_model("test_runpython", mti_model=True) # Create the operation def inner_method(models, schema_editor): Pony = models.get_model("test_runpython", "Pony") Pony.objects.create(pink=1, weight=3.55) Pony.objects.create(weight=5) def inner_method_reverse(models, schema_editor): Pony = models.get_model("test_runpython", "Pony") Pony.objects.filter(pink=1, weight=3.55).delete() Pony.objects.filter(weight=5).delete() operation = migrations.RunPython(inner_method, reverse_code=inner_method_reverse) self.assertEqual(operation.describe(), "Raw Python operation") # Test the state alteration does nothing new_state = project_state.clone() operation.state_forwards("test_runpython", new_state) self.assertEqual(new_state, project_state) # Test the database alteration self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0) with connection.schema_editor() as editor: operation.database_forwards("test_runpython", editor, project_state, new_state) self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2) # Now test reversal self.assertTrue(operation.reversible) with connection.schema_editor() as editor: operation.database_backwards("test_runpython", editor, project_state, new_state) self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0) # Now test we can't use a string with self.assertRaisesMessage(ValueError, 'RunPython must be supplied with a callable'): migrations.RunPython("print 'ahahaha'") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RunPython") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["code", "reverse_code"]) # Also test reversal fails, with an operation identical to above but without reverse_code set no_reverse_operation = migrations.RunPython(inner_method) self.assertFalse(no_reverse_operation.reversible) with connection.schema_editor() as editor: no_reverse_operation.database_forwards("test_runpython", editor, project_state, new_state) with self.assertRaises(NotImplementedError): no_reverse_operation.database_backwards("test_runpython", editor, new_state, project_state) self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2) def create_ponies(models, schema_editor): Pony = models.get_model("test_runpython", "Pony") pony1 = Pony.objects.create(pink=1, weight=3.55) self.assertIsNot(pony1.pk, None) pony2 = Pony.objects.create(weight=5) self.assertIsNot(pony2.pk, None) self.assertNotEqual(pony1.pk, pony2.pk) operation = migrations.RunPython(create_ponies) with connection.schema_editor() as editor: operation.database_forwards("test_runpython", editor, project_state, new_state) self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4) # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "RunPython") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["code"]) def create_shetlandponies(models, schema_editor): ShetlandPony = models.get_model("test_runpython", "ShetlandPony") pony1 = ShetlandPony.objects.create(weight=4.0) self.assertIsNot(pony1.pk, 
            pony2 = ShetlandPony.objects.create(weight=5.0)
            self.assertIsNot(pony2.pk, None)
            self.assertNotEqual(pony1.pk, pony2.pk)
        operation = migrations.RunPython(create_shetlandponies)
        with connection.schema_editor() as editor:
            operation.database_forwards("test_runpython", editor, project_state, new_state)
        self.assertEqual(project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6)
        self.assertEqual(project_state.apps.get_model("test_runpython", "ShetlandPony").objects.count(), 2)
        # And elidable reduction
        self.assertIs(False, operation.reduce(operation, []))
        elidable_operation = migrations.RunPython(inner_method, elidable=True)
        self.assertEqual(elidable_operation.reduce(operation, []), [operation])

    def test_run_python_atomic(self):
        """
        Tests the RunPython operation correctly handles the "atomic" keyword
        """
        project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True)

        def inner_method(models, schema_editor):
            Pony = models.get_model("test_runpythonatomic", "Pony")
            Pony.objects.create(pink=1, weight=3.55)
            raise ValueError("Adrian hates ponies.")

        # Verify atomicity when applying.
        atomic_migration = Migration("test", "test_runpythonatomic")
        atomic_migration.operations = [migrations.RunPython(inner_method, reverse_code=inner_method)]
        non_atomic_migration = Migration("test", "test_runpythonatomic")
        non_atomic_migration.operations = [migrations.RunPython(inner_method, reverse_code=inner_method, atomic=False)]
        # If we're a fully-transactional database, both versions should rollback
        if connection.features.can_rollback_ddl:
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.apply(project_state, editor)
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.apply(project_state, editor)
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
        # Otherwise, the non-atomic operation should leave a row there
        else:
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.apply(project_state, editor)
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.apply(project_state, editor)
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 1)
        # Reset object count to zero and verify atomicity when unapplying.
        project_state.apps.get_model("test_runpythonatomic", "Pony").objects.all().delete()
        # On a fully-transactional database, both versions rollback.
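        # (connection.features.can_rollback_ddl is what separates the two
        # branches here and above: PostgreSQL and SQLite support transactional
        # DDL, for example, while MySQL and Oracle do not.)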
        if connection.features.can_rollback_ddl:
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.unapply(project_state, editor)
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.unapply(project_state, editor)
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
        # Otherwise, the non-atomic operation leaves a row there.
        else:
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.unapply(project_state, editor)
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 0)
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.unapply(project_state, editor)
            self.assertEqual(project_state.apps.get_model("test_runpythonatomic", "Pony").objects.count(), 1)
        # Verify deconstruction.
        definition = non_atomic_migration.operations[0].deconstruct()
        self.assertEqual(definition[0], "RunPython")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["atomic", "code", "reverse_code"])

    def test_run_python_related_assignment(self):
        """
        #24282 - Model changes to a FK reverse side update the model
        on the FK side as well.
        """

        def inner_method(models, schema_editor):
            Author = models.get_model("test_authors", "Author")
            Book = models.get_model("test_books", "Book")
            author = Author.objects.create(name="Hemingway")
            Book.objects.create(title="Old Man and The Sea", author=author)

        create_author = migrations.CreateModel(
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=100)),
            ],
            options={},
        )
        create_book = migrations.CreateModel(
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("title", models.CharField(max_length=100)),
                ("author", models.ForeignKey("test_authors.Author", models.CASCADE))
            ],
            options={},
        )
        add_hometown = migrations.AddField(
            "Author",
            "hometown",
            models.CharField(max_length=100),
        )
        create_old_man = migrations.RunPython(inner_method, inner_method)

        project_state = ProjectState()
        new_state = project_state.clone()
        with connection.schema_editor() as editor:
            create_author.state_forwards("test_authors", new_state)
            create_author.database_forwards("test_authors", editor, project_state, new_state)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_book.state_forwards("test_books", new_state)
            create_book.database_forwards("test_books", editor, project_state, new_state)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            add_hometown.state_forwards("test_authors", new_state)
            add_hometown.database_forwards("test_authors", editor, project_state, new_state)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_old_man.state_forwards("test_books", new_state)
            create_old_man.database_forwards("test_books", editor, project_state, new_state)

    def test_model_with_bigautofield(self):
        """
        A model with BigAutoField can be created.
        """
""" def create_data(models, schema_editor): Author = models.get_model("test_author", "Author") Book = models.get_model("test_book", "Book") author1 = Author.objects.create(name="Hemingway") Book.objects.create(title="Old Man and The Sea", author=author1) Book.objects.create(id=2 ** 33, title="A farewell to arms", author=author1) author2 = Author.objects.create(id=2 ** 33, name="Remarque") Book.objects.create(title="All quiet on the western front", author=author2) Book.objects.create(title="Arc de Triomphe", author=author2) create_author = migrations.CreateModel( "Author", [ ("id", models.BigAutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ], options={}, ) create_book = migrations.CreateModel( "Book", [ ("id", models.BigAutoField(primary_key=True)), ("title", models.CharField(max_length=100)), ("author", models.ForeignKey(to="test_author.Author", on_delete=models.CASCADE)) ], options={}, ) fill_data = migrations.RunPython(create_data) project_state = ProjectState() new_state = project_state.clone() with connection.schema_editor() as editor: create_author.state_forwards("test_author", new_state) create_author.database_forwards("test_author", editor, project_state, new_state) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: create_book.state_forwards("test_book", new_state) create_book.database_forwards("test_book", editor, project_state, new_state) project_state = new_state new_state = new_state.clone() with connection.schema_editor() as editor: fill_data.state_forwards("fill_data", new_state) fill_data.database_forwards("fill_data", editor, project_state, new_state) def _test_autofield_foreignfield_growth(self, source_field, target_field, target_value): """ A field may be migrated in the following ways: - AutoField to BigAutoField - SmallAutoField to AutoField - SmallAutoField to BigAutoField """ def create_initial_data(models, schema_editor): Article = models.get_model("test_article", "Article") Blog = models.get_model("test_blog", "Blog") blog = Blog.objects.create(name="web development done right") Article.objects.create(name="Frameworks", blog=blog) Article.objects.create(name="Programming Languages", blog=blog) def create_big_data(models, schema_editor): Article = models.get_model("test_article", "Article") Blog = models.get_model("test_blog", "Blog") blog2 = Blog.objects.create(name="Frameworks", id=target_value) Article.objects.create(name="Django", blog=blog2) Article.objects.create(id=target_value, name="Django2", blog=blog2) create_blog = migrations.CreateModel( "Blog", [ ("id", source_field(primary_key=True)), ("name", models.CharField(max_length=100)), ], options={}, ) create_article = migrations.CreateModel( "Article", [ ("id", source_field(primary_key=True)), ("blog", models.ForeignKey(to="test_blog.Blog", on_delete=models.CASCADE)), ("name", models.CharField(max_length=100)), ("data", models.TextField(default="")), ], options={}, ) fill_initial_data = migrations.RunPython(create_initial_data, create_initial_data) fill_big_data = migrations.RunPython(create_big_data, create_big_data) grow_article_id = migrations.AlterField('Article', 'id', target_field(primary_key=True)) grow_blog_id = migrations.AlterField('Blog', 'id', target_field(primary_key=True)) project_state = ProjectState() new_state = project_state.clone() with connection.schema_editor() as editor: create_blog.state_forwards("test_blog", new_state) create_blog.database_forwards("test_blog", editor, project_state, new_state) project_state = 
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_article.state_forwards("test_article", new_state)
            create_article.database_forwards("test_article", editor, project_state, new_state)

        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            fill_initial_data.state_forwards("fill_initial_data", new_state)
            fill_initial_data.database_forwards("fill_initial_data", editor, project_state, new_state)

        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            grow_article_id.state_forwards("test_article", new_state)
            grow_article_id.database_forwards("test_article", editor, project_state, new_state)

        state = new_state.clone()
        article = state.apps.get_model("test_article.Article")
        self.assertIsInstance(article._meta.pk, target_field)

        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            grow_blog_id.state_forwards("test_blog", new_state)
            grow_blog_id.database_forwards("test_blog", editor, project_state, new_state)

        state = new_state.clone()
        blog = state.apps.get_model("test_blog.Blog")
        self.assertIsInstance(blog._meta.pk, target_field)

        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            fill_big_data.state_forwards("fill_big_data", new_state)
            fill_big_data.database_forwards("fill_big_data", editor, project_state, new_state)

    def test_autofield__bigautofield_foreignfield_growth(self):
        """A field may be migrated from AutoField to BigAutoField."""
        self._test_autofield_foreignfield_growth(
            models.AutoField,
            models.BigAutoField,
            2 ** 33,
        )

    def test_smallfield_autofield_foreignfield_growth(self):
        """A field may be migrated from SmallAutoField to AutoField."""
        self._test_autofield_foreignfield_growth(
            models.SmallAutoField,
            models.AutoField,
            2 ** 22,
        )

    def test_smallfield_bigautofield_foreignfield_growth(self):
        """A field may be migrated from SmallAutoField to BigAutoField."""
        self._test_autofield_foreignfield_growth(
            models.SmallAutoField,
            models.BigAutoField,
            2 ** 33,
        )

    def test_run_python_noop(self):
        """
        #24098 - Tests no-op RunPython operations.
        """
        project_state = ProjectState()
        new_state = project_state.clone()
        operation = migrations.RunPython(migrations.RunPython.noop, migrations.RunPython.noop)
        with connection.schema_editor() as editor:
            operation.database_forwards("test_runpython", editor, project_state, new_state)
            operation.database_backwards("test_runpython", editor, new_state, project_state)

    def test_separate_database_and_state(self):
        """
        Tests the SeparateDatabaseAndState operation.
        """
""" project_state = self.set_up_test_model("test_separatedatabaseandstate") # Create the operation database_operation = migrations.RunSQL( "CREATE TABLE i_love_ponies (id int, special_thing int);", "DROP TABLE i_love_ponies;" ) state_operation = migrations.CreateModel("SomethingElse", [("id", models.AutoField(primary_key=True))]) operation = migrations.SeparateDatabaseAndState( state_operations=[state_operation], database_operations=[database_operation] ) self.assertEqual(operation.describe(), "Custom state/database change combination") # Test the state alteration new_state = project_state.clone() operation.state_forwards("test_separatedatabaseandstate", new_state) self.assertEqual(len(new_state.models["test_separatedatabaseandstate", "somethingelse"].fields), 1) # Make sure there's no table self.assertTableNotExists("i_love_ponies") # Test the database alteration with connection.schema_editor() as editor: operation.database_forwards("test_separatedatabaseandstate", editor, project_state, new_state) self.assertTableExists("i_love_ponies") # And test reversal self.assertTrue(operation.reversible) with connection.schema_editor() as editor: operation.database_backwards("test_separatedatabaseandstate", editor, new_state, project_state) self.assertTableNotExists("i_love_ponies") # And deconstruction definition = operation.deconstruct() self.assertEqual(definition[0], "SeparateDatabaseAndState") self.assertEqual(definition[1], []) self.assertEqual(sorted(definition[2]), ["database_operations", "state_operations"]) def test_separate_database_and_state2(self): """ A complex SeparateDatabaseAndState operation: Multiple operations both for state and database. Verify the state dependencies within each list and that state ops don't affect the database. """ app_label = "test_separatedatabaseandstate2" project_state = self.set_up_test_model(app_label) # Create the operation database_operations = [ migrations.CreateModel( "ILovePonies", [("id", models.AutoField(primary_key=True))], options={"db_table": "iloveponies"}, ), migrations.CreateModel( "ILoveMorePonies", # We use IntegerField and not AutoField because # the model is going to be deleted immediately # and with an AutoField this fails on Oracle [("id", models.IntegerField(primary_key=True))], options={"db_table": "ilovemoreponies"}, ), migrations.DeleteModel("ILoveMorePonies"), migrations.CreateModel( "ILoveEvenMorePonies", [("id", models.AutoField(primary_key=True))], options={"db_table": "iloveevenmoreponies"}, ), ] state_operations = [ migrations.CreateModel( "SomethingElse", [("id", models.AutoField(primary_key=True))], options={"db_table": "somethingelse"}, ), migrations.DeleteModel("SomethingElse"), migrations.CreateModel( "SomethingCompletelyDifferent", [("id", models.AutoField(primary_key=True))], options={"db_table": "somethingcompletelydifferent"}, ), ] operation = migrations.SeparateDatabaseAndState( state_operations=state_operations, database_operations=database_operations, ) # Test the state alteration new_state = project_state.clone() operation.state_forwards(app_label, new_state) def assertModelsAndTables(after_db): # Tables and models exist, or don't, as they should: self.assertNotIn((app_label, "somethingelse"), new_state.models) self.assertEqual(len(new_state.models[app_label, "somethingcompletelydifferent"].fields), 1) self.assertNotIn((app_label, "iloveponiesonies"), new_state.models) self.assertNotIn((app_label, "ilovemoreponies"), new_state.models) self.assertNotIn((app_label, "iloveevenmoreponies"), new_state.models) 
            self.assertTableNotExists("somethingelse")
            self.assertTableNotExists("somethingcompletelydifferent")
            self.assertTableNotExists("ilovemoreponies")
            if after_db:
                self.assertTableExists("iloveponies")
                self.assertTableExists("iloveevenmoreponies")
            else:
                self.assertTableNotExists("iloveponies")
                self.assertTableNotExists("iloveevenmoreponies")

        assertModelsAndTables(after_db=False)
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        assertModelsAndTables(after_db=True)
        # And test reversal
        self.assertTrue(operation.reversible)
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        assertModelsAndTables(after_db=False)


class SwappableOperationTests(OperationTestBase):
    """
    Key operations ignore swappable models
    (we don't want to replicate all of them here,
    as the functionality is in a common base class anyway)
    """

    available_apps = ['migrations']

    @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
    def test_create_ignore_swapped(self):
        """
        The CreateTable operation ignores swapped models.
        """
        operation = migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=1)),
            ],
            options={
                "swappable": "TEST_SWAP_MODEL",
            },
        )
        # Test the state alteration (it should still be there!)
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards("test_crigsw", new_state)
        self.assertEqual(new_state.models["test_crigsw", "pony"].name, "Pony")
        self.assertEqual(len(new_state.models["test_crigsw", "pony"].fields), 2)
        # Test the database alteration
        self.assertTableNotExists("test_crigsw_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crigsw", editor, project_state, new_state)
        self.assertTableNotExists("test_crigsw_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_crigsw", editor, new_state, project_state)
        self.assertTableNotExists("test_crigsw_pony")

    @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
    def test_delete_ignore_swapped(self):
        """
        Tests the DeleteModel operation ignores swapped models.
        """
        operation = migrations.DeleteModel("Pony")
        project_state, new_state = self.make_test_state("test_dligsw", operation)
        # Test the database alteration
        self.assertTableNotExists("test_dligsw_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_dligsw", editor, project_state, new_state)
        self.assertTableNotExists("test_dligsw_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_dligsw", editor, new_state, project_state)
        self.assertTableNotExists("test_dligsw_pony")

    @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
    def test_add_field_ignore_swapped(self):
        """
        Tests the AddField operation.
        """
""" # Test the state alteration operation = migrations.AddField( "Pony", "height", models.FloatField(null=True, default=5), ) project_state, new_state = self.make_test_state("test_adfligsw", operation) # Test the database alteration self.assertTableNotExists("test_adfligsw_pony") with connection.schema_editor() as editor: operation.database_forwards("test_adfligsw", editor, project_state, new_state) self.assertTableNotExists("test_adfligsw_pony") # And test reversal with connection.schema_editor() as editor: operation.database_backwards("test_adfligsw", editor, new_state, project_state) self.assertTableNotExists("test_adfligsw_pony") @override_settings(TEST_SWAP_MODEL='migrations.SomeFakeModel') def test_indexes_ignore_swapped(self): """ Add/RemoveIndex operations ignore swapped models. """ operation = migrations.AddIndex('Pony', models.Index(fields=['pink'], name='my_name_idx')) project_state, new_state = self.make_test_state('test_adinigsw', operation) with connection.schema_editor() as editor: # No database queries should be run for swapped models operation.database_forwards('test_adinigsw', editor, project_state, new_state) operation.database_backwards('test_adinigsw', editor, new_state, project_state) operation = migrations.RemoveIndex('Pony', models.Index(fields=['pink'], name='my_name_idx')) project_state, new_state = self.make_test_state("test_rminigsw", operation) with connection.schema_editor() as editor: operation.database_forwards('test_rminigsw', editor, project_state, new_state) operation.database_backwards('test_rminigsw', editor, new_state, project_state) class TestCreateModel(SimpleTestCase): def test_references_model_mixin(self): CreateModel('name', [], bases=(Mixin, models.Model)).references_model('other_model') class FieldOperationTests(SimpleTestCase): def test_references_model(self): operation = FieldOperation('MoDel', 'field', models.ForeignKey('Other', models.CASCADE)) # Model name match. self.assertIs(operation.references_model('mOdEl'), True) # Referenced field. self.assertIs(operation.references_model('oTher'), True) # Doesn't reference. 
        self.assertIs(operation.references_model('Whatever'), False)

    def test_references_field_by_name(self):
        operation = FieldOperation('MoDel', 'field', models.BooleanField(default=False))
        self.assertIs(operation.references_field('model', 'field'), True)

    def test_references_field_by_remote_field_model(self):
        operation = FieldOperation('Model', 'field', models.ForeignKey('Other', models.CASCADE))
        self.assertIs(operation.references_field('Other', 'whatever'), True)
        self.assertIs(operation.references_field('Missing', 'whatever'), False)

    def test_references_field_by_from_fields(self):
        operation = FieldOperation(
            'Model', 'field', models.fields.related.ForeignObject('Other', models.CASCADE, ['from'], ['to'])
        )
        self.assertIs(operation.references_field('Model', 'from'), True)
        self.assertIs(operation.references_field('Model', 'to'), False)
        self.assertIs(operation.references_field('Other', 'from'), False)
        self.assertIs(operation.references_field('Other', 'to'), True)

    def test_references_field_by_to_fields(self):
        operation = FieldOperation('Model', 'field', models.ForeignKey('Other', models.CASCADE, to_field='field'))
        self.assertIs(operation.references_field('Other', 'field'), True)
        self.assertIs(operation.references_field('Other', 'whatever'), False)
        self.assertIs(operation.references_field('Missing', 'whatever'), False)

    def test_references_field_by_through(self):
        operation = FieldOperation('Model', 'field', models.ManyToManyField('Other', through='Through'))
        self.assertIs(operation.references_field('Other', 'whatever'), True)
        self.assertIs(operation.references_field('Through', 'whatever'), True)
        self.assertIs(operation.references_field('Missing', 'whatever'), False)

    def test_reference_field_by_through_fields(self):
        operation = FieldOperation(
            'Model', 'field', models.ManyToManyField('Other', through='Through', through_fields=('first', 'second'))
        )
        self.assertIs(operation.references_field('Other', 'whatever'), True)
        self.assertIs(operation.references_field('Through', 'whatever'), False)
        self.assertIs(operation.references_field('Through', 'first'), True)
        self.assertIs(operation.references_field('Through', 'second'), True)
969def4b56e37cee98f92459371ca3c82ee5c50d57c627368e5ecbb156e62875
import os
import shutil
import tempfile
from contextlib import contextmanager
from importlib import import_module

from django.apps import apps
from django.db import connection, connections, migrations, models
from django.db.migrations.migration import Migration
from django.db.migrations.recorder import MigrationRecorder
from django.db.migrations.state import ProjectState
from django.test import TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils.module_loading import module_dir


class MigrationTestBase(TransactionTestCase):
    """
    Contains an extended set of asserts for testing migrations and schema operations.
    """

    available_apps = ["migrations"]
    databases = {'default', 'other'}

    def tearDown(self):
        # Reset applied-migrations state.
        for db in self.databases:
            recorder = MigrationRecorder(connections[db])
            recorder.migration_qs.filter(app='migrations').delete()

    def get_table_description(self, table, using='default'):
        with connections[using].cursor() as cursor:
            return connections[using].introspection.get_table_description(cursor, table)

    def assertTableExists(self, table, using='default'):
        with connections[using].cursor() as cursor:
            self.assertIn(table, connections[using].introspection.table_names(cursor))

    def assertTableNotExists(self, table, using='default'):
        with connections[using].cursor() as cursor:
            self.assertNotIn(table, connections[using].introspection.table_names(cursor))

    def assertColumnExists(self, table, column, using='default'):
        self.assertIn(column, [c.name for c in self.get_table_description(table, using=using)])

    def assertColumnNotExists(self, table, column, using='default'):
        self.assertNotIn(column, [c.name for c in self.get_table_description(table, using=using)])

    def _get_column_allows_null(self, table, column, using):
        return [c.null_ok for c in self.get_table_description(table, using=using) if c.name == column][0]

    def assertColumnNull(self, table, column, using='default'):
        self.assertTrue(self._get_column_allows_null(table, column, using))

    def assertColumnNotNull(self, table, column, using='default'):
        self.assertFalse(self._get_column_allows_null(table, column, using))

    def assertIndexExists(self, table, columns, value=True, using='default', index_type=None):
        with connections[using].cursor() as cursor:
            self.assertEqual(
                value,
                any(
                    c["index"]
                    for c in connections[using].introspection.get_constraints(cursor, table).values()
                    if c['columns'] == list(columns) and (index_type is None or c['type'] == index_type)
                ),
            )

    def assertIndexNotExists(self, table, columns):
        return self.assertIndexExists(table, columns, False)

    def assertConstraintExists(self, table, name, value=True, using='default'):
        with connections[using].cursor() as cursor:
            constraints = connections[using].introspection.get_constraints(cursor, table).items()
            self.assertEqual(
                value,
                any(c['check'] for n, c in constraints if n == name),
            )

    def assertConstraintNotExists(self, table, name):
        return self.assertConstraintExists(table, name, False)

    def assertFKExists(self, table, columns, to, value=True, using='default'):
        with connections[using].cursor() as cursor:
            self.assertEqual(
                value,
                any(
                    c["foreign_key"] == to
                    for c in connections[using].introspection.get_constraints(cursor, table).values()
                    if c['columns'] == list(columns)
                ),
            )

    def assertFKNotExists(self, table, columns, to):
        return self.assertFKExists(table, columns, to, False)

    @contextmanager
    def temporary_migration_module(self, app_label='migrations', module=None):
        """
        Allows testing management commands in a temporary migrations module.

        Wrap all invocations to makemigrations and squashmigrations with this
        context manager in order to avoid creating migration files in your
        source tree inadvertently.

        Takes the application label that will be passed to makemigrations or
        squashmigrations and the Python path to a migrations module.

        The migrations module is used as a template for creating the temporary
        migrations module. If it isn't provided, the application's migrations
        module is used, if it exists.

        Returns the filesystem path to the temporary migrations module.
        """
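        # A minimal usage sketch (the module path here is hypothetical):
        #
        #   with self.temporary_migration_module(module='migrations.test_migrations') as migration_dir:
        #       call_command('makemigrations', 'migrations', verbosity=0)
        #       self.assertTrue(os.path.exists(os.path.join(migration_dir, '0001_initial.py')))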
        with tempfile.TemporaryDirectory() as temp_dir:
            target_dir = tempfile.mkdtemp(dir=temp_dir)
            with open(os.path.join(target_dir, '__init__.py'), 'w'):
                pass
            target_migrations_dir = os.path.join(target_dir, 'migrations')

            if module is None:
                module = apps.get_app_config(app_label).name + '.migrations'

            try:
                source_migrations_dir = module_dir(import_module(module))
            except (ImportError, ValueError):
                pass
            else:
                shutil.copytree(source_migrations_dir, target_migrations_dir)

            with extend_sys_path(temp_dir):
                new_module = os.path.basename(target_dir) + '.migrations'
                with self.settings(MIGRATION_MODULES={app_label: new_module}):
                    yield target_migrations_dir


class OperationTestBase(MigrationTestBase):
    """Common functions to help test operations."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._initial_table_names = frozenset(connection.introspection.table_names())

    def tearDown(self):
        self.cleanup_test_tables()
        super().tearDown()

    def cleanup_test_tables(self):
        table_names = frozenset(connection.introspection.table_names()) - self._initial_table_names
        with connection.schema_editor() as editor:
            with connection.constraint_checks_disabled():
                for table_name in table_names:
                    editor.execute(editor.sql_delete_table % {
                        'table': editor.quote_name(table_name),
                    })

    def apply_operations(self, app_label, project_state, operations, atomic=True):
        migration = Migration('name', app_label)
        migration.operations = operations
        with connection.schema_editor(atomic=atomic) as editor:
            return migration.apply(project_state, editor)

    def unapply_operations(self, app_label, project_state, operations, atomic=True):
        migration = Migration('name', app_label)
        migration.operations = operations
        with connection.schema_editor(atomic=atomic) as editor:
            return migration.unapply(project_state, editor)

    def make_test_state(self, app_label, operation, **kwargs):
        """
        Makes a test state using set_up_test_model and returns the
        original state and the state after the migration is applied.
        """
        project_state = self.set_up_test_model(app_label, **kwargs)
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        return project_state, new_state

    def set_up_test_model(
            self, app_label, second_model=False, third_model=False, index=False,
            multicol_index=False, related_model=False, mti_model=False,
            proxy_model=False, manager_model=False, unique_together=False,
            options=False, db_table=None, index_together=False, constraints=None):
        """Creates a test model state and database table."""
        # Make the "current" state.
model_options = { 'swappable': 'TEST_SWAP_MODEL', 'index_together': [['weight', 'pink']] if index_together else [], 'unique_together': [['pink', 'weight']] if unique_together else [], } if options: model_options['permissions'] = [('can_groom', 'Can groom')] if db_table: model_options['db_table'] = db_table operations = [migrations.CreateModel( 'Pony', [ ('id', models.AutoField(primary_key=True)), ('pink', models.IntegerField(default=3)), ('weight', models.FloatField()), ], options=model_options, )] if index: operations.append(migrations.AddIndex( 'Pony', models.Index(fields=['pink'], name='pony_pink_idx'), )) if multicol_index: operations.append(migrations.AddIndex( 'Pony', models.Index(fields=['pink', 'weight'], name='pony_test_idx'), )) if constraints: for constraint in constraints: operations.append(migrations.AddConstraint('Pony', constraint)) if second_model: operations.append(migrations.CreateModel( 'Stable', [ ('id', models.AutoField(primary_key=True)), ] )) if third_model: operations.append(migrations.CreateModel( 'Van', [ ('id', models.AutoField(primary_key=True)), ] )) if related_model: operations.append(migrations.CreateModel( 'Rider', [ ('id', models.AutoField(primary_key=True)), ('pony', models.ForeignKey('Pony', models.CASCADE)), ('friend', models.ForeignKey('self', models.CASCADE)) ], )) if mti_model: operations.append(migrations.CreateModel( 'ShetlandPony', fields=[ ('pony_ptr', models.OneToOneField( 'Pony', models.CASCADE, auto_created=True, parent_link=True, primary_key=True, to_field='id', serialize=False, )), ('cuteness', models.IntegerField(default=1)), ], bases=['%s.Pony' % app_label], )) if proxy_model: operations.append(migrations.CreateModel( 'ProxyPony', fields=[], options={'proxy': True}, bases=['%s.Pony' % app_label], )) if manager_model: from .models import FoodManager, FoodQuerySet operations.append(migrations.CreateModel( 'Food', fields=[ ('id', models.AutoField(primary_key=True)), ], managers=[ ('food_qs', FoodQuerySet.as_manager()), ('food_mgr', FoodManager('a', 'b')), ('food_mgr_kwargs', FoodManager('x', 'y', 3, 4)), ] )) return self.apply_operations(app_label, ProjectState(), operations)
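

# A minimal illustrative usage of the helpers above (an editorial sketch, not
# part of the original suite): set_up_test_model() creates the "Pony" table,
# an operation is pushed through state_forwards()/database_forwards(), and the
# schema assertions from MigrationTestBase verify the result. The class name,
# the app label 'test_exaddf', and the field name 'height' are arbitrary.
class ExampleOperationUsageTests(OperationTestBase):

    def test_add_field_creates_column(self):
        project_state = self.set_up_test_model('test_exaddf')
        operation = migrations.AddField(
            'Pony',
            'height',
            models.FloatField(null=True, default=5),
        )
        new_state = project_state.clone()
        # Mutate the in-memory state first, then apply the matching change
        # to the database schema.
        operation.state_forwards('test_exaddf', new_state)
        self.assertColumnNotExists('test_exaddf_pony', 'height')
        with connection.schema_editor() as editor:
            operation.database_forwards('test_exaddf', editor, project_state, new_state)
        self.assertColumnExists('test_exaddf_pony', 'height')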
from django.apps.registry import Apps from django.contrib.contenttypes.fields import GenericForeignKey from django.db import models from django.db.migrations.exceptions import InvalidBasesError from django.db.migrations.operations import ( AddField, AlterField, DeleteModel, RemoveField, ) from django.db.migrations.state import ( ModelState, ProjectState, get_related_models_recursive, ) from django.test import SimpleTestCase, override_settings from django.test.utils import isolate_apps from .models import ( FoodManager, FoodQuerySet, ModelWithCustomBase, NoMigrationFoodManager, UnicodeModel, ) class StateTests(SimpleTestCase): """ Tests state construction, rendering and modification by operations. """ def test_create(self): """ Tests making a ProjectState from an Apps """ new_apps = Apps(["migrations"]) class Author(models.Model): name = models.CharField(max_length=255) bio = models.TextField() age = models.IntegerField(blank=True, null=True) class Meta: app_label = "migrations" apps = new_apps unique_together = ["name", "bio"] index_together = ["bio", "age"] class AuthorProxy(Author): class Meta: app_label = "migrations" apps = new_apps proxy = True ordering = ["name"] class SubAuthor(Author): width = models.FloatField(null=True) class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): title = models.CharField(max_length=1000) author = models.ForeignKey(Author, models.CASCADE) contributors = models.ManyToManyField(Author) class Meta: app_label = "migrations" apps = new_apps verbose_name = "tome" db_table = "test_tome" indexes = [models.Index(fields=['title'])] class Food(models.Model): food_mgr = FoodManager('a', 'b') food_qs = FoodQuerySet.as_manager() food_no_mgr = NoMigrationFoodManager('x', 'y') class Meta: app_label = "migrations" apps = new_apps class FoodNoManagers(models.Model): class Meta: app_label = "migrations" apps = new_apps class FoodNoDefaultManager(models.Model): food_no_mgr = NoMigrationFoodManager('x', 'y') food_mgr = FoodManager('a', 'b') food_qs = FoodQuerySet.as_manager() class Meta: app_label = "migrations" apps = new_apps mgr1 = FoodManager('a', 'b') mgr2 = FoodManager('x', 'y', c=3, d=4) class FoodOrderedManagers(models.Model): # The managers on this model should be ordered by their creation # counter and not by the order in model body food_no_mgr = NoMigrationFoodManager('x', 'y') food_mgr2 = mgr2 food_mgr1 = mgr1 class Meta: app_label = "migrations" apps = new_apps project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] author_proxy_state = project_state.models['migrations', 'authorproxy'] sub_author_state = project_state.models['migrations', 'subauthor'] book_state = project_state.models['migrations', 'book'] food_state = project_state.models['migrations', 'food'] food_no_managers_state = project_state.models['migrations', 'foodnomanagers'] food_no_default_manager_state = project_state.models['migrations', 'foodnodefaultmanager'] food_order_manager_state = project_state.models['migrations', 'foodorderedmanagers'] book_index = models.Index(fields=['title']) book_index.set_name_with_model(Book) self.assertEqual(author_state.app_label, "migrations") self.assertEqual(author_state.name, "Author") self.assertEqual([x for x, y in author_state.fields], ["id", "name", "bio", "age"]) self.assertEqual(author_state.fields[1][1].max_length, 255) self.assertIs(author_state.fields[2][1].null, False) self.assertIs(author_state.fields[3][1].null, True) self.assertEqual( author_state.options, { 
"unique_together": {("name", "bio")}, "index_together": {("bio", "age")}, "indexes": [], "constraints": [], } ) self.assertEqual(author_state.bases, (models.Model,)) self.assertEqual(book_state.app_label, "migrations") self.assertEqual(book_state.name, "Book") self.assertEqual([x for x, y in book_state.fields], ["id", "title", "author", "contributors"]) self.assertEqual(book_state.fields[1][1].max_length, 1000) self.assertIs(book_state.fields[2][1].null, False) self.assertEqual(book_state.fields[3][1].__class__.__name__, "ManyToManyField") self.assertEqual( book_state.options, {"verbose_name": "tome", "db_table": "test_tome", "indexes": [book_index], "constraints": []}, ) self.assertEqual(book_state.bases, (models.Model,)) self.assertEqual(author_proxy_state.app_label, "migrations") self.assertEqual(author_proxy_state.name, "AuthorProxy") self.assertEqual(author_proxy_state.fields, []) self.assertEqual( author_proxy_state.options, {"proxy": True, "ordering": ["name"], "indexes": [], "constraints": []}, ) self.assertEqual(author_proxy_state.bases, ("migrations.author",)) self.assertEqual(sub_author_state.app_label, "migrations") self.assertEqual(sub_author_state.name, "SubAuthor") self.assertEqual(len(sub_author_state.fields), 2) self.assertEqual(sub_author_state.bases, ("migrations.author",)) # The default manager is used in migrations self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr']) self.assertTrue(all(isinstance(name, str) for name, mgr in food_state.managers)) self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2)) # No explicit managers defined. Migrations will fall back to the default self.assertEqual(food_no_managers_state.managers, []) # food_mgr is used in migration but isn't the default mgr, hence add the # default self.assertEqual([name for name, mgr in food_no_default_manager_state.managers], ['food_no_mgr', 'food_mgr']) self.assertTrue(all(isinstance(name, str) for name, mgr in food_no_default_manager_state.managers)) self.assertEqual(food_no_default_manager_state.managers[0][1].__class__, models.Manager) self.assertIsInstance(food_no_default_manager_state.managers[1][1], FoodManager) self.assertEqual([name for name, mgr in food_order_manager_state.managers], ['food_mgr1', 'food_mgr2']) self.assertTrue(all(isinstance(name, str) for name, mgr in food_order_manager_state.managers)) self.assertEqual([mgr.args for name, mgr in food_order_manager_state.managers], [('a', 'b', 1, 2), ('x', 'y', 3, 4)]) def test_custom_default_manager_added_to_the_model_state(self): """ When the default manager of the model is a custom manager, it needs to be added to the model state. """ new_apps = Apps(['migrations']) custom_manager = models.Manager() class Author(models.Model): objects = models.TextField() authors = custom_manager class Meta: app_label = 'migrations' apps = new_apps project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] self.assertEqual(author_state.managers, [('authors', custom_manager)]) def test_custom_default_manager_named_objects_with_false_migration_flag(self): """ When a manager is added with a name of 'objects' but it does not have `use_in_migrations = True`, no migration should be added to the model state (#26643). 
""" new_apps = Apps(['migrations']) class Author(models.Model): objects = models.Manager() class Meta: app_label = 'migrations' apps = new_apps project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] self.assertEqual(author_state.managers, []) def test_no_duplicate_managers(self): """ When a manager is added with `use_in_migrations = True` and a parent model had a manager with the same name and `use_in_migrations = True`, the parent's manager shouldn't appear in the model state (#26881). """ new_apps = Apps(['migrations']) class PersonManager(models.Manager): use_in_migrations = True class Person(models.Model): objects = PersonManager() class Meta: abstract = True class BossManager(PersonManager): use_in_migrations = True class Boss(Person): objects = BossManager() class Meta: app_label = 'migrations' apps = new_apps project_state = ProjectState.from_apps(new_apps) boss_state = project_state.models['migrations', 'boss'] self.assertEqual(boss_state.managers, [('objects', Boss.objects)]) def test_custom_default_manager(self): new_apps = Apps(['migrations']) class Author(models.Model): manager1 = models.Manager() manager2 = models.Manager() class Meta: app_label = 'migrations' apps = new_apps default_manager_name = 'manager2' project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] self.assertEqual(author_state.options['default_manager_name'], 'manager2') self.assertEqual(author_state.managers, [('manager2', Author.manager1)]) def test_custom_base_manager(self): new_apps = Apps(['migrations']) class Author(models.Model): manager1 = models.Manager() manager2 = models.Manager() class Meta: app_label = 'migrations' apps = new_apps base_manager_name = 'manager2' class Author2(models.Model): manager1 = models.Manager() manager2 = models.Manager() class Meta: app_label = 'migrations' apps = new_apps base_manager_name = 'manager1' project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] self.assertEqual(author_state.options['base_manager_name'], 'manager2') self.assertEqual(author_state.managers, [ ('manager1', Author.manager1), ('manager2', Author.manager2), ]) author2_state = project_state.models['migrations', 'author2'] self.assertEqual(author2_state.options['base_manager_name'], 'manager1') self.assertEqual(author2_state.managers, [ ('manager1', Author2.manager1), ]) def test_apps_bulk_update(self): """ StateApps.bulk_update() should update apps.ready to False and reset the value afterwards. """ project_state = ProjectState() apps = project_state.apps with apps.bulk_update(): self.assertFalse(apps.ready) self.assertTrue(apps.ready) with self.assertRaises(ValueError): with apps.bulk_update(): self.assertFalse(apps.ready) raise ValueError() self.assertTrue(apps.ready) def test_render(self): """ Tests rendering a ProjectState into an Apps. 
""" project_state = ProjectState() project_state.add_model(ModelState( app_label="migrations", name="Tag", fields=[ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ("hidden", models.BooleanField()), ], )) project_state.add_model(ModelState( app_label="migrations", name="SubTag", fields=[ ('tag_ptr', models.OneToOneField( 'migrations.Tag', models.CASCADE, auto_created=True, parent_link=True, primary_key=True, to_field='id', serialize=False, )), ("awesome", models.BooleanField()), ], bases=("migrations.Tag",), )) base_mgr = models.Manager() mgr1 = FoodManager('a', 'b') mgr2 = FoodManager('x', 'y', c=3, d=4) project_state.add_model(ModelState( app_label="migrations", name="Food", fields=[ ("id", models.AutoField(primary_key=True)), ], managers=[ # The ordering we really want is objects, mgr1, mgr2 ('default', base_mgr), ('food_mgr2', mgr2), ('food_mgr1', mgr1), ] )) new_apps = project_state.apps self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field("name").max_length, 100) self.assertIs(new_apps.get_model("migrations", "Tag")._meta.get_field("hidden").null, False) self.assertEqual(len(new_apps.get_model("migrations", "SubTag")._meta.local_fields), 2) Food = new_apps.get_model("migrations", "Food") self.assertEqual([mgr.name for mgr in Food._meta.managers], ['default', 'food_mgr1', 'food_mgr2']) self.assertTrue(all(isinstance(mgr.name, str) for mgr in Food._meta.managers)) self.assertEqual([mgr.__class__ for mgr in Food._meta.managers], [models.Manager, FoodManager, FoodManager]) def test_render_model_inheritance(self): class Book(models.Model): title = models.CharField(max_length=1000) class Meta: app_label = "migrations" apps = Apps() class Novel(Book): class Meta: app_label = "migrations" apps = Apps() # First, test rendering individually apps = Apps(["migrations"]) # We shouldn't be able to render yet ms = ModelState.from_model(Novel) with self.assertRaises(InvalidBasesError): ms.render(apps) # Once the parent model is in the app registry, it should be fine ModelState.from_model(Book).render(apps) ModelState.from_model(Novel).render(apps) def test_render_model_with_multiple_inheritance(self): class Foo(models.Model): class Meta: app_label = "migrations" apps = Apps() class Bar(models.Model): class Meta: app_label = "migrations" apps = Apps() class FooBar(Foo, Bar): class Meta: app_label = "migrations" apps = Apps() class AbstractSubFooBar(FooBar): class Meta: abstract = True apps = Apps() class SubFooBar(AbstractSubFooBar): class Meta: app_label = "migrations" apps = Apps() apps = Apps(["migrations"]) # We shouldn't be able to render yet ms = ModelState.from_model(FooBar) with self.assertRaises(InvalidBasesError): ms.render(apps) # Once the parent models are in the app registry, it should be fine ModelState.from_model(Foo).render(apps) self.assertSequenceEqual(ModelState.from_model(Foo).bases, [models.Model]) ModelState.from_model(Bar).render(apps) self.assertSequenceEqual(ModelState.from_model(Bar).bases, [models.Model]) ModelState.from_model(FooBar).render(apps) self.assertSequenceEqual(ModelState.from_model(FooBar).bases, ['migrations.foo', 'migrations.bar']) ModelState.from_model(SubFooBar).render(apps) self.assertSequenceEqual(ModelState.from_model(SubFooBar).bases, ['migrations.foobar']) def test_render_project_dependencies(self): """ The ProjectState render method correctly renders models to account for inter-model base dependencies. 
""" new_apps = Apps() class A(models.Model): class Meta: app_label = "migrations" apps = new_apps class B(A): class Meta: app_label = "migrations" apps = new_apps class C(B): class Meta: app_label = "migrations" apps = new_apps class D(A): class Meta: app_label = "migrations" apps = new_apps class E(B): class Meta: app_label = "migrations" apps = new_apps proxy = True class F(D): class Meta: app_label = "migrations" apps = new_apps proxy = True # Make a ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) project_state.add_model(ModelState.from_model(C)) project_state.add_model(ModelState.from_model(D)) project_state.add_model(ModelState.from_model(E)) project_state.add_model(ModelState.from_model(F)) final_apps = project_state.apps self.assertEqual(len(final_apps.get_models()), 6) # Now make an invalid ProjectState and make sure it fails project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) project_state.add_model(ModelState.from_model(C)) project_state.add_model(ModelState.from_model(F)) with self.assertRaises(InvalidBasesError): project_state.apps def test_render_unique_app_labels(self): """ The ProjectState render method doesn't raise an ImproperlyConfigured exception about unique labels if two dotted app names have the same last part. """ class A(models.Model): class Meta: app_label = "django.contrib.auth" class B(models.Model): class Meta: app_label = "vendor.auth" # Make a ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) self.assertEqual(len(project_state.apps.get_models()), 2) def test_add_relations(self): """ #24573 - Adding relations to existing models should reload the referenced models too. 
""" new_apps = Apps() class A(models.Model): class Meta: app_label = 'something' apps = new_apps class B(A): class Meta: app_label = 'something' apps = new_apps class C(models.Model): class Meta: app_label = 'something' apps = new_apps project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) project_state.add_model(ModelState.from_model(C)) project_state.apps # We need to work with rendered models old_state = project_state.clone() model_a_old = old_state.apps.get_model('something', 'A') model_b_old = old_state.apps.get_model('something', 'B') model_c_old = old_state.apps.get_model('something', 'C') # The relations between the old models are correct self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old) self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old) operation = AddField('c', 'to_a', models.OneToOneField( 'something.A', models.CASCADE, related_name='from_c', )) operation.state_forwards('something', project_state) model_a_new = project_state.apps.get_model('something', 'A') model_b_new = project_state.apps.get_model('something', 'B') model_c_new = project_state.apps.get_model('something', 'C') # All models have changed self.assertIsNot(model_a_old, model_a_new) self.assertIsNot(model_b_old, model_b_new) self.assertIsNot(model_c_old, model_c_new) # The relations between the old models still hold self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old) self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old) # The relations between the new models correct self.assertIs(model_a_new._meta.get_field('b').related_model, model_b_new) self.assertIs(model_b_new._meta.get_field('a_ptr').related_model, model_a_new) self.assertIs(model_a_new._meta.get_field('from_c').related_model, model_c_new) self.assertIs(model_c_new._meta.get_field('to_a').related_model, model_a_new) def test_remove_relations(self): """ #24225 - Relations between models are updated while remaining the relations and references for models of an old state. 
""" new_apps = Apps() class A(models.Model): class Meta: app_label = "something" apps = new_apps class B(models.Model): to_a = models.ForeignKey(A, models.CASCADE) class Meta: app_label = "something" apps = new_apps def get_model_a(state): return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0] project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1) old_state = project_state.clone() operation = RemoveField("b", "to_a") operation.state_forwards("something", project_state) # Model from old_state still has the relation model_a_old = get_model_a(old_state) model_a_new = get_model_a(project_state) self.assertIsNot(model_a_old, model_a_new) self.assertEqual(len(model_a_old._meta.related_objects), 1) self.assertEqual(len(model_a_new._meta.related_objects), 0) # Same test for deleted model project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) old_state = project_state.clone() operation = DeleteModel("b") operation.state_forwards("something", project_state) model_a_old = get_model_a(old_state) model_a_new = get_model_a(project_state) self.assertIsNot(model_a_old, model_a_new) self.assertEqual(len(model_a_old._meta.related_objects), 1) self.assertEqual(len(model_a_new._meta.related_objects), 0) def test_self_relation(self): """ #24513 - Modifying an object pointing to itself would cause it to be rendered twice and thus breaking its related M2M through objects. """ class A(models.Model): to_a = models.ManyToManyField('something.A', symmetrical=False) class Meta: app_label = "something" def get_model_a(state): return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0] project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1) old_state = project_state.clone() operation = AlterField( model_name="a", name="to_a", field=models.ManyToManyField("something.A", symmetrical=False, blank=True) ) # At this point the model would be rendered twice causing its related # M2M through objects to point to an old copy and thus breaking their # attribute lookup. operation.state_forwards("something", project_state) model_a_old = get_model_a(old_state) model_a_new = get_model_a(project_state) self.assertIsNot(model_a_old, model_a_new) # The old model's _meta is still consistent field_to_a_old = model_a_old._meta.get_field("to_a") self.assertEqual(field_to_a_old.m2m_field_name(), "from_a") self.assertEqual(field_to_a_old.m2m_reverse_field_name(), "to_a") self.assertIs(field_to_a_old.related_model, model_a_old) self.assertIs(field_to_a_old.remote_field.through._meta.get_field('to_a').related_model, model_a_old) self.assertIs(field_to_a_old.remote_field.through._meta.get_field('from_a').related_model, model_a_old) # The new model's _meta is still consistent field_to_a_new = model_a_new._meta.get_field("to_a") self.assertEqual(field_to_a_new.m2m_field_name(), "from_a") self.assertEqual(field_to_a_new.m2m_reverse_field_name(), "to_a") self.assertIs(field_to_a_new.related_model, model_a_new) self.assertIs(field_to_a_new.remote_field.through._meta.get_field('to_a').related_model, model_a_new) self.assertIs(field_to_a_new.remote_field.through._meta.get_field('from_a').related_model, model_a_new) def test_equality(self): """ == and != are implemented correctly. 
""" # Test two things that should be equal project_state = ProjectState() project_state.add_model(ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ("hidden", models.BooleanField()), ], {}, None, )) project_state.apps # Fill the apps cached property other_state = project_state.clone() self.assertEqual(project_state, project_state) self.assertEqual(project_state, other_state) self.assertIs(project_state != project_state, False) self.assertIs(project_state != other_state, False) self.assertNotEqual(project_state.apps, other_state.apps) # Make a very small change (max_len 99) and see if that affects it project_state = ProjectState() project_state.add_model(ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=99)), ("hidden", models.BooleanField()), ], {}, None, )) self.assertNotEqual(project_state, other_state) self.assertIs(project_state == other_state, False) def test_dangling_references_throw_error(self): new_apps = Apps() class Author(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Publisher(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) publisher = models.ForeignKey(Publisher, models.CASCADE) class Meta: app_label = "migrations" apps = new_apps class Magazine(models.Model): authors = models.ManyToManyField(Author) class Meta: app_label = "migrations" apps = new_apps # Make a valid ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(Author)) project_state.add_model(ModelState.from_model(Publisher)) project_state.add_model(ModelState.from_model(Book)) project_state.add_model(ModelState.from_model(Magazine)) self.assertEqual(len(project_state.apps.get_models()), 4) # now make an invalid one with a ForeignKey project_state = ProjectState() project_state.add_model(ModelState.from_model(Book)) msg = ( "The field migrations.Book.author was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n" "The field migrations.Book.publisher was declared with a lazy reference " "to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'." ) with self.assertRaisesMessage(ValueError, msg): project_state.apps # And another with ManyToManyField. project_state = ProjectState() project_state.add_model(ModelState.from_model(Magazine)) msg = ( "The field migrations.Magazine.authors was declared with a lazy reference " "to 'migrations.author\', but app 'migrations' doesn't provide model 'author'.\n" "The field migrations.Magazine_authors.author was declared with a lazy reference " "to \'migrations.author\', but app 'migrations' doesn't provide model 'author'." ) with self.assertRaisesMessage(ValueError, msg): project_state.apps # And now with multiple models and multiple fields. 
project_state.add_model(ModelState.from_model(Book)) msg = ( "The field migrations.Book.author was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n" "The field migrations.Book.publisher was declared with a lazy reference " "to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'.\n" "The field migrations.Magazine.authors was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n" "The field migrations.Magazine_authors.author was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model 'author'." ) with self.assertRaisesMessage(ValueError, msg): project_state.apps def test_real_apps(self): """ Including real apps can resolve dangling FK errors. This test relies on the fact that contenttypes is always loaded. """ new_apps = Apps() class TestModel(models.Model): ct = models.ForeignKey("contenttypes.ContentType", models.CASCADE) class Meta: app_label = "migrations" apps = new_apps # If we just stick it into an empty state it should fail project_state = ProjectState() project_state.add_model(ModelState.from_model(TestModel)) with self.assertRaises(ValueError): project_state.apps # If we include the real app it should succeed project_state = ProjectState(real_apps=["contenttypes"]) project_state.add_model(ModelState.from_model(TestModel)) rendered_state = project_state.apps self.assertEqual( len([x for x in rendered_state.get_models() if x._meta.app_label == "migrations"]), 1, ) def test_ignore_order_wrt(self): """ Makes sure ProjectState doesn't include OrderWrt fields when making from existing models. """ new_apps = Apps() class Author(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) class Meta: app_label = "migrations" apps = new_apps order_with_respect_to = "author" # Make a valid ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(Author)) project_state.add_model(ModelState.from_model(Book)) self.assertEqual( [name for name, field in project_state.models["migrations", "book"].fields], ["id", "author"], ) def test_manager_refer_correct_model_version(self): """ #24147 - Managers refer to the correct version of a historical model """ project_state = ProjectState() project_state.add_model(ModelState( app_label="migrations", name="Tag", fields=[ ("id", models.AutoField(primary_key=True)), ("hidden", models.BooleanField()), ], managers=[ ('food_mgr', FoodManager('a', 'b')), ('food_qs', FoodQuerySet.as_manager()), ] )) old_model = project_state.apps.get_model('migrations', 'tag') new_state = project_state.clone() operation = RemoveField("tag", "hidden") operation.state_forwards("migrations", new_state) new_model = new_state.apps.get_model('migrations', 'tag') self.assertIsNot(old_model, new_model) self.assertIs(old_model, old_model.food_mgr.model) self.assertIs(old_model, old_model.food_qs.model) self.assertIs(new_model, new_model.food_mgr.model) self.assertIs(new_model, new_model.food_qs.model) self.assertIsNot(old_model.food_mgr, new_model.food_mgr) self.assertIsNot(old_model.food_qs, new_model.food_qs) self.assertIsNot(old_model.food_mgr.model, new_model.food_mgr.model) self.assertIsNot(old_model.food_qs.model, new_model.food_qs.model) def test_choices_iterator(self): """ #24483 - ProjectState.from_apps should not destructively consume 
Field.choices iterators. """ new_apps = Apps(["migrations"]) choices = [('a', 'A'), ('b', 'B')] class Author(models.Model): name = models.CharField(max_length=255) choice = models.CharField(max_length=255, choices=iter(choices)) class Meta: app_label = "migrations" apps = new_apps ProjectState.from_apps(new_apps) choices_field = Author._meta.get_field('choice') self.assertEqual(list(choices_field.choices), choices) class ModelStateTests(SimpleTestCase): def test_custom_model_base(self): state = ModelState.from_model(ModelWithCustomBase) self.assertEqual(state.bases, (models.Model,)) def test_bound_field_sanity_check(self): field = models.CharField(max_length=1) field.model = models.Model with self.assertRaisesMessage(ValueError, 'ModelState.fields cannot be bound to a model - "field" is.'): ModelState('app', 'Model', [('field', field)]) def test_sanity_check_to(self): field = models.ForeignKey(UnicodeModel, models.CASCADE) with self.assertRaisesMessage( ValueError, 'ModelState.fields cannot refer to a model class - "field.to" does. ' 'Use a string reference instead.' ): ModelState('app', 'Model', [('field', field)]) def test_sanity_check_through(self): field = models.ManyToManyField('UnicodeModel') field.remote_field.through = UnicodeModel with self.assertRaisesMessage( ValueError, 'ModelState.fields cannot refer to a model class - "field.through" does. ' 'Use a string reference instead.' ): ModelState('app', 'Model', [('field', field)]) def test_sanity_index_name(self): field = models.IntegerField() options = {'indexes': [models.Index(fields=['field'])]} msg = "Indexes passed to ModelState require a name attribute. <Index: fields='field'> doesn't have one." with self.assertRaisesMessage(ValueError, msg): ModelState('app', 'Model', [('field', field)], options=options) def test_fields_immutability(self): """ Rendering a model state doesn't alter its internal fields. 
""" apps = Apps() field = models.CharField(max_length=1) state = ModelState('app', 'Model', [('name', field)]) Model = state.render(apps) self.assertNotEqual(Model._meta.get_field('name'), field) def test_repr(self): field = models.CharField(max_length=1) state = ModelState('app', 'Model', [('name', field)], bases=['app.A', 'app.B', 'app.C']) self.assertEqual(repr(state), "<ModelState: 'app.Model'>") project_state = ProjectState() project_state.add_model(state) with self.assertRaisesMessage(InvalidBasesError, "Cannot resolve bases for [<ModelState: 'app.Model'>]"): project_state.apps @override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel') def test_create_swappable(self): """ Tests making a ProjectState from an Apps with a swappable model """ new_apps = Apps(['migrations']) class Author(models.Model): name = models.CharField(max_length=255) bio = models.TextField() age = models.IntegerField(blank=True, null=True) class Meta: app_label = 'migrations' apps = new_apps swappable = 'TEST_SWAPPABLE_MODEL' author_state = ModelState.from_model(Author) self.assertEqual(author_state.app_label, 'migrations') self.assertEqual(author_state.name, 'Author') self.assertEqual([x for x, y in author_state.fields], ['id', 'name', 'bio', 'age']) self.assertEqual(author_state.fields[1][1].max_length, 255) self.assertIs(author_state.fields[2][1].null, False) self.assertIs(author_state.fields[3][1].null, True) self.assertEqual(author_state.options, {'swappable': 'TEST_SWAPPABLE_MODEL', 'indexes': [], "constraints": []}) self.assertEqual(author_state.bases, (models.Model,)) self.assertEqual(author_state.managers, []) @override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel') def test_create_swappable_from_abstract(self): """ A swappable model inheriting from a hierarchy: concrete -> abstract -> concrete. 
""" new_apps = Apps(['migrations']) class SearchableLocation(models.Model): keywords = models.CharField(max_length=256) class Meta: app_label = 'migrations' apps = new_apps class Station(SearchableLocation): name = models.CharField(max_length=128) class Meta: abstract = True class BusStation(Station): bus_routes = models.CharField(max_length=128) inbound = models.BooleanField(default=False) class Meta(Station.Meta): app_label = 'migrations' apps = new_apps swappable = 'TEST_SWAPPABLE_MODEL' station_state = ModelState.from_model(BusStation) self.assertEqual(station_state.app_label, 'migrations') self.assertEqual(station_state.name, 'BusStation') self.assertEqual( [x for x, y in station_state.fields], ['searchablelocation_ptr', 'name', 'bus_routes', 'inbound'] ) self.assertEqual(station_state.fields[1][1].max_length, 128) self.assertIs(station_state.fields[2][1].null, False) self.assertEqual( station_state.options, {'abstract': False, 'swappable': 'TEST_SWAPPABLE_MODEL', 'indexes': [], 'constraints': []} ) self.assertEqual(station_state.bases, ('migrations.searchablelocation',)) self.assertEqual(station_state.managers, []) @override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel') def test_custom_manager_swappable(self): """ Tests making a ProjectState from unused models with custom managers """ new_apps = Apps(['migrations']) class Food(models.Model): food_mgr = FoodManager('a', 'b') food_qs = FoodQuerySet.as_manager() food_no_mgr = NoMigrationFoodManager('x', 'y') class Meta: app_label = "migrations" apps = new_apps swappable = 'TEST_SWAPPABLE_MODEL' food_state = ModelState.from_model(Food) # The default manager is used in migrations self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr']) self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2)) @isolate_apps('migrations', 'django.contrib.contenttypes') def test_order_with_respect_to_private_field(self): class PrivateFieldModel(models.Model): content_type = models.ForeignKey('contenttypes.ContentType', models.CASCADE) object_id = models.PositiveIntegerField() private = GenericForeignKey() class Meta: order_with_respect_to = 'private' state = ModelState.from_model(PrivateFieldModel) self.assertNotIn('order_with_respect_to', state.options) @isolate_apps('migrations') def test_abstract_model_children_inherit_indexes(self): class Abstract(models.Model): name = models.CharField(max_length=50) class Meta: app_label = 'migrations' abstract = True indexes = [models.Index(fields=['name'])] class Child1(Abstract): pass class Child2(Abstract): pass child1_state = ModelState.from_model(Child1) child2_state = ModelState.from_model(Child2) index_names = [index.name for index in child1_state.options['indexes']] self.assertEqual(index_names, ['migrations__name_b0afd7_idx']) index_names = [index.name for index in child2_state.options['indexes']] self.assertEqual(index_names, ['migrations__name_016466_idx']) # Modifying the state doesn't modify the index on the model. 
child1_state.options['indexes'][0].name = 'bar' self.assertEqual(Child1._meta.indexes[0].name, 'migrations__name_b0afd7_idx') @isolate_apps('migrations') def test_explicit_index_name(self): class TestModel(models.Model): name = models.CharField(max_length=50) class Meta: app_label = 'migrations' indexes = [models.Index(fields=['name'], name='foo_idx')] model_state = ModelState.from_model(TestModel) index_names = [index.name for index in model_state.options['indexes']] self.assertEqual(index_names, ['foo_idx']) @isolate_apps('migrations') def test_from_model_constraints(self): class ModelWithConstraints(models.Model): size = models.IntegerField() class Meta: constraints = [models.CheckConstraint(check=models.Q(size__gt=1), name='size_gt_1')] state = ModelState.from_model(ModelWithConstraints) model_constraints = ModelWithConstraints._meta.constraints state_constraints = state.options['constraints'] self.assertEqual(model_constraints, state_constraints) self.assertIsNot(model_constraints, state_constraints) self.assertIsNot(model_constraints[0], state_constraints[0]) class RelatedModelsTests(SimpleTestCase): def setUp(self): self.apps = Apps(['migrations.related_models_app']) def create_model(self, name, foreign_keys=[], bases=(), abstract=False, proxy=False): test_name = 'related_models_app' assert not (abstract and proxy) meta_contents = { 'abstract': abstract, 'app_label': test_name, 'apps': self.apps, 'proxy': proxy, } meta = type("Meta", (), meta_contents) if not bases: bases = (models.Model,) body = { 'Meta': meta, '__module__': "__fake__", } fname_base = fname = '%s_%%d' % name.lower() for i, fk in enumerate(foreign_keys, 1): fname = fname_base % i body[fname] = fk return type(name, bases, body) def assertRelated(self, model, needle): self.assertEqual( get_related_models_recursive(model), {(n._meta.app_label, n._meta.model_name) for n in needle}, ) def test_unrelated(self): A = self.create_model("A") B = self.create_model("B") self.assertRelated(A, []) self.assertRelated(B, []) def test_direct_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)]) B = self.create_model("B") self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_direct_hidden_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE, related_name='+')]) B = self.create_model("B") self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_fk_through_proxy(self): A = self.create_model("A") B = self.create_model("B", bases=(A,), proxy=True) C = self.create_model("C", bases=(B,), proxy=True) D = self.create_model("D", foreign_keys=[models.ForeignKey('C', models.CASCADE)]) self.assertRelated(A, [B, C, D]) self.assertRelated(B, [A, C, D]) self.assertRelated(C, [A, B, D]) self.assertRelated(D, [A, B, C]) def test_nested_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)]) B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)]) C = self.create_model("C") self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_two_sided(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)]) B = self.create_model("B", foreign_keys=[models.ForeignKey('A', models.CASCADE)]) self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_circle(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)]) B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)]) C = 
self.create_model("C", foreign_keys=[models.ForeignKey('A', models.CASCADE)]) self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,)) self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_nested_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,)) C = self.create_model("C", bases=(B,)) self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_multiple_bases(self): A = self.create_model("A") B = self.create_model("B") C = self.create_model("C", bases=(A, B,)) self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_multiple_nested_bases(self): A = self.create_model("A") B = self.create_model("B") C = self.create_model("C", bases=(A, B,)) D = self.create_model("D") E = self.create_model("E", bases=(D,)) F = self.create_model("F", bases=(C, E,)) Y = self.create_model("Y") Z = self.create_model("Z", bases=(Y,)) self.assertRelated(A, [B, C, D, E, F]) self.assertRelated(B, [A, C, D, E, F]) self.assertRelated(C, [A, B, D, E, F]) self.assertRelated(D, [A, B, C, E, F]) self.assertRelated(E, [A, B, C, D, F]) self.assertRelated(F, [A, B, C, D, E]) self.assertRelated(Y, [Z]) self.assertRelated(Z, [Y]) def test_base_to_base_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('Y', models.CASCADE)]) B = self.create_model("B", bases=(A,)) Y = self.create_model("Y") Z = self.create_model("Z", bases=(Y,)) self.assertRelated(A, [B, Y, Z]) self.assertRelated(B, [A, Y, Z]) self.assertRelated(Y, [A, B, Z]) self.assertRelated(Z, [A, B, Y]) def test_base_to_subclass_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('Z', models.CASCADE)]) B = self.create_model("B", bases=(A,)) Y = self.create_model("Y") Z = self.create_model("Z", bases=(Y,)) self.assertRelated(A, [B, Y, Z]) self.assertRelated(B, [A, Y, Z]) self.assertRelated(Y, [A, B, Z]) self.assertRelated(Z, [A, B, Y]) def test_direct_m2m(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('B')]) B = self.create_model("B") self.assertRelated(A, [A.a_1.rel.through, B]) self.assertRelated(B, [A, A.a_1.rel.through]) def test_direct_m2m_self(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('A')]) self.assertRelated(A, [A.a_1.rel.through]) def test_intermediate_m2m_self(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('A', through='T')]) T = self.create_model("T", foreign_keys=[ models.ForeignKey('A', models.CASCADE), models.ForeignKey('A', models.CASCADE), ]) self.assertRelated(A, [T]) self.assertRelated(T, [A]) def test_intermediate_m2m(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')]) B = self.create_model("B") T = self.create_model("T", foreign_keys=[ models.ForeignKey('A', models.CASCADE), models.ForeignKey('B', models.CASCADE), ]) self.assertRelated(A, [B, T]) self.assertRelated(B, [A, T]) self.assertRelated(T, [A, B]) def test_intermediate_m2m_extern_fk(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')]) B = self.create_model("B") Z = self.create_model("Z") T = self.create_model("T", foreign_keys=[ models.ForeignKey('A', models.CASCADE), models.ForeignKey('B', models.CASCADE), models.ForeignKey('Z', models.CASCADE), ]) self.assertRelated(A, [B, T, Z]) self.assertRelated(B, [A, T, Z]) self.assertRelated(T, [A, B, Z]) self.assertRelated(Z, [A, B, T]) 
def test_intermediate_m2m_base(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')]) B = self.create_model("B") S = self.create_model("S") T = self.create_model("T", foreign_keys=[ models.ForeignKey('A', models.CASCADE), models.ForeignKey('B', models.CASCADE), ], bases=(S,)) self.assertRelated(A, [B, S, T]) self.assertRelated(B, [A, S, T]) self.assertRelated(S, [A, B, T]) self.assertRelated(T, [A, B, S]) def test_generic_fk(self): A = self.create_model("A", foreign_keys=[ models.ForeignKey('B', models.CASCADE), GenericForeignKey(), ]) B = self.create_model("B", foreign_keys=[ models.ForeignKey('C', models.CASCADE), ]) self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_abstract_base(self): A = self.create_model("A", abstract=True) B = self.create_model("B", bases=(A,)) self.assertRelated(A, [B]) self.assertRelated(B, []) def test_nested_abstract_base(self): A = self.create_model("A", abstract=True) B = self.create_model("B", bases=(A,), abstract=True) C = self.create_model("C", bases=(B,)) self.assertRelated(A, [B, C]) self.assertRelated(B, [C]) self.assertRelated(C, []) def test_proxy_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,), proxy=True) self.assertRelated(A, [B]) self.assertRelated(B, []) def test_nested_proxy_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,), proxy=True) C = self.create_model("C", bases=(B,), proxy=True) self.assertRelated(A, [B, C]) self.assertRelated(B, [C]) self.assertRelated(C, []) def test_multiple_mixed_bases(self): A = self.create_model("A", abstract=True) M = self.create_model("M") P = self.create_model("P") Q = self.create_model("Q", bases=(P,), proxy=True) Z = self.create_model("Z", bases=(A, M, Q)) # M has a pointer O2O field p_ptr to P self.assertRelated(A, [M, P, Q, Z]) self.assertRelated(M, [P, Q, Z]) self.assertRelated(P, [M, Q, Z]) self.assertRelated(Q, [M, P, Z]) self.assertRelated(Z, [M, P, Q])
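

# Editorial sketch (not part of the original suite): a compact illustration of
# what get_related_models_recursive() reports for a single foreign key. The
# model names 'Owner' and 'Pet' are arbitrary; both directions of the relation
# appear in the result, keyed by (app_label, model_name), mirroring the
# assertions in RelatedModelsTests above.
def _related_models_sketch():
    sketch_apps = Apps(['migrations'])

    class Owner(models.Model):
        class Meta:
            app_label = 'migrations'
            apps = sketch_apps

    class Pet(models.Model):
        owner = models.ForeignKey(Owner, models.CASCADE)

        class Meta:
            app_label = 'migrations'
            apps = sketch_apps

    # Each model reports the other as related.
    assert get_related_models_recursive(Owner) == {('migrations', 'pet')}
    assert get_related_models_recursive(Pet) == {('migrations', 'owner')}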
from datetime import datetime, timedelta from django.conf import settings from django.contrib.auth.models import User from django.contrib.auth.tokens import PasswordResetTokenGenerator from django.test import TestCase class MockedPasswordResetTokenGenerator(PasswordResetTokenGenerator): def __init__(self, now): self._now_val = now def _now(self): return self._now_val class TokenGeneratorTest(TestCase): def test_make_token(self): user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw') p0 = PasswordResetTokenGenerator() tk1 = p0.make_token(user) self.assertIs(p0.check_token(user, tk1), True) def test_10265(self): """ The token generated for a user created in the same request will work correctly. """ user = User.objects.create_user('comebackkid', '[email protected]', 'testpw') user_reload = User.objects.get(username='comebackkid') p0 = MockedPasswordResetTokenGenerator(datetime.now()) tk1 = p0.make_token(user) tk2 = p0.make_token(user_reload) self.assertEqual(tk1, tk2) def test_timeout(self): """The token is valid after n seconds, but no greater.""" # Uses a mocked version of PasswordResetTokenGenerator so we can change # the value of 'now'. user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw') p0 = PasswordResetTokenGenerator() tk1 = p0.make_token(user) p1 = MockedPasswordResetTokenGenerator( datetime.now() + timedelta(seconds=settings.PASSWORD_RESET_TIMEOUT) ) self.assertIs(p1.check_token(user, tk1), True) p2 = MockedPasswordResetTokenGenerator( datetime.now() + timedelta(seconds=(settings.PASSWORD_RESET_TIMEOUT + 1)) ) self.assertIs(p2.check_token(user, tk1), False) with self.settings(PASSWORD_RESET_TIMEOUT=60 * 60): p3 = MockedPasswordResetTokenGenerator( datetime.now() + timedelta(seconds=settings.PASSWORD_RESET_TIMEOUT) ) self.assertIs(p3.check_token(user, tk1), True) p4 = MockedPasswordResetTokenGenerator( datetime.now() + timedelta(seconds=(settings.PASSWORD_RESET_TIMEOUT + 1)) ) self.assertIs(p4.check_token(user, tk1), False) def test_check_token_with_nonexistent_token_and_user(self): user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw') p0 = PasswordResetTokenGenerator() tk1 = p0.make_token(user) self.assertIs(p0.check_token(None, tk1), False) self.assertIs(p0.check_token(user, None), False) def test_token_with_different_secret(self): """ A valid token can be created with a secret other than SECRET_KEY by using the PasswordResetTokenGenerator.secret attribute. """ user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw') new_secret = 'abcdefghijkl' # Create and check a token with a different secret. p0 = PasswordResetTokenGenerator() p0.secret = new_secret tk0 = p0.make_token(user) self.assertIs(p0.check_token(user, tk0), True) # Create and check a token with the default secret. p1 = PasswordResetTokenGenerator() self.assertEqual(p1.secret, settings.SECRET_KEY) self.assertNotEqual(p1.secret, new_secret) tk1 = p1.make_token(user) # Tokens created with a different secret don't validate. self.assertIs(p0.check_token(user, tk1), False) self.assertIs(p1.check_token(user, tk0), False)
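

# A note on the mocking pattern above (an editorial sketch, not part of the
# original suite): PasswordResetTokenGenerator derives each token from a
# timestamp obtained via its _now() hook, so overriding _now() lets a test
# move the clock without touching the system time. The subclass below freezes
# the clock at an arbitrary instant; the class name is hypothetical.
class FrozenClockTokenGenerator(PasswordResetTokenGenerator):
    """Token generator pinned to a fixed point in time (illustrative)."""

    def __init__(self, frozen_now):
        super().__init__()
        self._frozen_now = frozen_now

    def _now(self):
        # Always report the same instant, regardless of wall-clock time.
        return self._frozen_now


# Usage sketch: a token minted "now" validates against a generator whose
# clock is frozen at the same instant, e.g.:
#   generator = FrozenClockTokenGenerator(datetime.now())
#   token = generator.make_token(user)
#   assert generator.check_token(user, token) is True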
from django.contrib.auth.checks import ( check_models_permissions, check_user_model, ) from django.contrib.auth.models import AbstractBaseUser from django.core import checks from django.db import models from django.test import ( SimpleTestCase, override_settings, override_system_checks, ) from django.test.utils import isolate_apps from .models import CustomUserNonUniqueUsername @isolate_apps('auth_tests', attr_name='apps') @override_system_checks([check_user_model]) class UserModelChecksTests(SimpleTestCase): @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserNonListRequiredFields') def test_required_fields_is_list(self): """REQUIRED_FIELDS should be a list.""" class CustomUserNonListRequiredFields(AbstractBaseUser): username = models.CharField(max_length=30, unique=True) date_of_birth = models.DateField() USERNAME_FIELD = 'username' REQUIRED_FIELDS = 'date_of_birth' errors = checks.run_checks(app_configs=self.apps.get_app_configs()) self.assertEqual(errors, [ checks.Error( "'REQUIRED_FIELDS' must be a list or tuple.", obj=CustomUserNonListRequiredFields, id='auth.E001', ), ]) @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserBadRequiredFields') def test_username_not_in_required_fields(self): """USERNAME_FIELD should not appear in REQUIRED_FIELDS.""" class CustomUserBadRequiredFields(AbstractBaseUser): username = models.CharField(max_length=30, unique=True) date_of_birth = models.DateField() USERNAME_FIELD = 'username' REQUIRED_FIELDS = ['username', 'date_of_birth'] errors = checks.run_checks(self.apps.get_app_configs()) self.assertEqual(errors, [ checks.Error( "The field named as the 'USERNAME_FIELD' for a custom user model " "must not be included in 'REQUIRED_FIELDS'.", obj=CustomUserBadRequiredFields, id='auth.E002', ), ]) @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserNonUniqueUsername') def test_username_non_unique(self): """ A non-unique USERNAME_FIELD raises an error only if the default authentication backend is used. Otherwise, a warning is raised. """ errors = checks.run_checks() self.assertEqual(errors, [ checks.Error( "'CustomUserNonUniqueUsername.username' must be " "unique because it is named as the 'USERNAME_FIELD'.", obj=CustomUserNonUniqueUsername, id='auth.E003', ), ]) with self.settings(AUTHENTICATION_BACKENDS=['my.custom.backend']): errors = checks.run_checks() self.assertEqual(errors, [ checks.Warning( "'CustomUserNonUniqueUsername.username' is named as " "the 'USERNAME_FIELD', but it is not unique.", hint='Ensure that your authentication backend(s) can handle non-unique usernames.', obj=CustomUserNonUniqueUsername, id='auth.W004', ), ]) @override_settings(AUTH_USER_MODEL='auth_tests.BadUser') def test_is_anonymous_authenticated_methods(self): """ <User Model>.is_anonymous/is_authenticated must not be methods. """ class BadUser(AbstractBaseUser): username = models.CharField(max_length=30, unique=True) USERNAME_FIELD = 'username' def is_anonymous(self): return True def is_authenticated(self): return True errors = checks.run_checks(app_configs=self.apps.get_app_configs()) self.assertEqual(errors, [ checks.Critical( '%s.is_anonymous must be an attribute or property rather than ' 'a method. Ignoring this is a security issue as anonymous ' 'users will be treated as authenticated!' % BadUser, obj=BadUser, id='auth.C009', ), checks.Critical( '%s.is_authenticated must be an attribute or property rather ' 'than a method. Ignoring this is a security issue as anonymous ' 'users will be treated as authenticated!' 
% BadUser, obj=BadUser, id='auth.C010', ), ]) @isolate_apps('auth_tests', attr_name='apps') @override_system_checks([check_models_permissions]) class ModelsPermissionsChecksTests(SimpleTestCase): def test_clashing_default_permissions(self): class Checked(models.Model): class Meta: permissions = [ ('change_checked', 'Can edit permission (duplicate)') ] errors = checks.run_checks(self.apps.get_app_configs()) self.assertEqual(errors, [ checks.Error( "The permission codenamed 'change_checked' clashes with a builtin " "permission for model 'auth_tests.Checked'.", obj=Checked, id='auth.E005', ), ]) def test_non_clashing_custom_permissions(self): class Checked(models.Model): class Meta: permissions = [ ('my_custom_permission', 'Some permission'), ('other_one', 'Some other permission'), ] errors = checks.run_checks(self.apps.get_app_configs()) self.assertEqual(errors, []) def test_clashing_custom_permissions(self): class Checked(models.Model): class Meta: permissions = [ ('my_custom_permission', 'Some permission'), ('other_one', 'Some other permission'), ('my_custom_permission', 'Some permission with duplicate permission code'), ] errors = checks.run_checks(self.apps.get_app_configs()) self.assertEqual(errors, [ checks.Error( "The permission codenamed 'my_custom_permission' is duplicated for " "model 'auth_tests.Checked'.", obj=Checked, id='auth.E006', ), ]) def test_verbose_name_max_length(self): class Checked(models.Model): class Meta: verbose_name = 'some ridiculously long verbose name that is out of control' * 5 errors = checks.run_checks(self.apps.get_app_configs()) self.assertEqual(errors, [ checks.Error( "The verbose_name of model 'auth_tests.Checked' must be at most 244 " "characters for its builtin permission names to be at most 255 characters.", obj=Checked, id='auth.E007', ), ]) def test_model_name_max_length(self): model_name = 'X' * 94 model = type(model_name, (models.Model,), {'__module__': self.__module__}) errors = checks.run_checks(self.apps.get_app_configs()) self.assertEqual(errors, [ checks.Error( "The name of model 'auth_tests.%s' must be at most 93 " "characters for its builtin permission codenames to be at " "most 100 characters." % model_name, obj=model, id='auth.E011', ), ]) def test_custom_permission_name_max_length(self): custom_permission_name = 'some ridiculously long verbose name that is out of control' * 5 class Checked(models.Model): class Meta: permissions = [ ('my_custom_permission', custom_permission_name), ] errors = checks.run_checks(self.apps.get_app_configs()) self.assertEqual(errors, [ checks.Error( "The permission named '%s' of model 'auth_tests.Checked' is longer " "than 255 characters." % custom_permission_name, obj=Checked, id='auth.E008', ), ]) def test_custom_permission_codename_max_length(self): custom_permission_codename = 'x' * 101 class Checked(models.Model): class Meta: permissions = [ (custom_permission_codename, 'Custom permission'), ] errors = checks.run_checks(self.apps.get_app_configs()) self.assertEqual(errors, [ checks.Error( "The permission codenamed '%s' of model 'auth_tests.Checked' " "is longer than 100 characters." % custom_permission_codename, obj=Checked, id='auth.E012', ), ]) def test_empty_default_permissions(self): class Checked(models.Model): class Meta: default_permissions = () self.assertEqual(checks.run_checks(self.apps.get_app_configs()), [])
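

# Editorial note (not part of the original suite): the length limits asserted
# above derive from the auth Permission model, whose name field is capped at
# 255 characters and whose codename field at 100. The longest default name
# prefixes ("Can change ", "Can delete ") are 11 characters and the longest
# default codename prefixes ("change_", "delete_") are 7 characters, hence
# the 244- and 93-character limits checked by auth.E007 and auth.E011.
assert 255 - len('Can change ') == 244
assert 100 - len('change_') == 93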
from unittest import mock, skipUnless from django.conf.global_settings import PASSWORD_HASHERS from django.contrib.auth.hashers import ( UNUSABLE_PASSWORD_PREFIX, UNUSABLE_PASSWORD_SUFFIX_LENGTH, BasePasswordHasher, PBKDF2PasswordHasher, PBKDF2SHA1PasswordHasher, check_password, get_hasher, identify_hasher, is_password_usable, make_password, ) from django.test import SimpleTestCase from django.test.utils import override_settings try: import crypt except ImportError: crypt = None else: # On some platforms (e.g. OpenBSD), crypt.crypt() always return None. if crypt.crypt('') is None: crypt = None try: import bcrypt except ImportError: bcrypt = None try: import argon2 except ImportError: argon2 = None class PBKDF2SingleIterationHasher(PBKDF2PasswordHasher): iterations = 1 @override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS) class TestUtilsHashPass(SimpleTestCase): def test_simple(self): encoded = make_password('lètmein') self.assertTrue(encoded.startswith('pbkdf2_sha256$')) self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) # Blank passwords blank_encoded = make_password('') self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$')) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password('', blank_encoded)) self.assertFalse(check_password(' ', blank_encoded)) def test_pbkdf2(self): encoded = make_password('lètmein', 'seasalt', 'pbkdf2_sha256') self.assertEqual(encoded, 'pbkdf2_sha256$216000$seasalt$youGZxOw6ZOcfrXv2i8/AhrnpZflJJ9EshS9XmUJTUg=') self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256") # Blank passwords blank_encoded = make_password('', 'seasalt', 'pbkdf2_sha256') self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$')) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password('', blank_encoded)) self.assertFalse(check_password(' ', blank_encoded)) @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher']) def test_sha1(self): encoded = make_password('lètmein', 'seasalt', 'sha1') self.assertEqual(encoded, 'sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8') self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "sha1") # Blank passwords blank_encoded = make_password('', 'seasalt', 'sha1') self.assertTrue(blank_encoded.startswith('sha1$')) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password('', blank_encoded)) self.assertFalse(check_password(' ', blank_encoded)) @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.MD5PasswordHasher']) def test_md5(self): encoded = make_password('lètmein', 'seasalt', 'md5') self.assertEqual(encoded, 'md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3') self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "md5") # Blank passwords blank_encoded = make_password('', 'seasalt', 'md5') self.assertTrue(blank_encoded.startswith('md5$')) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password('', blank_encoded)) self.assertFalse(check_password(' ', 
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedMD5PasswordHasher'])
    def test_unsalted_md5(self):
        encoded = make_password('lètmein', '', 'unsalted_md5')
        self.assertEqual(encoded, '88a434c88cca4e900f7874cd98123f43')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5")
        # Alternate unsalted syntax
        alt_encoded = "md5$$%s" % encoded
        self.assertTrue(is_password_usable(alt_encoded))
        self.assertTrue(check_password('lètmein', alt_encoded))
        self.assertFalse(check_password('lètmeinz', alt_encoded))
        # Blank passwords
        blank_encoded = make_password('', '', 'unsalted_md5')
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher'])
    def test_unsalted_sha1(self):
        encoded = make_password('lètmein', '', 'unsalted_sha1')
        self.assertEqual(encoded, 'sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1")
        # Raw SHA1 isn't acceptable
        alt_encoded = encoded[6:]
        self.assertFalse(check_password('lètmein', alt_encoded))
        # Blank passwords
        blank_encoded = make_password('', '', 'unsalted_sha1')
        self.assertTrue(blank_encoded.startswith('sha1$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(crypt, "no crypt module to generate password.")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.CryptPasswordHasher'])
    def test_crypt(self):
        encoded = make_password('lètmei', 'ab', 'crypt')
        self.assertEqual(encoded, 'crypt$$ab1Hv2Lg7ltQo')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmei', encoded))
        self.assertFalse(check_password('lètmeiz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "crypt")
        # Blank passwords
        blank_encoded = make_password('', 'ab', 'crypt')
        self.assertTrue(blank_encoded.startswith('crypt$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(bcrypt, "bcrypt not installed")
    def test_bcrypt_sha256(self):
        encoded = make_password('lètmein', hasher='bcrypt_sha256')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(encoded.startswith('bcrypt_sha256$'))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256")

        # password truncation no longer works
        password = (
            'VSK0UYV6FFQVZ0KG88DYN9WADAADZO1CTSIVDJUNZSUML6IBX7LN7ZS3R5'
            'JGB3RGZ7VI7G7DJQ9NI8BQFSRPTG6UWTTVESA5ZPUN'
        )
        encoded = make_password(password, hasher='bcrypt_sha256')
        self.assertTrue(check_password(password, encoded))
        self.assertFalse(check_password(password[:72], encoded))
        # Blank passwords
        blank_encoded = make_password('', hasher='bcrypt_sha256')
        self.assertTrue(blank_encoded.startswith('bcrypt_sha256$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
    @skipUnless(bcrypt, "bcrypt not installed")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.BCryptPasswordHasher'])
    def test_bcrypt(self):
        encoded = make_password('lètmein', hasher='bcrypt')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(encoded.startswith('bcrypt$'))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt")
        # Blank passwords
        blank_encoded = make_password('', hasher='bcrypt')
        self.assertTrue(blank_encoded.startswith('bcrypt$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(bcrypt, "bcrypt not installed")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.BCryptPasswordHasher'])
    def test_bcrypt_upgrade(self):
        hasher = get_hasher('bcrypt')
        self.assertEqual('bcrypt', hasher.algorithm)
        self.assertNotEqual(hasher.rounds, 4)

        old_rounds = hasher.rounds
        try:
            # Generate a password with 4 rounds.
            hasher.rounds = 4
            encoded = make_password('letmein', hasher='bcrypt')
            rounds = hasher.safe_summary(encoded)['work factor']
            self.assertEqual(rounds, '04')

            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True

            # No upgrade is triggered.
            self.assertTrue(check_password('letmein', encoded, setter, 'bcrypt'))
            self.assertFalse(state['upgraded'])

            # Revert to the old rounds count and ...
            hasher.rounds = old_rounds

            # ... check if the password would get updated to the new count.
            self.assertTrue(check_password('letmein', encoded, setter, 'bcrypt'))
            self.assertTrue(state['upgraded'])
        finally:
            hasher.rounds = old_rounds

    @skipUnless(bcrypt, "bcrypt not installed")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.BCryptPasswordHasher'])
    def test_bcrypt_harden_runtime(self):
        hasher = get_hasher('bcrypt')
        self.assertEqual('bcrypt', hasher.algorithm)
        with mock.patch.object(hasher, 'rounds', 4):
            encoded = make_password('letmein', hasher='bcrypt')

        with mock.patch.object(hasher, 'rounds', 6), \
                mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
            hasher.harden_runtime('wrong_password', encoded)

            # Increasing rounds from 4 to 6 means an increase of 4 in workload,
            # therefore hardening should run 3 times to make the timing the
            # same (the original encode() call already ran once).
            self.assertEqual(hasher.encode.call_count, 3)

            # Get the original salt (includes the original workload factor)
            algorithm, data = encoded.split('$', 1)
            expected_call = (('wrong_password', data[:29].encode()),)
            self.assertEqual(hasher.encode.call_args_list, [expected_call] * 3)
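
    # Worked example (illustrative, not from the original suite) of the
    # hardening arithmetic asserted above: bcrypt's cost is exponential in the
    # rounds, so moving from 4 to 6 rounds multiplies the workload by
    # 2 ** (6 - 4) == 4. One encode() already ran while checking the wrong
    # password, so harden_runtime() needs 4 - 1 == 3 additional runs to keep
    # the total timing comparable.
    def test_bcrypt_harden_runtime_arithmetic_sketch(self):
        old_rounds, new_rounds = 4, 6
        workload_ratio = 2 ** (new_rounds - old_rounds)
        self.assertEqual(workload_ratio, 4)
        self.assertEqual(workload_ratio - 1, 3)  # extra encode() calls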
    def test_unusable(self):
        encoded = make_password(None)
        self.assertEqual(len(encoded), len(UNUSABLE_PASSWORD_PREFIX) + UNUSABLE_PASSWORD_SUFFIX_LENGTH)
        self.assertFalse(is_password_usable(encoded))
        self.assertFalse(check_password(None, encoded))
        self.assertFalse(check_password(encoded, encoded))
        self.assertFalse(check_password(UNUSABLE_PASSWORD_PREFIX, encoded))
        self.assertFalse(check_password('', encoded))
        self.assertFalse(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        with self.assertRaisesMessage(ValueError, 'Unknown password hashing algorithm'):
            identify_hasher(encoded)
        # Assert that the unusable passwords actually contain a random part.
        # This might fail one day due to a hash collision.
        self.assertNotEqual(encoded, make_password(None), "Random password collision?")

    def test_unspecified_password(self):
        """
        Makes sure specifying no plain password with a valid encoded password
        returns `False`.
        """
        self.assertFalse(check_password(None, make_password('lètmein')))

    def test_bad_algorithm(self):
        msg = (
            "Unknown password hashing algorithm '%s'. Did you specify it in "
            "the PASSWORD_HASHERS setting?"
        )
        with self.assertRaisesMessage(ValueError, msg % 'lolcat'):
            make_password('lètmein', hasher='lolcat')
        with self.assertRaisesMessage(ValueError, msg % 'lolcat'):
            identify_hasher('lolcat$salt$hash')

    def test_is_password_usable(self):
        passwords = ('lètmein_badencoded', '', None)
        for password in passwords:
            with self.subTest(password=password):
                self.assertIs(is_password_usable(password), True)

    def test_low_level_pbkdf2(self):
        hasher = PBKDF2PasswordHasher()
        encoded = hasher.encode('lètmein', 'seasalt2')
        self.assertEqual(encoded, 'pbkdf2_sha256$216000$seasalt2$gHyszNJ9lwTG5y3MQUjZe+OJmYVTBPl/y7bYq9dtk8M=')
        self.assertTrue(hasher.verify('lètmein', encoded))

    def test_low_level_pbkdf2_sha1(self):
        hasher = PBKDF2SHA1PasswordHasher()
        encoded = hasher.encode('lètmein', 'seasalt2')
        self.assertEqual(encoded, 'pbkdf2_sha1$216000$seasalt2$E1KH89wMKuPXrrQzifVcG4cBtiA=')
        self.assertTrue(hasher.verify('lètmein', encoded))

    @override_settings(
        PASSWORD_HASHERS=[
            'django.contrib.auth.hashers.PBKDF2PasswordHasher',
            'django.contrib.auth.hashers.SHA1PasswordHasher',
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ],
    )
    def test_upgrade(self):
        self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
        for algo in ('sha1', 'md5'):
            with self.subTest(algo=algo):
                encoded = make_password('lètmein', hasher=algo)
                state = {'upgraded': False}

                def setter(password):
                    state['upgraded'] = True

                self.assertTrue(check_password('lètmein', encoded, setter))
                self.assertTrue(state['upgraded'])

    def test_no_upgrade(self):
        encoded = make_password('lètmein')
        state = {'upgraded': False}

        def setter():
            state['upgraded'] = True

        self.assertFalse(check_password('WRONG', encoded, setter))
        self.assertFalse(state['upgraded'])

    @override_settings(
        PASSWORD_HASHERS=[
            'django.contrib.auth.hashers.PBKDF2PasswordHasher',
            'django.contrib.auth.hashers.SHA1PasswordHasher',
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ],
    )
    def test_no_upgrade_on_incorrect_pass(self):
        self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
        for algo in ('sha1', 'md5'):
            with self.subTest(algo=algo):
                encoded = make_password('lètmein', hasher=algo)
                state = {'upgraded': False}

                def setter():
                    state['upgraded'] = True

                self.assertFalse(check_password('WRONG', encoded, setter))
                self.assertFalse(state['upgraded'])

    def test_pbkdf2_upgrade(self):
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        self.assertNotEqual(hasher.iterations, 1)

        old_iterations = hasher.iterations
        try:
            # Generate a password with 1 iteration.
            hasher.iterations = 1
            encoded = make_password('letmein')
            algo, iterations, salt, hash = encoded.split('$', 3)
            self.assertEqual(iterations, '1')

            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True

            # No upgrade is triggered
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertFalse(state['upgraded'])

            # Revert to the old iteration count and ...
            hasher.iterations = old_iterations

            # ... check if the password would get updated to the new iteration count.
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertTrue(state['upgraded'])
        finally:
            hasher.iterations = old_iterations
    def test_pbkdf2_harden_runtime(self):
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)

        with mock.patch.object(hasher, 'iterations', 1):
            encoded = make_password('letmein')

        with mock.patch.object(hasher, 'iterations', 6), \
                mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
            hasher.harden_runtime('wrong_password', encoded)

            # Encode should get called once ...
            self.assertEqual(hasher.encode.call_count, 1)

            # ... with the original salt and 5 iterations.
            algorithm, iterations, salt, hash = encoded.split('$', 3)
            expected_call = (('wrong_password', salt, 5),)
            self.assertEqual(hasher.encode.call_args, expected_call)

    def test_pbkdf2_upgrade_new_hasher(self):
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        self.assertNotEqual(hasher.iterations, 1)

        state = {'upgraded': False}

        def setter(password):
            state['upgraded'] = True

        with self.settings(PASSWORD_HASHERS=[
                'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
            encoded = make_password('letmein')
            algo, iterations, salt, hash = encoded.split('$', 3)
            self.assertEqual(iterations, '1')

            # No upgrade is triggered
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertFalse(state['upgraded'])

        # Revert to the old iteration count and check if the password would get
        # updated to the new iteration count.
        with self.settings(PASSWORD_HASHERS=[
                'django.contrib.auth.hashers.PBKDF2PasswordHasher',
                'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertTrue(state['upgraded'])

    def test_check_password_calls_harden_runtime(self):
        hasher = get_hasher('default')
        encoded = make_password('letmein')

        with mock.patch.object(hasher, 'harden_runtime'), \
                mock.patch.object(hasher, 'must_update', return_value=True):
            # Correct password supplied, no hardening needed
            check_password('letmein', encoded)
            self.assertEqual(hasher.harden_runtime.call_count, 0)

            # Wrong password supplied, hardening needed
            check_password('wrong_password', encoded)
            self.assertEqual(hasher.harden_runtime.call_count, 1)


class BasePasswordHasherTests(SimpleTestCase):
    not_implemented_msg = 'subclasses of BasePasswordHasher must provide %s() method'

    def setUp(self):
        self.hasher = BasePasswordHasher()

    def test_load_library_no_algorithm(self):
        msg = "Hasher 'BasePasswordHasher' doesn't specify a library attribute"
        with self.assertRaisesMessage(ValueError, msg):
            self.hasher._load_library()

    def test_load_library_importerror(self):
        PlainHasher = type('PlainHasher', (BasePasswordHasher,), {'algorithm': 'plain', 'library': 'plain'})
        msg = "Couldn't load 'PlainHasher' algorithm library: No module named 'plain'"
        with self.assertRaisesMessage(ValueError, msg):
            PlainHasher()._load_library()

    def test_attributes(self):
        self.assertIsNone(self.hasher.algorithm)
        self.assertIsNone(self.hasher.library)

    def test_encode(self):
        msg = self.not_implemented_msg % 'an encode'
        with self.assertRaisesMessage(NotImplementedError, msg):
            self.hasher.encode('password', 'salt')

    def test_harden_runtime(self):
        msg = 'subclasses of BasePasswordHasher should provide a harden_runtime() method'
        with self.assertWarnsMessage(Warning, msg):
            self.hasher.harden_runtime('password', 'encoded')

    def test_must_update(self):
        self.assertIs(self.hasher.must_update('encoded'), False)

    def test_safe_summary(self):
        msg = self.not_implemented_msg % 'a safe_summary'
        with self.assertRaisesMessage(NotImplementedError, msg):
            self.hasher.safe_summary('encoded')
    def test_verify(self):
        msg = self.not_implemented_msg % 'a verify'
        with self.assertRaisesMessage(NotImplementedError, msg):
            self.hasher.verify('password', 'encoded')


@skipUnless(argon2, "argon2-cffi not installed")
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPassArgon2(SimpleTestCase):

    def test_argon2(self):
        encoded = make_password('lètmein', hasher='argon2')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(encoded.startswith('argon2$'))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, 'argon2')
        # Blank passwords
        blank_encoded = make_password('', hasher='argon2')
        self.assertTrue(blank_encoded.startswith('argon2$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
        # Old hashes without version attribute
        encoded = (
            'argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO'
            '4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg'
        )
        self.assertTrue(check_password('secret', encoded))
        self.assertFalse(check_password('wrong', encoded))

    def test_argon2_upgrade(self):
        self._test_argon2_upgrade('time_cost', 'time cost', 1)
        self._test_argon2_upgrade('memory_cost', 'memory cost', 16)
        self._test_argon2_upgrade('parallelism', 'parallelism', 1)

    def test_argon2_version_upgrade(self):
        hasher = get_hasher('argon2')
        state = {'upgraded': False}
        encoded = (
            'argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO'
            '4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg'
        )

        def setter(password):
            state['upgraded'] = True

        old_m = hasher.memory_cost
        old_t = hasher.time_cost
        old_p = hasher.parallelism
        try:
            hasher.memory_cost = 8
            hasher.time_cost = 1
            hasher.parallelism = 1
            self.assertTrue(check_password('secret', encoded, setter, 'argon2'))
            self.assertTrue(state['upgraded'])
        finally:
            hasher.memory_cost = old_m
            hasher.time_cost = old_t
            hasher.parallelism = old_p

    def _test_argon2_upgrade(self, attr, summary_key, new_value):
        hasher = get_hasher('argon2')
        self.assertEqual('argon2', hasher.algorithm)
        self.assertNotEqual(getattr(hasher, attr), new_value)

        old_value = getattr(hasher, attr)
        try:
            # Generate hash with attr set to 1
            setattr(hasher, attr, new_value)
            encoded = make_password('letmein', hasher='argon2')
            attr_value = hasher.safe_summary(encoded)[summary_key]
            self.assertEqual(attr_value, new_value)

            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True

            # No upgrade is triggered.
            self.assertTrue(check_password('letmein', encoded, setter, 'argon2'))
            self.assertFalse(state['upgraded'])

            # Revert to the old rounds count and ...
            setattr(hasher, attr, old_value)

            # ... check if the password would get updated to the new count.
            self.assertTrue(check_password('letmein', encoded, setter, 'argon2'))
            self.assertTrue(state['upgraded'])
        finally:
            setattr(hasher, attr, old_value)
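

# Sketch (illustrative addition, not part of Django's suite): the setter
# pattern the upgrade tests above exercise, in the shape application code
# would use it. check_password() calls the setter with the plain password
# whenever the stored hash verifies but needs re-hashing, which is how
# passwords get transparently upgraded on login. `user` is a hypothetical
# object with a `password` attribute and a `save()` method.
def rehash_on_login_sketch(user, plain_password):
    def setter(raw_password):
        # Re-hash with the current default hasher and persist it.
        user.password = make_password(raw_password)
        user.save(update_fields=['password'])
    return check_password(plain_password, user.password, setter)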
import datetime
import re
from decimal import Decimal

from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
    Avg, Case, Count, DecimalField, DurationField, Exists, F, FloatField,
    Func, IntegerField, Max, Min, OuterRef, Subquery, Sum, Value, When,
)
from django.db.models.functions import Coalesce
from django.test import TestCase
from django.test.testcases import skipUnlessDBFeature
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone

from .models import Author, Book, Publisher, Store


class AggregateTestCase(TestCase):

    @classmethod
    def setUpTestData(cls):
        cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
        cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
        cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
        cls.a4 = Author.objects.create(name='James Bennett', age=29)
        cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
        cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
        cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
        cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
        cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
        cls.a1.friends.add(cls.a2, cls.a4)
        cls.a2.friends.add(cls.a1, cls.a7)
        cls.a4.friends.add(cls.a1)
        cls.a5.friends.add(cls.a6, cls.a7)
        cls.a6.friends.add(cls.a5, cls.a7)
        cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
        cls.a8.friends.add(cls.a9)
        cls.a9.friends.add(cls.a8)

        cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
        cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2))
        cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
        cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
        cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)

        cls.b1 = Book.objects.create(
            isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
            pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
            pubdate=datetime.date(2007, 12, 6)
        )
        cls.b2 = Book.objects.create(
            isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
            pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
            pubdate=datetime.date(2008, 3, 3)
        )
        cls.b3 = Book.objects.create(
            isbn='159059996', name='Practical Django Projects',
            pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
            pubdate=datetime.date(2008, 6, 23)
        )
        cls.b4 = Book.objects.create(
            isbn='013235613', name='Python Web Development with Django',
            pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
            pubdate=datetime.date(2008, 11, 3)
        )
        cls.b5 = Book.objects.create(
            isbn='013790395', name='Artificial Intelligence: A Modern Approach',
            pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
            pubdate=datetime.date(1995, 1, 15)
        )
        cls.b6 = Book.objects.create(
            isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
            pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
            pubdate=datetime.date(1991, 10, 15)
        )
        cls.b1.authors.add(cls.a1, cls.a2)
        cls.b2.authors.add(cls.a3)
        cls.b3.authors.add(cls.a4)
        cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
        cls.b5.authors.add(cls.a8, cls.a9)
        cls.b6.authors.add(cls.a8)

        s1 = Store.objects.create(
            name='Amazon.com',
            original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
            friday_night_closing=datetime.time(23, 59, 59)
        )
        s2 = Store.objects.create(
            name='Books.com',
            original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
            friday_night_closing=datetime.time(23, 59, 59)
        )
        s3 = Store.objects.create(
            name="Mamma and Pappa's Books",
            original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
            friday_night_closing=datetime.time(21, 30)
        )
        s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
        s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
        s3.books.add(cls.b3, cls.b4, cls.b6)
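
    # Sketch (illustrative, not part of the original suite): aggregate()
    # returns a plain dict. Unnamed aggregates are keyed
    # '<field>__<aggregate>' (e.g. 'age__avg' in test_single_aggregate below);
    # pass a keyword argument to pick the key yourself, as test_aggregate_alias
    # does.
    def test_aggregate_result_shape_sketch(self):
        implicit = Author.objects.aggregate(Max('age'))
        explicit = Author.objects.aggregate(oldest=Max('age'))
        self.assertEqual(implicit, {'age__max': 57})
        self.assertEqual(explicit, {'oldest': 57})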
    def test_empty_aggregate(self):
        self.assertEqual(Author.objects.all().aggregate(), {})

    def test_aggregate_in_order_by(self):
        msg = (
            'Using an aggregate in order_by() without also including it in '
            'annotate() is not allowed: Avg(F(book__rating)'
        )
        with self.assertRaisesMessage(FieldError, msg):
            Author.objects.values('age').order_by(Avg('book__rating'))

    def test_single_aggregate(self):
        vals = Author.objects.aggregate(Avg("age"))
        self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})

    def test_multiple_aggregates(self):
        vals = Author.objects.aggregate(Sum("age"), Avg("age"))
        self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})

    def test_filter_aggregate(self):
        vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
        self.assertEqual(vals, {'age__sum': 254})

    def test_related_aggregate(self):
        vals = Author.objects.aggregate(Avg("friends__age"))
        self.assertEqual(vals, {'friends__age__avg': Approximate(34.07, places=2)})

        vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
        self.assertEqual(vals, {'authors__age__avg': Approximate(38.2857, places=2)})

        vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
        self.assertEqual(vals, {'book__rating__avg': 4.0})

        vals = Book.objects.aggregate(Sum("publisher__num_awards"))
        self.assertEqual(vals, {'publisher__num_awards__sum': 30})

        vals = Publisher.objects.aggregate(Sum("book__price"))
        self.assertEqual(vals, {'book__price__sum': Decimal('270.27')})

    def test_aggregate_multi_join(self):
        vals = Store.objects.aggregate(Max("books__authors__age"))
        self.assertEqual(vals, {'books__authors__age__max': 57})

        vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
        self.assertEqual(vals, {'book__publisher__num_awards__min': 1})

    def test_aggregate_alias(self):
        vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
        self.assertEqual(vals, {'amazon_mean': Approximate(4.08, places=2)})

    def test_annotate_basic(self):
        self.assertQuerysetEqual(
            Book.objects.annotate().order_by('pk'), [
                "The Definitive Guide to Django: Web Development Done Right",
                "Sams Teach Yourself Django in 24 Hours",
                "Practical Django Projects",
                "Python Web Development with Django",
                "Artificial Intelligence: A Modern Approach",
                "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
            ],
            lambda b: b.name
        )

        books = Book.objects.annotate(mean_age=Avg("authors__age"))
        b = books.get(pk=self.b1.pk)
        self.assertEqual(
            b.name,
            'The Definitive Guide to Django: Web Development Done Right'
        )
        self.assertEqual(b.mean_age, 34.5)

    def test_annotate_defer(self):
        qs = Book.objects.annotate(
            page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)

        rows = [
            (self.b1.id, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
        ]
        self.assertQuerysetEqual(
            qs.order_by('pk'), rows,
            lambda r: (r.id, r.isbn, r.page_sum, r.name)
        )
    def test_annotate_defer_select_related(self):
        qs = Book.objects.select_related('contact').annotate(
            page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)

        rows = [
            (self.b1.id, "159059725", 447, "Adrian Holovaty",
             "The Definitive Guide to Django: Web Development Done Right")
        ]
        self.assertQuerysetEqual(
            qs.order_by('pk'), rows,
            lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
        )

    def test_annotate_m2m(self):
        books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 51.5),
                ('Practical Django Projects', 29.0),
                ('Python Web Development with Django', Approximate(30.3, places=1)),
                ('Sams Teach Yourself Django in 24 Hours', 45.0)
            ],
            lambda b: (b.name, b.authors__age__avg)
        )

        books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Practical Django Projects', 1),
                ('Python Web Development with Django', 3),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 2)
            ],
            lambda b: (b.name, b.num_authors)
        )

    def test_backwards_m2m_annotate(self):
        authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 4.5),
                ('Brad Dayley', 3.0),
                ('Jacob Kaplan-Moss', 4.5),
                ('James Bennett', 4.0),
                ('Paul Bissex', 4.0),
                ('Stuart Russell', 4.0)
            ],
            lambda a: (a.name, a.book__rating__avg)
        )

        authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 1),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 1),
                ('Peter Norvig', 2),
                ('Stuart Russell', 1),
                ('Wesley J. Chun', 1)
            ],
            lambda a: (a.name, a.num_books)
        )
    def test_reverse_fkey_annotate(self):
        books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 7),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
                ('Practical Django Projects', 3),
                ('Python Web Development with Django', 7),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 3)
            ],
            lambda b: (b.name, b.publisher__num_awards__sum)
        )

        publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
        self.assertQuerysetEqual(
            publishers, [
                ('Apress', Decimal("59.69")),
                ("Jonno's House of Books", None),
                ('Morgan Kaufmann', Decimal("75.00")),
                ('Prentice Hall', Decimal("112.49")),
                ('Sams', Decimal("23.09"))
            ],
            lambda p: (p.name, p.book__price__sum)
        )

    def test_annotate_values(self):
        books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
        self.assertEqual(
            books, [
                {
                    "contact_id": self.a1.id,
                    "id": self.b1.id,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": "The Definitive Guide to Django: Web Development Done Right",
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": self.p1.id,
                    "rating": 4.5,
                }
            ]
        )

        books = (
            Book.objects
            .filter(pk=self.b1.pk)
            .annotate(mean_age=Avg('authors__age'))
            .values('pk', 'isbn', 'mean_age')
        )
        self.assertEqual(
            list(books), [
                {
                    "pk": self.b1.pk,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                }
            ]
        )

        books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
        self.assertEqual(
            list(books),
            [{'name': 'The Definitive Guide to Django: Web Development Done Right'}],
        )

        books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
        self.assertEqual(
            list(books), [
                {
                    "contact_id": self.a1.id,
                    "id": self.b1.id,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": "The Definitive Guide to Django: Web Development Done Right",
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": self.p1.id,
                    "rating": 4.5,
                }
            ]
        )

        books = (
            Book.objects
            .values("rating")
            .annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
            .order_by("rating")
        )
        self.assertEqual(
            list(books), [
                {
                    "rating": 3.0,
                    "n_authors": 1,
                    "mean_age": 45.0,
                },
                {
                    "rating": 4.0,
                    "n_authors": 6,
                    "mean_age": Approximate(37.16, places=1)
                },
                {
                    "rating": 4.5,
                    "n_authors": 2,
                    "mean_age": 34.5,
                },
                {
                    "rating": 5.0,
                    "n_authors": 1,
                    "mean_age": 57.0,
                }
            ]
        )

        authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 32.0),
                ('Brad Dayley', None),
                ('Jacob Kaplan-Moss', 29.5),
                ('James Bennett', 34.0),
                ('Jeffrey Forcier', 27.0),
                ('Paul Bissex', 31.0),
                ('Peter Norvig', 46.0),
                ('Stuart Russell', 57.0),
                ('Wesley J. Chun', Approximate(33.66, places=1))
            ],
            lambda a: (a.name, a.friends__age__avg)
        )
    def test_count(self):
        vals = Book.objects.aggregate(Count("rating"))
        self.assertEqual(vals, {"rating__count": 6})

    def test_count_star(self):
        with self.assertNumQueries(1) as ctx:
            Book.objects.aggregate(n=Count("*"))
        sql = ctx.captured_queries[0]['sql']
        self.assertIn('SELECT COUNT(*) ', sql)

    def test_count_distinct_expression(self):
        aggs = Book.objects.aggregate(
            distinct_ratings=Count(Case(When(pages__gt=300, then='rating')), distinct=True),
        )
        self.assertEqual(aggs['distinct_ratings'], 4)

    def test_distinct_on_aggregate(self):
        for aggregate, expected_result in (
            (Avg, 4.125),
            (Count, 4),
            (Sum, 16.5),
        ):
            with self.subTest(aggregate=aggregate.__name__):
                books = Book.objects.aggregate(ratings=aggregate('rating', distinct=True))
                self.assertEqual(books['ratings'], expected_result)

    def test_non_grouped_annotation_not_in_group_by(self):
        """
        An annotation not included in values() before an aggregate should be
        excluded from the group by clause.
        """
        qs = (
            Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating')
            .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
        )
        self.assertEqual(list(qs), [{'rating': 4.0, 'count': 2}])

    def test_grouped_annotation_in_group_by(self):
        """
        An annotation included in values() before an aggregate should be
        included in the group by clause.
        """
        qs = (
            Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice')
            .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
        )
        self.assertEqual(
            list(qs), [
                {'rating': 4.0, 'count': 1},
                {'rating': 4.0, 'count': 2},
            ]
        )
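
    # Sketch (illustrative, not from the original suite) of the rule the two
    # tests above encode: fields listed in values() *before* annotate() form
    # the GROUP BY, and later annotations aggregate over those groups.
    # Inspecting str(qs.query) makes the grouping visible.
    def test_group_by_rule_sketch(self):
        grouped = Book.objects.values('rating').annotate(n=Count('id'))
        self.assertIn('GROUP BY', str(grouped.query).upper())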
""" authors = Author.objects.filter(book__in=[self.b5, self.b6]) self.assertEqual(authors.count(), 3) distinct_authors = authors.distinct() self.assertEqual(distinct_authors.count(), 2) # Selected author ages are 57 and 46 age_sum = distinct_authors.aggregate(Sum('age')) self.assertEqual(age_sum['age__sum'], 103) def test_filtering(self): p = Publisher.objects.create(name='Expensive Publisher', num_awards=0) Book.objects.create( name='ExpensiveBook1', pages=1, isbn='111', rating=3.5, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 1) ) Book.objects.create( name='ExpensiveBook2', pages=1, isbn='222', rating=4.0, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 2) ) Book.objects.create( name='ExpensiveBook3', pages=1, isbn='333', rating=4.5, price=Decimal("35"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 3) ) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual( publishers, ['Apress', 'Prentice Hall', 'Expensive Publisher'], lambda p: p.name, ) publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Apress", "Sams", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name ) publishers = ( Publisher.objects .annotate(num_books=Count("book__id")) .filter(num_books__gt=1, book__price__lt=Decimal("40.0")) .order_by("pk") ) self.assertQuerysetEqual( publishers, ['Apress', 'Prentice Hall', 'Expensive Publisher'], lambda p: p.name, ) publishers = ( Publisher.objects .filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) .order_by("pk") ) self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Sams", "Prentice Hall", "Morgan Kaufmann", "Expensive Publisher", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk") self.assertQuerysetEqual( publishers, ['Apress', 'Sams', 'Prentice Hall', 'Morgan Kaufmann'], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, ['Sams', 'Morgan Kaufmann', 'Expensive Publisher'], lambda p: p.name, ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True) self.assertEqual(len(publishers), 0) def test_annotation(self): vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id")) self.assertEqual(vals, {"friends__id__count": 2}) books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk") self.assertQuerysetEqual( books, [ "The Definitive Guide to Django: Web Development Done Right", "Artificial Intelligence: A Modern Approach", ], lambda b: b.name ) authors = ( Author.objects .annotate(num_friends=Count("friends__id", distinct=True)) .filter(num_friends=0) .order_by("pk") ) self.assertQuerysetEqual(authors, ['Brad Dayley'], lambda a: a.name) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual(publishers, ['Apress', 'Prentice Hall'], lambda p: p.name) publishers = ( Publisher.objects 
    def test_annotation(self):
        vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
        self.assertEqual(vals, {"friends__id__count": 2})

        books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
        self.assertQuerysetEqual(
            books, [
                "The Definitive Guide to Django: Web Development Done Right",
                "Artificial Intelligence: A Modern Approach",
            ],
            lambda b: b.name
        )

        authors = (
            Author.objects
            .annotate(num_friends=Count("friends__id", distinct=True))
            .filter(num_friends=0)
            .order_by("pk")
        )
        self.assertQuerysetEqual(authors, ['Brad Dayley'], lambda a: a.name)

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(publishers, ['Apress', 'Prentice Hall'], lambda p: p.name)

        publishers = (
            Publisher.objects
            .filter(book__price__lt=Decimal("40.0"))
            .annotate(num_books=Count("book__id"))
            .filter(num_books__gt=1)
        )
        self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name)

        books = (
            Book.objects
            .annotate(num_authors=Count("authors__id"))
            .filter(authors__name__contains="Norvig", num_authors__gt=1)
        )
        self.assertQuerysetEqual(
            books,
            ['Artificial Intelligence: A Modern Approach'],
            lambda b: b.name
        )

    def test_more_aggregation(self):
        a = Author.objects.get(name__contains='Norvig')
        b = Book.objects.get(name__contains='Done Right')
        b.authors.add(a)
        b.save()

        vals = (
            Book.objects
            .annotate(num_authors=Count("authors__id"))
            .filter(authors__name__contains="Norvig", num_authors__gt=1)
            .aggregate(Avg("rating"))
        )
        self.assertEqual(vals, {"rating__avg": 4.25})

    def test_even_more_aggregate(self):
        publishers = Publisher.objects.annotate(
            earliest_book=Min("book__pubdate"),
        ).exclude(earliest_book=None).order_by("earliest_book").values(
            'earliest_book', 'num_awards', 'id', 'name',
        )
        self.assertEqual(
            list(publishers), [
                {
                    'earliest_book': datetime.date(1991, 10, 15),
                    'num_awards': 9,
                    'id': self.p4.id,
                    'name': 'Morgan Kaufmann'
                },
                {
                    'earliest_book': datetime.date(1995, 1, 15),
                    'num_awards': 7,
                    'id': self.p3.id,
                    'name': 'Prentice Hall'
                },
                {
                    'earliest_book': datetime.date(2007, 12, 6),
                    'num_awards': 3,
                    'id': self.p1.id,
                    'name': 'Apress'
                },
                {
                    'earliest_book': datetime.date(2008, 3, 3),
                    'num_awards': 1,
                    'id': self.p2.id,
                    'name': 'Sams'
                }
            ]
        )

        vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
        self.assertEqual(
            vals,
            {
                "friday_night_closing__max": datetime.time(23, 59, 59),
                "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
            }
        )

    def test_annotate_values_list(self):
        books = (
            Book.objects
            .filter(pk=self.b1.pk)
            .annotate(mean_age=Avg("authors__age"))
            .values_list("pk", "isbn", "mean_age")
        )
        self.assertEqual(list(books), [(self.b1.id, '159059725', 34.5)])

        books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
        self.assertEqual(list(books), [('159059725',)])

        books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
        self.assertEqual(list(books), [(34.5,)])

        books = (
            Book.objects
            .filter(pk=self.b1.pk)
            .annotate(mean_age=Avg("authors__age"))
            .values_list("mean_age", flat=True)
        )
        self.assertEqual(list(books), [34.5])

        books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
        self.assertEqual(
            list(books), [
                (Decimal("29.69"), 2),
                (Decimal('23.09'), 1),
                (Decimal('30'), 1),
                (Decimal('75'), 1),
                (Decimal('82.8'), 1),
            ]
        )

    def test_dates_with_aggregation(self):
        """
        .dates() returns a distinct set of dates when applied to a QuerySet
        with aggregation.

        Refs #18056. Previously, .dates() would return distinct (date_kind,
        aggregation) sets, in this case (year, num_authors), so 2008 would be
        returned twice because there are books from 2008 with a different
        number of authors.
        """
        dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
        self.assertQuerysetEqual(
            dates, [
                "datetime.date(1991, 1, 1)",
                "datetime.date(1995, 1, 1)",
                "datetime.date(2007, 1, 1)",
                "datetime.date(2008, 1, 1)"
            ]
        )
""" dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year') self.assertQuerysetEqual( dates, [ "datetime.date(1991, 1, 1)", "datetime.date(1995, 1, 1)", "datetime.date(2007, 1, 1)", "datetime.date(2008, 1, 1)" ] ) def test_values_aggregation(self): # Refs #20782 max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating')) self.assertEqual(max_rating['max_rating'], 5) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3}) def test_ticket17424(self): """ Doing exclude() on a foreign model after annotate() doesn't crash. """ all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk')) annotated_books = Book.objects.order_by('pk').annotate(one=Count("id")) # The value doesn't matter, we just need any negative # constraint on a related model that's a noop. excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__") # Try to generate query tree str(excluded_books.query) self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk) # Check internal state self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type) self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type) def test_ticket12886(self): """ Aggregation over sliced queryset works correctly. """ qs = Book.objects.all().order_by('-rating')[0:3] vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating'] self.assertAlmostEqual(vals, 4.5, places=2) def test_ticket11881(self): """ Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or select_related() stuff. """ qs = Book.objects.all().select_for_update().order_by( 'pk').select_related('publisher').annotate(max_pk=Max('pk')) with CaptureQueriesContext(connection) as captured_queries: qs.aggregate(avg_pk=Avg('max_pk')) self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]['sql'].lower() self.assertNotIn('for update', qstr) forced_ordering = connection.ops.force_no_ordering() if forced_ordering: # If the backend needs to force an ordering we make sure it's # the only "ORDER BY" clause present in the query. 
    def test_ticket11881(self):
        """
        Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or
        select_related() stuff.
        """
        qs = Book.objects.all().select_for_update().order_by(
            'pk').select_related('publisher').annotate(max_pk=Max('pk'))
        with CaptureQueriesContext(connection) as captured_queries:
            qs.aggregate(avg_pk=Avg('max_pk'))
            self.assertEqual(len(captured_queries), 1)
            qstr = captured_queries[0]['sql'].lower()
            self.assertNotIn('for update', qstr)
            forced_ordering = connection.ops.force_no_ordering()
            if forced_ordering:
                # If the backend needs to force an ordering we make sure it's
                # the only "ORDER BY" clause present in the query.
                self.assertEqual(
                    re.findall(r'order by (\w+)', qstr),
                    [', '.join(f[1][0] for f in forced_ordering).lower()]
                )
            else:
                self.assertNotIn('order by', qstr)
            self.assertEqual(qstr.count(' join '), 0)

    def test_decimal_max_digits_has_no_effect(self):
        Book.objects.all().delete()
        a1 = Author.objects.first()
        p1 = Publisher.objects.first()
        thedate = timezone.now()
        for i in range(10):
            Book.objects.create(
                isbn="abcde{}".format(i), name="none", pages=10, rating=4.0,
                price=9999.98, contact=a1, publisher=p1, pubdate=thedate)

        book = Book.objects.aggregate(price_sum=Sum('price'))
        self.assertEqual(book['price_sum'], Decimal("99999.80"))

    def test_nonaggregate_aggregation_throws(self):
        with self.assertRaisesMessage(TypeError, 'fail is not an aggregate expression'):
            Book.objects.aggregate(fail=F('price'))

    def test_nonfield_annotation(self):
        book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
        self.assertEqual(book.val, 2)
        book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
        self.assertEqual(book.val, 2)
        book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
        self.assertEqual(book.val, 2)

    def test_missing_output_field_raises_error(self):
        with self.assertRaisesMessage(FieldError, 'Cannot resolve expression type, unknown output_field'):
            Book.objects.annotate(val=Max(2)).first()

    def test_annotation_expressions(self):
        authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
        authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
        for qs in (authors, authors2):
            self.assertQuerysetEqual(
                qs, [
                    ('Adrian Holovaty', 132),
                    ('Brad Dayley', None),
                    ('Jacob Kaplan-Moss', 129),
                    ('James Bennett', 63),
                    ('Jeffrey Forcier', 128),
                    ('Paul Bissex', 120),
                    ('Peter Norvig', 103),
                    ('Stuart Russell', 103),
                    ('Wesley J. Chun', 176)
                ],
                lambda a: (a.name, a.combined_ages)
            )

    def test_aggregation_expressions(self):
        a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
        a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
        a3 = Author.objects.aggregate(av_age=Avg('age'))
        self.assertEqual(a1, {'av_age': 37})
        self.assertEqual(a2, {'av_age': 37})
        self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})

    def test_avg_decimal_field(self):
        v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price']
        self.assertIsInstance(v, Decimal)
        self.assertEqual(v, Approximate(Decimal('47.39'), places=2))

    def test_order_of_precedence(self):
        p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
        self.assertEqual(p1, {'avg_price': Approximate(Decimal('148.18'), places=2)})

        p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
        self.assertEqual(p2, {'avg_price': Approximate(Decimal('53.39'), places=2)})
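
    # Worked numbers for the precedence test above (illustrative, derived
    # from setUpTestData): the three rating-4.0 books cost 29.69, 29.69 and
    # 82.80, so Avg('price') is 142.18 / 3 ≈ 47.3933. Hence
    # (Avg('price') + 2) * 3 ≈ 148.18 while Avg('price') + 2 * 3 ≈ 53.39;
    # ordinary Python operator precedence applies to combined expressions.
    def test_order_of_precedence_arithmetic_sketch(self):
        avg = (Decimal('29.69') + Decimal('29.69') + Decimal('82.80')) / 3
        self.assertEqual(round((avg + 2) * 3, 2), Decimal('148.18'))
        self.assertEqual(round(avg + 2 * 3, 2), Decimal('53.39'))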
    def test_combine_different_types(self):
        msg = (
            'Expression contains mixed types: FloatField, IntegerField. '
            'You must set output_field.'
        )
        qs = Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price'))
        with self.assertRaisesMessage(FieldError, msg):
            qs.first()
        with self.assertRaisesMessage(FieldError, msg):
            qs.first()

        b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
                                   output_field=IntegerField())).get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)

        b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
                                   output_field=FloatField())).get(pk=self.b4.pk)
        self.assertEqual(b2.sums, 383.69)

        b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
                                   output_field=DecimalField())).get(pk=self.b4.pk)
        self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))

    def test_complex_aggregations_require_kwarg(self):
        with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
            Author.objects.annotate(Sum(F('age') + F('friends__age')))
        with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
            Author.objects.aggregate(Sum('age') / Count('age'))
        with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
            Author.objects.aggregate(Sum(1))

    def test_aggregate_over_complex_annotation(self):
        qs = Author.objects.annotate(
            combined_ages=Sum(F('age') + F('friends__age')))

        age = qs.aggregate(max_combined_age=Max('combined_ages'))
        self.assertEqual(age['max_combined_age'], 176)

        age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
        self.assertEqual(age['max_combined_age_doubled'], 176 * 2)

        age = qs.aggregate(
            max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
        self.assertEqual(age['max_combined_age_doubled'], 176 * 2)

        age = qs.aggregate(
            max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
            sum_combined_age=Sum('combined_ages'))
        self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
        self.assertEqual(age['sum_combined_age'], 954)

        age = qs.aggregate(
            max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
            sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
        self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
        self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)

    def test_values_annotation_with_expression(self):
        # ensure the F() is promoted to the group by clause
        qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
        a = qs.get(name="Adrian Holovaty")
        self.assertEqual(a['another_age'], 68)

        qs = qs.annotate(friend_count=Count('friends'))
        a = qs.get(name="Adrian Holovaty")
        self.assertEqual(a['friend_count'], 2)

        qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
            name="Adrian Holovaty").order_by('-combined_age')
        self.assertEqual(
            list(qs), [
                {
                    "name": 'Adrian Holovaty',
                    "another_age": 68,
                    "friend_count": 1,
                    "combined_age": 69
                },
                {
                    "name": 'Adrian Holovaty',
                    "another_age": 68,
                    "friend_count": 1,
                    "combined_age": 63
                }
            ]
        )

        vals = qs.values('name', 'combined_age')
        self.assertEqual(
            list(vals), [
                {'name': 'Adrian Holovaty', 'combined_age': 69},
                {'name': 'Adrian Holovaty', 'combined_age': 63},
            ]
        )

    def test_annotate_values_aggregate(self):
        alias_age = Author.objects.annotate(
            age_alias=F('age')
        ).values(
            'age_alias',
        ).aggregate(sum_age=Sum('age_alias'))

        age = Author.objects.values('age').aggregate(sum_age=Sum('age'))

        self.assertEqual(alias_age['sum_age'], age['sum_age'])

    def test_annotate_over_annotate(self):
        author = Author.objects.annotate(
            age_alias=F('age')
        ).annotate(
            sum_age=Sum('age_alias')
        ).get(name="Adrian Holovaty")

        other_author = Author.objects.annotate(
            sum_age=Sum('age')
        ).get(name="Adrian Holovaty")

        self.assertEqual(author.sum_age, other_author.sum_age)
    def test_annotated_aggregate_over_annotated_aggregate(self):
        with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"):
            Book.objects.annotate(Max('id')).annotate(Sum('id__max'))

        class MyMax(Max):
            def as_sql(self, compiler, connection):
                self.set_source_expressions(self.get_source_expressions()[0:1])
                return super().as_sql(compiler, connection)

        with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"):
            Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price'))

    def test_multi_arg_aggregate(self):
        class MyMax(Max):
            output_field = DecimalField()

            def as_sql(self, compiler, connection):
                copy = self.copy()
                copy.set_source_expressions(copy.get_source_expressions()[0:1])
                return super(MyMax, copy).as_sql(compiler, connection)

        with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
            Book.objects.aggregate(MyMax('pages', 'price'))

        with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
            Book.objects.annotate(MyMax('pages', 'price'))

        Book.objects.aggregate(max_field=MyMax('pages', 'price'))

    def test_add_implementation(self):
        class MySum(Sum):
            pass

        # test completely changing how the output is rendered
        def lower_case_function_override(self, compiler, connection):
            sql, params = compiler.compile(self.source_expressions[0])
            substitutions = {'function': self.function.lower(), 'expressions': sql, 'distinct': ''}
            substitutions.update(self.extra)
            return self.template % substitutions, params
        setattr(MySum, 'as_' + connection.vendor, lower_case_function_override)

        qs = Book.objects.annotate(
            sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
        )
        self.assertEqual(str(qs.query).count('sum('), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)

        # test changing the dict and delegating
        def lower_case_function_super(self, compiler, connection):
            self.extra['function'] = self.function.lower()
            return super(MySum, self).as_sql(compiler, connection)
        setattr(MySum, 'as_' + connection.vendor, lower_case_function_super)

        qs = Book.objects.annotate(
            sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
        )
        self.assertEqual(str(qs.query).count('sum('), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)

        # test overriding all parts of the template
        def be_evil(self, compiler, connection):
            substitutions = {'function': 'MAX', 'expressions': '2', 'distinct': ''}
            substitutions.update(self.extra)
            return self.template % substitutions, ()
        setattr(MySum, 'as_' + connection.vendor, be_evil)

        qs = Book.objects.annotate(
            sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
        )
        self.assertEqual(str(qs.query).count('MAX('), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 2)

    def test_complex_values_aggregation(self):
        max_rating = Book.objects.values('rating').aggregate(
            double_max_rating=Max('rating') + Max('rating'))
        self.assertEqual(max_rating['double_max_rating'], 5 * 2)

        max_books_per_rating = Book.objects.values('rating').annotate(
            books_per_rating=Count('id') + 5
        ).aggregate(Max('books_per_rating'))
        self.assertEqual(
            max_books_per_rating,
            {'books_per_rating__max': 3 + 5})

    def test_expression_on_aggregation(self):
        # Create a plain expression
        class Greatest(Func):
            function = 'GREATEST'

            def as_sqlite(self, compiler, connection, **extra_context):
                return super().as_sql(compiler, connection, function='MAX', **extra_context)
        qs = Publisher.objects.annotate(
            price_or_median=Greatest(Avg('book__rating', output_field=DecimalField()), Avg('book__price'))
        ).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
        self.assertQuerysetEqual(
            qs, [1, 3, 7, 9], lambda v: v.num_awards)

        qs2 = Publisher.objects.annotate(
            rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
                                          output_field=FloatField())
        ).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
        self.assertQuerysetEqual(
            qs2, [1, 3], lambda v: v.num_awards)

    def test_arguments_must_be_expressions(self):
        msg = 'QuerySet.aggregate() received non-expression(s): %s.'
        with self.assertRaisesMessage(TypeError, msg % FloatField()):
            Book.objects.aggregate(FloatField())
        with self.assertRaisesMessage(TypeError, msg % True):
            Book.objects.aggregate(is_book=True)
        with self.assertRaisesMessage(TypeError, msg % ', '.join([str(FloatField()), 'True'])):
            Book.objects.aggregate(FloatField(), Avg('price'), is_book=True)

    def test_aggregation_subquery_annotation(self):
        """
        Subquery annotations are excluded from the GROUP BY if they are not
        explicitly grouped against.
        """
        latest_book_pubdate_qs = Book.objects.filter(
            publisher=OuterRef('pk')
        ).order_by('-pubdate').values('pubdate')[:1]
        publisher_qs = Publisher.objects.annotate(
            latest_book_pubdate=Subquery(latest_book_pubdate_qs),
        ).annotate(count=Count('book'))
        with self.assertNumQueries(1) as ctx:
            list(publisher_qs)
        self.assertEqual(ctx[0]['sql'].count('SELECT'), 2)
        # The GROUP BY should not be by alias either.
        self.assertEqual(ctx[0]['sql'].lower().count('latest_book_pubdate'), 1)

    def test_aggregation_subquery_annotation_exists(self):
        latest_book_pubdate_qs = Book.objects.filter(
            publisher=OuterRef('pk')
        ).order_by('-pubdate').values('pubdate')[:1]
        publisher_qs = Publisher.objects.annotate(
            latest_book_pubdate=Subquery(latest_book_pubdate_qs),
            count=Count('book'),
        )
        self.assertTrue(publisher_qs.exists())

    def test_aggregation_exists_annotation(self):
        published_books = Book.objects.filter(publisher=OuterRef('pk'))
        publisher_qs = Publisher.objects.annotate(
            published_book=Exists(published_books),
            count=Count('book'),
        ).values_list('name', flat=True)
        self.assertCountEqual(list(publisher_qs), [
            'Apress',
            'Morgan Kaufmann',
            "Jonno's House of Books",
            'Prentice Hall',
            'Sams',
        ])
""" books_qs = Book.objects.annotate( first_author_the_same_age=Subquery( Author.objects.filter( age=OuterRef('contact__friends__age'), ).order_by('age').values('id')[:1], ) ).filter( publisher=self.p1, first_author_the_same_age__isnull=False, ).annotate( min_age=Min('contact__friends__age'), ).values('name', 'min_age').order_by('name') self.assertEqual(list(books_qs), [ {'name': 'Practical Django Projects', 'min_age': 34}, { 'name': 'The Definitive Guide to Django: Web Development Done Right', 'min_age': 29, }, ]) def test_aggregation_order_by_not_selected_annotation_values(self): result_asc = [ self.b4.pk, self.b3.pk, self.b1.pk, self.b2.pk, self.b5.pk, self.b6.pk, ] result_desc = result_asc[::-1] tests = [ ('min_related_age', result_asc), ('-min_related_age', result_desc), (F('min_related_age'), result_asc), (F('min_related_age').asc(), result_asc), (F('min_related_age').desc(), result_desc), ] for ordering, expected_result in tests: with self.subTest(ordering=ordering): books_qs = Book.objects.annotate( min_age=Min('authors__age'), ).annotate( min_related_age=Coalesce('min_age', 'contact__age'), ).order_by(ordering).values_list('pk', flat=True) self.assertEqual(list(books_qs), expected_result) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_group_by_subquery_annotation(self): """ Subquery annotations are included in the GROUP BY if they are grouped against. """ long_books_count_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=400, ).values( 'publisher' ).annotate(count=Count('pk')).values('count') long_books_count_breakdown = Publisher.objects.values_list( Subquery(long_books_count_qs, IntegerField()), ).annotate(total=Count('*')) self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4}) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_group_by_exists_annotation(self): """ Exists annotations are included in the GROUP BY if they are grouped against. """ long_books_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=800, ) has_long_books_breakdown = Publisher.objects.values_list( Exists(long_books_qs), ).annotate(total=Count('*')) self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3}) def test_aggregation_subquery_annotation_related_field(self): publisher = Publisher.objects.create(name=self.a9.name, num_awards=2) book = Book.objects.create( isbn='159059999', name='Test book.', pages=819, rating=2.5, price=Decimal('14.44'), contact=self.a9, publisher=publisher, pubdate=datetime.date(2019, 12, 6), ) book.authors.add(self.a5, self.a6, self.a7) books_qs = Book.objects.annotate( contact_publisher=Subquery( Publisher.objects.filter( pk=OuterRef('publisher'), name=OuterRef('contact__name'), ).values('name')[:1], ) ).filter( contact_publisher__isnull=False, ).annotate(count=Count('authors')) self.assertSequenceEqual(books_qs, [book])
import datetime

from django.core import signing
from django.test import SimpleTestCase
from django.test.utils import freeze_time


class TestSigner(SimpleTestCase):

    def test_signature(self):
        "signature() method should generate a signature"
        signer = signing.Signer('predictable-secret')
        signer2 = signing.Signer('predictable-secret2')
        for s in (
            b'hello',
            b'3098247:529:087:',
            '\u2019'.encode(),
        ):
            self.assertEqual(
                signer.signature(s),
                signing.base64_hmac(signer.salt + 'signer', s, 'predictable-secret')
            )
            self.assertNotEqual(signer.signature(s), signer2.signature(s))

    def test_signature_with_salt(self):
        "signature(value, salt=...) should work"
        signer = signing.Signer('predictable-secret', salt='extra-salt')
        self.assertEqual(
            signer.signature('hello'),
            signing.base64_hmac('extra-salt' + 'signer', 'hello', 'predictable-secret')
        )
        self.assertNotEqual(
            signing.Signer('predictable-secret', salt='one').signature('hello'),
            signing.Signer('predictable-secret', salt='two').signature('hello'))

    def test_sign_unsign(self):
        "sign/unsign should be reversible"
        signer = signing.Signer('predictable-secret')
        examples = [
            'q;wjmbk;wkmb',
            '3098247529087',
            '3098247:529:087:',
            'jkw osanteuh ,rcuh nthu aou oauh ,ud du',
            '\u2019',
        ]
        for example in examples:
            signed = signer.sign(example)
            self.assertIsInstance(signed, str)
            self.assertNotEqual(example, signed)
            self.assertEqual(example, signer.unsign(signed))

    def test_sign_unsign_non_string(self):
        signer = signing.Signer('predictable-secret')
        values = [
            123,
            1.23,
            True,
            datetime.date.today(),
        ]
        for value in values:
            with self.subTest(value):
                signed = signer.sign(value)
                self.assertIsInstance(signed, str)
                self.assertNotEqual(signed, value)
                self.assertEqual(signer.unsign(signed), str(value))

    def test_unsign_detects_tampering(self):
        "unsign should raise an exception if the value has been tampered with"
        signer = signing.Signer('predictable-secret')
        value = 'Another string'
        signed_value = signer.sign(value)
        transforms = (
            lambda s: s.upper(),
            lambda s: s + 'a',
            lambda s: 'a' + s[1:],
            lambda s: s.replace(':', ''),
        )
        self.assertEqual(value, signer.unsign(signed_value))
        for transform in transforms:
            with self.assertRaises(signing.BadSignature):
                signer.unsign(transform(signed_value))

    def test_dumps_loads(self):
        "dumps and loads should be reversible for any JSON serializable object"
        objects = [
            ['a', 'list'],
            'a string \u2019',
            {'a': 'dictionary'},
        ]
        for o in objects:
            self.assertNotEqual(o, signing.dumps(o))
            self.assertEqual(o, signing.loads(signing.dumps(o)))
            self.assertNotEqual(o, signing.dumps(o, compress=True))
            self.assertEqual(o, signing.loads(signing.dumps(o, compress=True)))

    def test_decode_detects_tampering(self):
        "loads should raise exception for tampered objects"
        transforms = (
            lambda s: s.upper(),
            lambda s: s + 'a',
            lambda s: 'a' + s[1:],
            lambda s: s.replace(':', ''),
        )
        value = {
            'foo': 'bar',
            'baz': 1,
        }
        encoded = signing.dumps(value)
        self.assertEqual(value, signing.loads(encoded))
        for transform in transforms:
            with self.assertRaises(signing.BadSignature):
                signing.loads(transform(encoded))

    def test_works_with_non_ascii_keys(self):
        binary_key = b'\xe7'  # Set some binary (non-ASCII) key
        s = signing.Signer(binary_key)
        self.assertEqual('foo:6NB0fssLW5RQvZ3Y-MTerq2rX7w', s.sign('foo'))

    def test_valid_sep(self):
        separators = ['/', '*sep*', ',']
        for sep in separators:
            signer = signing.Signer('predictable-secret', sep=sep)
            self.assertEqual('foo%ssH9B01cZcJ9FoT_jEVkRkNULrl8' % sep, signer.sign('foo'))
separator: %r (cannot be empty or consist of only A-z0-9-_=)' separators = ['', '-', 'abc'] for sep in separators: with self.assertRaisesMessage(ValueError, msg % sep): signing.Signer(sep=sep) class TestTimestampSigner(SimpleTestCase): def test_timestamp_signer(self): value = 'hello' with freeze_time(123456789): signer = signing.TimestampSigner('predictable-key') ts = signer.sign(value) self.assertNotEqual(ts, signing.Signer('predictable-key').sign(value)) self.assertEqual(signer.unsign(ts), value) with freeze_time(123456800): self.assertEqual(signer.unsign(ts, max_age=12), value) # max_age parameter can also accept a datetime.timedelta object self.assertEqual(signer.unsign(ts, max_age=datetime.timedelta(seconds=11)), value) with self.assertRaises(signing.SignatureExpired): signer.unsign(ts, max_age=10)
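# A minimal standalone sketch of the sign/unsign round trip exercised above,
# not Django's actual implementation: an HMAC digest derived from a salted
# key, base64-encoded without padding, appended to the value with a separator.
# The salt namespaces the key, which is why two Signers with different salts
# produce different signatures for the same value. The helper names below
# (b64_hmac_sketch, sign_sketch, unsign_sketch) are illustrative only.
import base64
import hashlib
import hmac


def b64_hmac_sketch(salt, value, key):
    # Derive a per-salt key, then MAC the value with it.
    derived = hashlib.sha1((salt + key).encode()).digest()
    mac = hmac.new(derived, value.encode(), hashlib.sha1).digest()
    return base64.urlsafe_b64encode(mac).rstrip(b'=').decode()


def sign_sketch(value, key, salt='sketch', sep=':'):
    return '%s%s%s' % (value, sep, b64_hmac_sketch(salt, value, key))


def unsign_sketch(signed, key, salt='sketch', sep=':'):
    value, _, sig = signed.rpartition(sep)
    # Constant-time comparison; Django raises BadSignature on a mismatch.
    if not hmac.compare_digest(sig, b64_hmac_sketch(salt, value, key)):
        raise ValueError('signature does not match')
    return value


assert unsign_sketch(sign_sketch('hello', 's3cret'), 's3cret') == 'hello'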
ea81564fd2ab214a5fd73fe7f67414b96dda2d33155831eece4380f6334db59a
from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.db import models __all__ = ('Link', 'Place', 'Restaurant', 'Person', 'Address', 'CharLink', 'TextLink', 'OddRelation1', 'OddRelation2', 'Contact', 'Organization', 'Note', 'Company') class Link(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() def __str__(self): return "Link to %s id=%s" % (self.content_type, self.object_id) class LinkProxy(Link): class Meta: proxy = True class Place(models.Model): name = models.CharField(max_length=100) links = GenericRelation(Link, related_query_name='places') link_proxy = GenericRelation(LinkProxy) def __str__(self): return "Place: %s" % self.name class Restaurant(Place): def __str__(self): return "Restaurant: %s" % self.name class Cafe(Restaurant): def __str__(self): return "Cafe: %s" % self.name class Address(models.Model): street = models.CharField(max_length=80) city = models.CharField(max_length=50) state = models.CharField(max_length=2) zipcode = models.CharField(max_length=5) content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() def __str__(self): return '%s %s, %s %s' % (self.street, self.city, self.state, self.zipcode) class Person(models.Model): account = models.IntegerField(primary_key=True) name = models.CharField(max_length=128) addresses = GenericRelation(Address) def __str__(self): return self.name class CharLink(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.CharField(max_length=100) content_object = GenericForeignKey() class TextLink(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.TextField() content_object = GenericForeignKey() class OddRelation1(models.Model): name = models.CharField(max_length=100) clinks = GenericRelation(CharLink) class OddRelation2(models.Model): name = models.CharField(max_length=100) tlinks = GenericRelation(TextLink) # models for test_q_object_or: class Note(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() note = models.TextField() class Contact(models.Model): notes = GenericRelation(Note) class Organization(models.Model): name = models.CharField(max_length=255) contacts = models.ManyToManyField(Contact, related_name='organizations') class Company(models.Model): name = models.CharField(max_length=100) links = GenericRelation(Link) def __str__(self): return "Company: %s" % self.name # For testing #13085 fix, we also use Note model defined above class Developer(models.Model): name = models.CharField(max_length=15) class Team(models.Model): name = models.CharField(max_length=15) members = models.ManyToManyField(Developer) def __str__(self): return "%s team" % self.name def __len__(self): return self.members.count() class Guild(models.Model): name = models.CharField(max_length=15) members = models.ManyToManyField(Developer) def __bool__(self): return False class Tag(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='g_r_r_tags') object_id = models.CharField(max_length=15) content_object = GenericForeignKey() label = models.CharField(max_length=15) class Board(models.Model): name = models.CharField(primary_key=True, max_length=15) 
class SpecialGenericRelation(GenericRelation): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.editable = True self.save_form_data_calls = 0 def save_form_data(self, *args, **kwargs): self.save_form_data_calls += 1 class HasLinks(models.Model): links = SpecialGenericRelation(Link, related_query_name='targets') class Meta: abstract = True class HasLinkThing(HasLinks): pass class A(models.Model): flag = models.BooleanField(null=True) content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey('content_type', 'object_id') class B(models.Model): a = GenericRelation(A) class Meta: ordering = ('id',) class C(models.Model): b = models.ForeignKey(B, models.CASCADE) class Meta: ordering = ('id',) class D(models.Model): b = models.ForeignKey(B, models.SET_NULL, null=True) class Meta: ordering = ('id',) # Ticket #22998 class Node(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content = GenericForeignKey('content_type', 'object_id') class Content(models.Model): nodes = GenericRelation(Node) related_obj = models.ForeignKey('Related', models.CASCADE) class Related(models.Model): pass def prevent_deletes(sender, instance, **kwargs): raise models.ProtectedError("Not allowed to delete.", [instance]) models.signals.pre_delete.connect(prevent_deletes, sender=Node)
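# Usage sketch for the generic relations defined above (assumes a configured
# Django environment with these models migrated; run inside a test, not at
# import time). A GenericForeignKey stores a (content_type, object_id) pair,
# so one Link row can point at any model instance, and the GenericRelation on
# Place is the reverse accessor for that pair.
def generic_relation_demo():
    place = Place.objects.create(name='Demo Place')
    link = Link.objects.create(content_object=place)
    assert link.content_object == place        # GFK resolves (ct, id) -> instance
    assert list(place.links.all()) == [link]   # reverse lookup via GenericRelation
    # related_query_name='places' lets Link be filtered from the far side:
    assert Link.objects.filter(places__name='Demo Place').exists()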
e9921be49ed128e871c6aa88c4656db04fb63956a539f2c4a27afe21052c5683
""" Test PostgreSQL full text search. These tests use dialogue from the 1975 film Monty Python and the Holy Grail. All text copyright Python (Monty) Pictures. Thanks to sacred-texts.com for the transcript. """ from django.contrib.postgres.search import ( SearchQuery, SearchRank, SearchVector, ) from django.db import connection from django.db.models import F from django.test import SimpleTestCase, modify_settings, skipUnlessDBFeature from . import PostgreSQLTestCase from .models import Character, Line, Scene class GrailTestData: @classmethod def setUpTestData(cls): cls.robin = Scene.objects.create(scene='Scene 10', setting='The dark forest of Ewing') cls.minstrel = Character.objects.create(name='Minstrel') verses = [ ( 'Bravely bold Sir Robin, rode forth from Camelot. ' 'He was not afraid to die, o Brave Sir Robin. ' 'He was not at all afraid to be killed in nasty ways. ' 'Brave, brave, brave, brave Sir Robin!' ), ( 'He was not in the least bit scared to be mashed into a pulp, ' 'Or to have his eyes gouged out, and his elbows broken. ' 'To have his kneecaps split, and his body burned away, ' 'And his limbs all hacked and mangled, brave Sir Robin!' ), ( 'His head smashed in and his heart cut out, ' 'And his liver removed and his bowels unplugged, ' 'And his nostrils ripped and his bottom burned off,' 'And his --' ), ] cls.verses = [Line.objects.create( scene=cls.robin, character=cls.minstrel, dialogue=verse, ) for verse in verses] cls.verse0, cls.verse1, cls.verse2 = cls.verses cls.witch_scene = Scene.objects.create(scene='Scene 5', setting="Sir Bedemir's Castle") bedemir = Character.objects.create(name='Bedemir') crowd = Character.objects.create(name='Crowd') witch = Character.objects.create(name='Witch') duck = Character.objects.create(name='Duck') cls.bedemir0 = Line.objects.create( scene=cls.witch_scene, character=bedemir, dialogue='We shall use my larger scales!', dialogue_config='english', ) cls.bedemir1 = Line.objects.create( scene=cls.witch_scene, character=bedemir, dialogue='Right, remove the supports!', dialogue_config='english', ) cls.duck = Line.objects.create(scene=cls.witch_scene, character=duck, dialogue=None) cls.crowd = Line.objects.create(scene=cls.witch_scene, character=crowd, dialogue='A witch! A witch!') cls.witch = Line.objects.create(scene=cls.witch_scene, character=witch, dialogue="It's a fair cop.") trojan_rabbit = Scene.objects.create(scene='Scene 8', setting="The castle of Our Master Ruiz' de lu la Ramper") guards = Character.objects.create(name='French Guards') cls.french = Line.objects.create( scene=trojan_rabbit, character=guards, dialogue='Oh. Un beau cadeau. 
Oui oui.', dialogue_config='french', ) @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) class SimpleSearchTest(GrailTestData, PostgreSQLTestCase): def test_simple(self): searched = Line.objects.filter(dialogue__search='elbows') self.assertSequenceEqual(searched, [self.verse1]) def test_non_exact_match(self): searched = Line.objects.filter(dialogue__search='hearts') self.assertSequenceEqual(searched, [self.verse2]) def test_search_two_terms(self): searched = Line.objects.filter(dialogue__search='heart bowel') self.assertSequenceEqual(searched, [self.verse2]) def test_search_two_terms_with_partial_match(self): searched = Line.objects.filter(dialogue__search='Robin killed') self.assertSequenceEqual(searched, [self.verse0]) @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) class SearchVectorFieldTest(GrailTestData, PostgreSQLTestCase): def test_existing_vector(self): Line.objects.update(dialogue_search_vector=SearchVector('dialogue')) searched = Line.objects.filter(dialogue_search_vector=SearchQuery('Robin killed')) self.assertSequenceEqual(searched, [self.verse0]) def test_existing_vector_config_explicit(self): Line.objects.update(dialogue_search_vector=SearchVector('dialogue')) searched = Line.objects.filter(dialogue_search_vector=SearchQuery('cadeaux', config='french')) self.assertSequenceEqual(searched, [self.french]) def test_single_coalesce_expression(self): searched = Line.objects.annotate(search=SearchVector('dialogue')).filter(search='cadeaux') self.assertNotIn('COALESCE(COALESCE', str(searched.query)) class MultipleFieldsTest(GrailTestData, PostgreSQLTestCase): def test_simple_on_dialogue(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='elbows') self.assertSequenceEqual(searched, [self.verse1]) def test_simple_on_scene(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='Forest') self.assertCountEqual(searched, self.verses) def test_non_exact_match(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='heart') self.assertSequenceEqual(searched, [self.verse2]) def test_search_two_terms(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='heart forest') self.assertSequenceEqual(searched, [self.verse2]) def test_terms_adjacent(self): searched = Line.objects.annotate( search=SearchVector('character__name', 'dialogue'), ).filter(search='minstrel') self.assertCountEqual(searched, self.verses) searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='minstrelbravely') self.assertSequenceEqual(searched, []) def test_search_with_null(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search='bedemir') self.assertCountEqual(searched, [self.bedemir0, self.bedemir1, self.crowd, self.witch, self.duck]) def test_search_with_non_text(self): searched = Line.objects.annotate( search=SearchVector('id'), ).filter(search=str(self.crowd.id)) self.assertSequenceEqual(searched, [self.crowd]) @skipUnlessDBFeature('has_phraseto_tsquery') def test_phrase_search(self): line_qs = Line.objects.annotate(search=SearchVector('dialogue')) searched = line_qs.filter(search=SearchQuery('burned body his away', search_type='phrase')) self.assertSequenceEqual(searched, []) searched = line_qs.filter(search=SearchQuery('his body burned away', search_type='phrase')) 
self.assertSequenceEqual(searched, [self.verse1]) @skipUnlessDBFeature('has_phraseto_tsquery') def test_phrase_search_with_config(self): line_qs = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config='french'), ) searched = line_qs.filter( search=SearchQuery('cadeau beau un', search_type='phrase', config='french'), ) self.assertSequenceEqual(searched, []) searched = line_qs.filter( search=SearchQuery('un beau cadeau', search_type='phrase', config='french'), ) self.assertSequenceEqual(searched, [self.french]) def test_raw_search(self): line_qs = Line.objects.annotate(search=SearchVector('dialogue')) searched = line_qs.filter(search=SearchQuery('Robin', search_type='raw')) self.assertCountEqual(searched, [self.verse0, self.verse1]) searched = line_qs.filter(search=SearchQuery("Robin & !'Camelot'", search_type='raw')) self.assertSequenceEqual(searched, [self.verse1]) def test_raw_search_with_config(self): line_qs = Line.objects.annotate(search=SearchVector('dialogue', config='french')) searched = line_qs.filter( search=SearchQuery("'cadeaux' & 'beaux'", search_type='raw', config='french'), ) self.assertSequenceEqual(searched, [self.french]) @skipUnlessDBFeature('has_websearch_to_tsquery') def test_web_search(self): line_qs = Line.objects.annotate(search=SearchVector('dialogue')) searched = line_qs.filter( search=SearchQuery( '"burned body" "split kneecaps"', search_type='websearch', ), ) self.assertSequenceEqual(searched, []) searched = line_qs.filter( search=SearchQuery( '"body burned" "kneecaps split" -"nostrils"', search_type='websearch', ), ) self.assertSequenceEqual(searched, [self.verse1]) searched = line_qs.filter( search=SearchQuery( '"Sir Robin" ("kneecaps" OR "Camelot")', search_type='websearch', ), ) self.assertSequenceEqual(searched, [self.verse0, self.verse1]) @skipUnlessDBFeature('has_websearch_to_tsquery') def test_web_search_with_config(self): line_qs = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config='french'), ) searched = line_qs.filter( search=SearchQuery('cadeau -beau', search_type='websearch', config='french'), ) self.assertSequenceEqual(searched, []) searched = line_qs.filter( search=SearchQuery('beau cadeau', search_type='websearch', config='french'), ) self.assertSequenceEqual(searched, [self.french]) def test_bad_search_type(self): with self.assertRaisesMessage(ValueError, "Unknown search_type argument 'foo'."): SearchQuery('kneecaps', search_type='foo') def test_config_query_explicit(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config='french'), ).filter(search=SearchQuery('cadeaux', config='french')) self.assertSequenceEqual(searched, [self.french]) def test_config_query_implicit(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config='french'), ).filter(search='cadeaux') self.assertSequenceEqual(searched, [self.french]) def test_config_from_field_explicit(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config=F('dialogue_config')), ).filter(search=SearchQuery('cadeaux', config=F('dialogue_config'))) self.assertSequenceEqual(searched, [self.french]) def test_config_from_field_implicit(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue', config=F('dialogue_config')), ).filter(search='cadeaux') self.assertSequenceEqual(searched, [self.french]) @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) class 
TestCombinations(GrailTestData, PostgreSQLTestCase): def test_vector_add(self): searched = Line.objects.annotate( search=SearchVector('scene__setting') + SearchVector('character__name'), ).filter(search='bedemir') self.assertCountEqual(searched, [self.bedemir0, self.bedemir1, self.crowd, self.witch, self.duck]) def test_vector_add_multi(self): searched = Line.objects.annotate( search=( SearchVector('scene__setting') + SearchVector('character__name') + SearchVector('dialogue') ), ).filter(search='bedemir') self.assertCountEqual(searched, [self.bedemir0, self.bedemir1, self.crowd, self.witch, self.duck]) def test_vector_combined_mismatch(self): msg = ( 'SearchVector can only be combined with other SearchVector ' 'instances, got NoneType.' ) with self.assertRaisesMessage(TypeError, msg): Line.objects.filter(dialogue__search=None + SearchVector('character__name')) def test_query_and(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search=SearchQuery('bedemir') & SearchQuery('scales')) self.assertSequenceEqual(searched, [self.bedemir0]) def test_query_multiple_and(self): searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search=SearchQuery('bedemir') & SearchQuery('scales') & SearchQuery('nostrils')) self.assertSequenceEqual(searched, []) searched = Line.objects.annotate( search=SearchVector('scene__setting', 'dialogue'), ).filter(search=SearchQuery('shall') & SearchQuery('use') & SearchQuery('larger')) self.assertSequenceEqual(searched, [self.bedemir0]) def test_query_or(self): searched = Line.objects.filter(dialogue__search=SearchQuery('kneecaps') | SearchQuery('nostrils')) self.assertCountEqual(searched, [self.verse1, self.verse2]) def test_query_multiple_or(self): searched = Line.objects.filter( dialogue__search=SearchQuery('kneecaps') | SearchQuery('nostrils') | SearchQuery('Sir Robin') ) self.assertCountEqual(searched, [self.verse1, self.verse2, self.verse0]) def test_query_invert(self): searched = Line.objects.filter(character=self.minstrel, dialogue__search=~SearchQuery('kneecaps')) self.assertCountEqual(searched, [self.verse0, self.verse2]) def test_combine_different_configs(self): searched = Line.objects.filter( dialogue__search=( SearchQuery('cadeau', config='french') | SearchQuery('nostrils', config='english') ) ) self.assertCountEqual(searched, [self.french, self.verse2]) @skipUnlessDBFeature('has_phraseto_tsquery') def test_combine_raw_phrase(self): searched = Line.objects.filter( dialogue__search=( SearchQuery('burn:*', search_type='raw', config='simple') | SearchQuery('rode forth from Camelot', search_type='phrase') ) ) self.assertCountEqual(searched, [self.verse0, self.verse1, self.verse2]) def test_query_combined_mismatch(self): msg = ( 'SearchQuery can only be combined with other SearchQuery ' 'instances, got NoneType.' 
) with self.assertRaisesMessage(TypeError, msg): Line.objects.filter(dialogue__search=None | SearchQuery('kneecaps')) with self.assertRaisesMessage(TypeError, msg): Line.objects.filter(dialogue__search=None & SearchQuery('kneecaps')) @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) class TestRankingAndWeights(GrailTestData, PostgreSQLTestCase): def test_ranking(self): searched = Line.objects.filter(character=self.minstrel).annotate( rank=SearchRank(SearchVector('dialogue'), SearchQuery('brave sir robin')), ).order_by('rank') self.assertSequenceEqual(searched, [self.verse2, self.verse1, self.verse0]) def test_rank_passing_untyped_args(self): searched = Line.objects.filter(character=self.minstrel).annotate( rank=SearchRank('dialogue', 'brave sir robin'), ).order_by('rank') self.assertSequenceEqual(searched, [self.verse2, self.verse1, self.verse0]) def test_weights_in_vector(self): vector = SearchVector('dialogue', weight='A') + SearchVector('character__name', weight='D') searched = Line.objects.filter(scene=self.witch_scene).annotate( rank=SearchRank(vector, SearchQuery('witch')), ).order_by('-rank')[:2] self.assertSequenceEqual(searched, [self.crowd, self.witch]) vector = SearchVector('dialogue', weight='D') + SearchVector('character__name', weight='A') searched = Line.objects.filter(scene=self.witch_scene).annotate( rank=SearchRank(vector, SearchQuery('witch')), ).order_by('-rank')[:2] self.assertSequenceEqual(searched, [self.witch, self.crowd]) def test_ranked_custom_weights(self): vector = SearchVector('dialogue', weight='D') + SearchVector('character__name', weight='A') searched = Line.objects.filter(scene=self.witch_scene).annotate( rank=SearchRank(vector, SearchQuery('witch'), weights=[1, 0, 0, 0.5]), ).order_by('-rank')[:2] self.assertSequenceEqual(searched, [self.crowd, self.witch]) def test_ranking_chaining(self): searched = Line.objects.filter(character=self.minstrel).annotate( rank=SearchRank(SearchVector('dialogue'), SearchQuery('brave sir robin')), ).filter(rank__gt=0.3) self.assertSequenceEqual(searched, [self.verse0]) class SearchVectorIndexTests(PostgreSQLTestCase): def test_search_vector_index(self): """SearchVector generates IMMUTABLE SQL in order to be indexable.""" # This test should be moved to test_indexes and use a functional # index instead once support lands (see #26167). query = Line.objects.all().query resolved = SearchVector('id', 'dialogue', config='english').resolve_expression(query) compiler = query.get_compiler(connection.alias) sql, params = resolved.as_sql(compiler, connection) # Indexed function must be IMMUTABLE. with connection.cursor() as cursor: cursor.execute( 'CREATE INDEX search_vector_index ON %s USING GIN (%s)' % (Line._meta.db_table, sql), params, ) class SearchQueryTests(SimpleTestCase): def test_str(self): tests = ( (~SearchQuery('a'), '~SearchQuery(a)'), ( (SearchQuery('a') | SearchQuery('b')) & (SearchQuery('c') | SearchQuery('d')), '((SearchQuery(a) || SearchQuery(b)) && (SearchQuery(c) || SearchQuery(d)))', ), ( SearchQuery('a') & (SearchQuery('b') | SearchQuery('c')), '(SearchQuery(a) && (SearchQuery(b) || SearchQuery(c)))', ), ( (SearchQuery('a') | SearchQuery('b')) & SearchQuery('c'), '((SearchQuery(a) || SearchQuery(b)) && SearchQuery(c))' ), ( SearchQuery('a') & (SearchQuery('b') & (SearchQuery('c') | SearchQuery('d'))), '(SearchQuery(a) && (SearchQuery(b) && (SearchQuery(c) || SearchQuery(d))))', ), ) for query, expected_str in tests: with self.subTest(query=query): self.assertEqual(str(query), expected_str)
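# Sketch of the ORM-to-SQL mapping the tests above rely on (PostgreSQL only).
# The SQL in the comments shows the general shape of what the compiler emits,
# not verbatim output; Line is the model imported at the top of this module.
from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector


def search_demo():
    return Line.objects.annotate(
        # to_tsvector('english'::regconfig, "dialogue")
        search=SearchVector('dialogue', config='english'),
        # ts_rank(to_tsvector("dialogue"), plainto_tsquery('robin'))
        rank=SearchRank(SearchVector('dialogue'), SearchQuery('robin')),
    ).filter(
        # search_type='raw' maps to to_tsquery(), so operators like & pass through
        search=SearchQuery('brave & robin', search_type='raw'),
    ).order_by('-rank')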
1ee2de84509bcb63f63a4d20e611246be8baa5bacc5f483e2405fe9807288d9f
import unittest from migrations.test_base import OperationTestBase from django.db import NotSupportedError, connection from django.db.models import Index from django.test import modify_settings try: from django.contrib.postgres.operations import ( AddIndexConcurrently, RemoveIndexConcurrently, ) from django.contrib.postgres.indexes import BrinIndex, BTreeIndex except ImportError: pass @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.') @modify_settings(INSTALLED_APPS={'append': 'migrations'}) class AddIndexConcurrentlyTests(OperationTestBase): app_label = 'test_add_concurrently' def test_requires_atomic_false(self): project_state = self.set_up_test_model(self.app_label) new_state = project_state.clone() operation = AddIndexConcurrently( 'Pony', Index(fields=['pink'], name='pony_pink_idx'), ) msg = ( 'The AddIndexConcurrently operation cannot be executed inside ' 'a transaction (set atomic = False on the migration).' ) with self.assertRaisesMessage(NotSupportedError, msg): with connection.schema_editor(atomic=True) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) def test_add(self): project_state = self.set_up_test_model(self.app_label, index=False) table_name = '%s_pony' % self.app_label index = Index(fields=['pink'], name='pony_pink_idx') new_state = project_state.clone() operation = AddIndexConcurrently('Pony', index) self.assertEqual( operation.describe(), 'Concurrently create index pony_pink_idx on field(s) pink of ' 'model Pony' ) operation.state_forwards(self.app_label, new_state) self.assertEqual(len(new_state.models[self.app_label, 'pony'].options['indexes']), 1) self.assertIndexNotExists(table_name, ['pink']) # Add index. with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertIndexExists(table_name, ['pink']) # Reversal. with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertIndexNotExists(table_name, ['pink']) # Deconstruction. name, args, kwargs = operation.deconstruct() self.assertEqual(name, 'AddIndexConcurrently') self.assertEqual(args, []) self.assertEqual(kwargs, {'model_name': 'Pony', 'index': index}) def test_add_other_index_type(self): project_state = self.set_up_test_model(self.app_label, index=False) table_name = '%s_pony' % self.app_label new_state = project_state.clone() operation = AddIndexConcurrently( 'Pony', BrinIndex(fields=['pink'], name='pony_pink_brin_idx'), ) self.assertIndexNotExists(table_name, ['pink']) # Add index. with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertIndexExists(table_name, ['pink'], index_type='brin') # Reversal. with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertIndexNotExists(table_name, ['pink']) def test_add_with_options(self): project_state = self.set_up_test_model(self.app_label, index=False) table_name = '%s_pony' % self.app_label new_state = project_state.clone() index = BTreeIndex(fields=['pink'], name='pony_pink_btree_idx', fillfactor=70) operation = AddIndexConcurrently('Pony', index) self.assertIndexNotExists(table_name, ['pink']) # Add index. 
with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertIndexExists(table_name, ['pink'], index_type='btree') # Reversal. with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertIndexNotExists(table_name, ['pink']) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.') @modify_settings(INSTALLED_APPS={'append': 'migrations'}) class RemoveIndexConcurrentlyTests(OperationTestBase): app_label = 'test_rm_concurrently' def test_requires_atomic_false(self): project_state = self.set_up_test_model(self.app_label, index=True) new_state = project_state.clone() operation = RemoveIndexConcurrently('Pony', 'pony_pink_idx') msg = ( 'The RemoveIndexConcurrently operation cannot be executed inside ' 'a transaction (set atomic = False on the migration).' ) with self.assertRaisesMessage(NotSupportedError, msg): with connection.schema_editor(atomic=True) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) def test_remove(self): project_state = self.set_up_test_model(self.app_label, index=True) table_name = '%s_pony' % self.app_label self.assertTableExists(table_name) new_state = project_state.clone() operation = RemoveIndexConcurrently('Pony', 'pony_pink_idx') self.assertEqual( operation.describe(), 'Concurrently remove index pony_pink_idx from Pony', ) operation.state_forwards(self.app_label, new_state) self.assertEqual(len(new_state.models[self.app_label, 'pony'].options['indexes']), 0) self.assertIndexExists(table_name, ['pink']) # Remove index. with connection.schema_editor(atomic=False) as editor: operation.database_forwards(self.app_label, editor, project_state, new_state) self.assertIndexNotExists(table_name, ['pink']) # Reversal. with connection.schema_editor(atomic=False) as editor: operation.database_backwards(self.app_label, editor, new_state, project_state) self.assertIndexExists(table_name, ['pink']) # Deconstruction. name, args, kwargs = operation.deconstruct() self.assertEqual(name, 'RemoveIndexConcurrently') self.assertEqual(args, []) self.assertEqual(kwargs, {'model_name': 'Pony', 'name': 'pony_pink_idx'})
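# Sketch of the migration shape these operations target: PostgreSQL's
# CREATE/DROP INDEX CONCURRENTLY refuses to run inside a transaction, so the
# migration class must set atomic = False, matching the error message the
# tests assert. The app label, dependency, and index names are illustrative.
from django.contrib.postgres.operations import AddIndexConcurrently
from django.db import migrations, models


class Migration(migrations.Migration):
    atomic = False  # required for concurrent index creation/removal

    dependencies = [('myapp', '0001_initial')]

    operations = [
        AddIndexConcurrently(
            'Pony',
            models.Index(fields=['pink'], name='pony_pink_idx'),
        ),
    ]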
19a77e2068666f494174590df108447ae1d62039113259a7a9e13a4e9c3fa31d
import datetime from unittest import mock from django.db import IntegrityError, connection, transaction from django.db.models import CheckConstraint, F, Func, Q from django.utils import timezone from . import PostgreSQLTestCase from .models import HotelReservation, RangesModel, Room try: from django.contrib.postgres.constraints import ExclusionConstraint from django.contrib.postgres.fields import DateTimeRangeField, RangeBoundary, RangeOperators from psycopg2.extras import DateRange, NumericRange except ImportError: pass class SchemaTests(PostgreSQLTestCase): def get_constraints(self, table): """Get the constraints on the table using a new cursor.""" with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def test_check_constraint_range_value(self): constraint_name = 'ints_between' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = CheckConstraint( check=Q(ints__contained_by=NumericRange(10, 30)), name=constraint_name, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(20, 50)) RangesModel.objects.create(ints=(10, 30)) def test_check_constraint_daterange_contains(self): constraint_name = 'dates_contains' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = CheckConstraint( check=Q(dates__contains=F('dates_inner')), name=constraint_name, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) date_1 = datetime.date(2016, 1, 1) date_2 = datetime.date(2016, 1, 4) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create( dates=(date_1, date_2), dates_inner=(date_1, date_2.replace(day=5)), ) RangesModel.objects.create( dates=(date_1, date_2), dates_inner=(date_1, date_2), ) def test_check_constraint_datetimerange_contains(self): constraint_name = 'timestamps_contains' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = CheckConstraint( check=Q(timestamps__contains=F('timestamps_inner')), name=constraint_name, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) datetime_1 = datetime.datetime(2016, 1, 1) datetime_2 = datetime.datetime(2016, 1, 2, 12) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create( timestamps=(datetime_1, datetime_2), timestamps_inner=(datetime_1, datetime_2.replace(hour=13)), ) RangesModel.objects.create( timestamps=(datetime_1, datetime_2), timestamps_inner=(datetime_1, datetime_2), ) class ExclusionConstraintTests(PostgreSQLTestCase): def get_constraints(self, table): """Get the constraints on the table using a new cursor.""" with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def test_invalid_condition(self): msg = 'ExclusionConstraint.condition must be a Q instance.' 
with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type='GIST', name='exclude_invalid_condition', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], condition=F('invalid'), ) def test_invalid_index_type(self): msg = 'Exclusion constraints only support GiST or SP-GiST indexes.' with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type='gin', name='exclude_invalid_index_type', expressions=[(F('datespan'), RangeOperators.OVERLAPS)], ) def test_invalid_expressions(self): msg = 'The expressions must be a list of 2-tuples.' for expressions in (['foo'], [('foo')], [('foo_1', 'foo_2', 'foo_3')]): with self.subTest(expressions), self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type='GIST', name='exclude_invalid_expressions', expressions=expressions, ) def test_empty_expressions(self): msg = 'At least one expression is required to define an exclusion constraint.' for empty_expressions in (None, []): with self.subTest(empty_expressions), self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type='GIST', name='exclude_empty_expressions', expressions=empty_expressions, ) def test_repr(self): constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[ (F('datespan'), RangeOperators.OVERLAPS), (F('room'), RangeOperators.EQUAL), ], ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type=GIST, expressions=[" "(F(datespan), '&&'), (F(room), '=')]>", ) constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)], condition=Q(cancelled=False), index_type='SPGiST', ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type=SPGiST, expressions=[" "(F(datespan), '-|-')], condition=(AND: ('cancelled', False))>", ) def test_eq(self): constraint_1 = ExclusionConstraint( name='exclude_overlapping', expressions=[ (F('datespan'), RangeOperators.OVERLAPS), (F('room'), RangeOperators.EQUAL), ], condition=Q(cancelled=False), ) constraint_2 = ExclusionConstraint( name='exclude_overlapping', expressions=[ ('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL), ], ) constraint_3 = ExclusionConstraint( name='exclude_overlapping', expressions=[('datespan', RangeOperators.OVERLAPS)], condition=Q(cancelled=False), ) self.assertEqual(constraint_1, constraint_1) self.assertEqual(constraint_1, mock.ANY) self.assertNotEqual(constraint_1, constraint_2) self.assertNotEqual(constraint_1, constraint_3) self.assertNotEqual(constraint_2, constraint_3) self.assertNotEqual(constraint_1, object()) def test_deconstruct(self): constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, { 'name': 'exclude_overlapping', 'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], }) def test_deconstruct_index_type(self): constraint = ExclusionConstraint( name='exclude_overlapping', index_type='SPGIST', expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, { 'name': 'exclude_overlapping', 'index_type': 'SPGIST', 'expressions': [('datespan', 
RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], }) def test_deconstruct_condition(self): constraint = ExclusionConstraint( name='exclude_overlapping', expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], condition=Q(cancelled=False), ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint') self.assertEqual(args, ()) self.assertEqual(kwargs, { 'name': 'exclude_overlapping', 'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)], 'condition': Q(cancelled=False), }) def _test_range_overlaps(self, constraint): # Create exclusion constraint. self.assertNotIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table)) with connection.schema_editor() as editor: editor.add_constraint(HotelReservation, constraint) self.assertIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table)) # Add initial reservations. room101 = Room.objects.create(number=101) room102 = Room.objects.create(number=102) datetimes = [ timezone.datetime(2018, 6, 20), timezone.datetime(2018, 6, 24), timezone.datetime(2018, 6, 26), timezone.datetime(2018, 6, 28), timezone.datetime(2018, 6, 29), ] HotelReservation.objects.create( datespan=DateRange(datetimes[0].date(), datetimes[1].date()), start=datetimes[0], end=datetimes[1], room=room102, ) HotelReservation.objects.create( datespan=DateRange(datetimes[1].date(), datetimes[3].date()), start=datetimes[1], end=datetimes[3], room=room102, ) # Overlap dates. with self.assertRaises(IntegrityError), transaction.atomic(): reservation = HotelReservation( datespan=(datetimes[1].date(), datetimes[2].date()), start=datetimes[1], end=datetimes[2], room=room102, ) reservation.save() # Valid range. HotelReservation.objects.bulk_create([ # Other room. HotelReservation( datespan=(datetimes[1].date(), datetimes[2].date()), start=datetimes[1], end=datetimes[2], room=room101, ), # Cancelled reservation. HotelReservation( datespan=(datetimes[1].date(), datetimes[1].date()), start=datetimes[1], end=datetimes[2], room=room102, cancelled=True, ), # Other adjacent dates. 
HotelReservation( datespan=(datetimes[3].date(), datetimes[4].date()), start=datetimes[3], end=datetimes[4], room=room102, ), ]) def test_range_overlaps_custom(self): class TsTzRange(Func): function = 'TSTZRANGE' output_field = DateTimeRangeField() constraint = ExclusionConstraint( name='exclude_overlapping_reservations_custom', expressions=[ (TsTzRange('start', 'end', RangeBoundary()), RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL) ], condition=Q(cancelled=False), ) self._test_range_overlaps(constraint) def test_range_overlaps(self): constraint = ExclusionConstraint( name='exclude_overlapping_reservations', expressions=[ (F('datespan'), RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL) ], condition=Q(cancelled=False), ) self._test_range_overlaps(constraint) def test_range_adjacent(self): constraint_name = 'ints_adjacent' self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[('ints', RangeOperators.ADJACENT_TO)], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) RangesModel.objects.create(ints=(20, 50)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(10, 20)) RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60))
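# Sketch of declaring the overlap constraint from _test_range_overlaps on a
# model's Meta, which is how ExclusionConstraint is normally used outside the
# schema editor. ReservationSketch is an illustrative stand-in for the
# HotelReservation model imported above; a real model also needs an app.
from django.contrib.postgres.constraints import ExclusionConstraint
from django.contrib.postgres.fields import DateRangeField, RangeOperators
from django.db import models
from django.db.models import Q


class ReservationSketch(models.Model):
    room = models.ForeignKey('Room', models.CASCADE)
    datespan = DateRangeField()
    cancelled = models.BooleanField(default=False)

    class Meta:
        constraints = [
            # Reject rows whose datespans overlap (&&) for the same room (=),
            # unless the reservation was cancelled.
            ExclusionConstraint(
                name='exclude_overlapping_reservations_sketch',
                expressions=[
                    ('datespan', RangeOperators.OVERLAPS),
                    ('room', RangeOperators.EQUAL),
                ],
                condition=Q(cancelled=False),
            ),
        ]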
1da69f5056bce57e3b32abf7e9e815733a1d8a4199e7d7e5e447b27b1b403298
# Unit tests for cache framework # Uses whatever cache backend is set in the test settings file. import copy import io import os import pickle import re import shutil import tempfile import threading import time import unittest from pathlib import Path from unittest import mock from django.conf import settings from django.core import management, signals from django.core.cache import ( DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches, ) from django.core.cache.utils import make_template_fragment_key from django.db import close_old_connections, connection, connections from django.http import ( HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse, ) from django.middleware.cache import ( CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware, ) from django.middleware.csrf import CsrfViewMiddleware from django.template import engines from django.template.context_processors import csrf from django.template.response import TemplateResponse from django.test import ( RequestFactory, SimpleTestCase, TestCase, TransactionTestCase, override_settings, ) from django.test.signals import setting_changed from django.utils import timezone, translation from django.utils.cache import ( get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers, ) from django.views.decorators.cache import cache_control, cache_page from .models import Poll, expensive_calculation # functions/classes for complex data type tests def f(): return 42 class C: def m(n): return 24 class Unpicklable: def __getstate__(self): raise pickle.PickleError() KEY_ERRORS_WITH_MEMCACHED_MSG = ( 'Cache key contains characters that will cause errors if used with ' 'memcached: %r' ) @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', } }) class DummyCacheTests(SimpleTestCase): # The Dummy cache backend doesn't really behave like a test backend, # so it has its own test case. 
def test_simple(self): "Dummy cache backend ignores cache set calls" cache.set("key", "value") self.assertIsNone(cache.get("key")) def test_add(self): "Add doesn't do anything in dummy cache backend" self.assertIs(cache.add("addkey1", "value"), True) self.assertIs(cache.add("addkey1", "newvalue"), True) self.assertIsNone(cache.get("addkey1")) def test_non_existent(self): "Nonexistent keys aren't found in the dummy cache backend" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): "get_many returns nothing for the dummy cache backend" cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'c', 'd']), {}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {}) def test_get_many_invalid_key(self): msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces' with self.assertWarnsMessage(CacheKeyWarning, msg): cache.get_many(['key with spaces']) def test_delete(self): "Cache deletion is transparently ignored on the dummy cache backend" cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertIsNone(cache.get("key1")) self.assertIs(cache.delete("key1"), False) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_has_key(self): "The has_key method doesn't ever return True for the dummy cache backend" cache.set("hello1", "goodbye1") self.assertIs(cache.has_key("hello1"), False) self.assertIs(cache.has_key("goodbye1"), False) def test_in(self): "The in operator doesn't ever return True for the dummy cache backend" cache.set("hello2", "goodbye2") self.assertNotIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): "Dummy cache values can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.incr('answer') with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): "Dummy cache values can't be decremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.decr('answer') with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_touch(self): """Dummy cache can't do touch().""" self.assertIs(cache.touch('whatever'), False) def test_data_types(self): "All data types are ignored equally by the dummy cache" stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertIsNone(cache.get("stuff")) def test_expiration(self): "Expiration has no effect on the dummy cache" cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) self.assertIs(cache.add("expire2", "newvalue"), True) self.assertIsNone(cache.get("expire2")) self.assertIs(cache.has_key("expire3"), False) def test_unicode(self): "Unicode values are ignored by the dummy cache" stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertIsNone(cache.get(key)) def test_set_many(self): "set_many does nothing for the dummy cache backend" self.assertEqual(cache.set_many({'a': 1, 'b': 2}), []) self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), []) def test_set_many_invalid_key(self): msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces' with 
self.assertWarnsMessage(CacheKeyWarning, msg): cache.set_many({'key with spaces': 'foo'}) def test_delete_many(self): "delete_many does nothing for the dummy cache backend" cache.delete_many(['a', 'b']) def test_delete_many_invalid_key(self): msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces' with self.assertWarnsMessage(CacheKeyWarning, msg): cache.delete_many({'key with spaces': 'foo'}) def test_clear(self): "clear does nothing for the dummy cache backend" cache.clear() def test_incr_version(self): "Dummy cache versions can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.incr_version('answer') with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): "Dummy cache versions can't be decremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.decr_version('answer') with self.assertRaises(ValueError): cache.decr_version('does_not_exist') def test_get_or_set(self): self.assertEqual(cache.get_or_set('mykey', 'default'), 'default') self.assertIsNone(cache.get_or_set('mykey', None)) def test_get_or_set_callable(self): def my_callable(): return 'default' self.assertEqual(cache.get_or_set('mykey', my_callable), 'default') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default') def custom_key_func(key, key_prefix, version): "A customized cache key function" return 'CUSTOM-' + '-'.join([key_prefix, str(version), key]) _caches_setting_base = { 'default': {}, 'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())}, 'v2': {'VERSION': 2}, 'custom_key': {'KEY_FUNCTION': custom_key_func}, 'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'}, 'cull': {'OPTIONS': {'MAX_ENTRIES': 30}}, 'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}}, } def caches_setting_for_tests(base=None, exclude=None, **params): # `base` is used to pull in the memcached config from the original settings, # `exclude` is a set of cache names denoting which `_caches_setting_base` keys # should be omitted. # `params` are test-specific overrides and `_caches_setting_base` is the # base config for the tests.
# This results in the following search order: # params -> _caches_setting_base -> base base = base or {} exclude = exclude or set() setting = {k: base.copy() for k in _caches_setting_base if k not in exclude} for key, cache_params in setting.items(): cache_params.update(_caches_setting_base[key]) cache_params.update(params) return setting class BaseCacheTests: # A common set of tests to apply to all cache backends factory = RequestFactory() def tearDown(self): cache.clear() def test_simple(self): # Simple cache set/get works cache.set("key", "value") self.assertEqual(cache.get("key"), "value") def test_default_used_when_none_is_set(self): """If None is cached, get() returns it instead of the default.""" cache.set('key_default_none', None) self.assertIsNone(cache.get('key_default_none', default='default')) def test_add(self): # A key can be added to a cache self.assertIs(cache.add("addkey1", "value"), True) self.assertIs(cache.add("addkey1", "newvalue"), False) self.assertEqual(cache.get("addkey1"), "value") def test_prefix(self): # Test for same cache key conflicts between shared backend cache.set('somekey', 'value') # should not be set in the prefixed cache self.assertIs(caches['prefix'].has_key('somekey'), False) caches['prefix'].set('somekey', 'value2') self.assertEqual(cache.get('somekey'), 'value') self.assertEqual(caches['prefix'].get('somekey'), 'value2') def test_non_existent(self): """Nonexistent cache keys return as None/default.""" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): # Multiple cache keys can be returned using get_many cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'}) self.assertEqual(cache.get_many(iter(['a', 'b', 'e'])), {'a': 'a', 'b': 'b'}) def test_delete(self): # Cache keys can be deleted cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertEqual(cache.get("key1"), "spam") self.assertIs(cache.delete("key1"), True) self.assertIsNone(cache.get("key1")) self.assertEqual(cache.get("key2"), "eggs") def test_delete_nonexistent(self): self.assertIs(cache.delete('nonexistent_key'), False) def test_has_key(self): # The cache can be inspected for cache keys cache.set("hello1", "goodbye1") self.assertIs(cache.has_key("hello1"), True) self.assertIs(cache.has_key("goodbye1"), False) cache.set("no_expiry", "here", None) self.assertIs(cache.has_key("no_expiry"), True) def test_in(self): # The in operator can be used to inspect cache contents cache.set("hello2", "goodbye2") self.assertIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): # Cache values can be incremented cache.set('answer', 41) self.assertEqual(cache.incr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.incr('answer', 10), 52) self.assertEqual(cache.get('answer'), 52) self.assertEqual(cache.incr('answer', -10), 42) with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): # Cache values can be decremented cache.set('answer', 43) self.assertEqual(cache.decr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.decr('answer', 10), 32) self.assertEqual(cache.get('answer'), 32) self.assertEqual(cache.decr('answer', -10), 42) with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_close(self): self.assertTrue(hasattr(cache, 'close')) 
cache.close() def test_data_types(self): # Many different data types can be cached stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertEqual(cache.get("stuff"), stuff) def test_cache_read_for_model_instance(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() my_poll = Poll.objects.create(question="Well?") self.assertEqual(Poll.objects.count(), 1) pub_date = my_poll.pub_date cache.set('question', my_poll) cached_poll = cache.get('question') self.assertEqual(cached_poll.pub_date, pub_date) # We only want the default expensive calculation run once self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_write_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache write expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) self.assertEqual(expensive_calculation.num_runs, 1) cache.set('deferred_queryset', defer_qs) # cache set should not re-evaluate default functions self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_read_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) cache.set('deferred_queryset', defer_qs) self.assertEqual(expensive_calculation.num_runs, 1) runs_before_cache_read = expensive_calculation.num_runs cache.get('deferred_queryset') # We only want the default expensive calculation run on creation and set self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read) def test_expiration(self): # Cache values can be set to expire cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) self.assertIs(cache.add("expire2", "newvalue"), True) self.assertEqual(cache.get("expire2"), "newvalue") self.assertIs(cache.has_key("expire3"), False) def test_touch(self): # cache.touch() updates the timeout. cache.set('expire1', 'very quickly', timeout=1) self.assertIs(cache.touch('expire1', timeout=4), True) time.sleep(2) self.assertIs(cache.has_key('expire1'), True) time.sleep(3) self.assertIs(cache.has_key('expire1'), False) # cache.touch() works without the timeout argument. 
cache.set('expire1', 'very quickly', timeout=1) self.assertIs(cache.touch('expire1'), True) time.sleep(2) self.assertIs(cache.has_key('expire1'), True) self.assertIs(cache.touch('nonexistent'), False) def test_unicode(self): # Unicode values can be cached stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } # Test `set` for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertEqual(cache.get(key), value) # Test `add` for (key, value) in stuff.items(): with self.subTest(key=key): self.assertIs(cache.delete(key), True) self.assertIs(cache.add(key, value), True) self.assertEqual(cache.get(key), value) # Test `set_many` for (key, value) in stuff.items(): self.assertIs(cache.delete(key), True) cache.set_many(stuff) for (key, value) in stuff.items(): with self.subTest(key=key): self.assertEqual(cache.get(key), value) def test_binary_string(self): # Binary strings should be cacheable from zlib import compress, decompress value = 'value_to_be_compressed' compressed_value = compress(value.encode()) # Test set cache.set('binary1', compressed_value) compressed_result = cache.get('binary1') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test add self.assertIs(cache.add('binary1-add', compressed_value), True) compressed_result = cache.get('binary1-add') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test set_many cache.set_many({'binary1-set_many': compressed_value}) compressed_result = cache.get('binary1-set_many') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) def test_set_many(self): # Multiple keys can be set using set_many cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertEqual(cache.get("key1"), "spam") self.assertEqual(cache.get("key2"), "eggs") def test_set_many_returns_empty_list_on_success(self): """set_many() returns an empty list when all keys are inserted.""" failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertEqual(failing_keys, []) def test_set_many_expiration(self): # set_many takes a second ``timeout`` parameter cache.set_many({"key1": "spam", "key2": "eggs"}, 1) time.sleep(2) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_delete_many(self): # Multiple keys can be deleted using delete_many cache.set_many({'key1': 'spam', 'key2': 'eggs', 'key3': 'ham'}) cache.delete_many(["key1", "key2"]) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) self.assertEqual(cache.get("key3"), "ham") def test_clear(self): # The cache can be emptied using clear cache.set_many({'key1': 'spam', 'key2': 'eggs'}) cache.clear() self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_long_timeout(self): """ Follow memcached's convention where a timeout greater than 30 days is treated as an absolute expiration timestamp instead of a relative offset (#12399). 
""" cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second self.assertEqual(cache.get('key1'), 'eggs') self.assertIs(cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1), True) self.assertEqual(cache.get('key2'), 'ham') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') def test_forever_timeout(self): """ Passing in None into timeout results in a value that is cached forever """ cache.set('key1', 'eggs', None) self.assertEqual(cache.get('key1'), 'eggs') self.assertIs(cache.add('key2', 'ham', None), True) self.assertEqual(cache.get('key2'), 'ham') self.assertIs(cache.add('key1', 'new eggs', None), False) self.assertEqual(cache.get('key1'), 'eggs') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') cache.set('key5', 'belgian fries', timeout=1) self.assertIs(cache.touch('key5', timeout=None), True) time.sleep(2) self.assertEqual(cache.get('key5'), 'belgian fries') def test_zero_timeout(self): """ Passing in zero into timeout results in a value that is not cached """ cache.set('key1', 'eggs', 0) self.assertIsNone(cache.get('key1')) self.assertIs(cache.add('key2', 'ham', 0), True) self.assertIsNone(cache.get('key2')) cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0) self.assertIsNone(cache.get('key3')) self.assertIsNone(cache.get('key4')) cache.set('key5', 'belgian fries', timeout=5) self.assertIs(cache.touch('key5', timeout=0), True) self.assertIsNone(cache.get('key5')) def test_float_timeout(self): # Make sure a timeout given as a float doesn't crash anything. cache.set("key1", "spam", 100.2) self.assertEqual(cache.get("key1"), "spam") def _perform_cull_test(self, cull_cache, initial_count, final_count): # Create initial cache key entries. This will overflow the cache, # causing a cull. for i in range(1, initial_count): cull_cache.set('cull%d' % i, 'value', 1000) count = 0 # Count how many keys are left in the cache. for i in range(1, initial_count): if cull_cache.has_key('cull%d' % i): count += 1 self.assertEqual(count, final_count) def test_cull(self): self._perform_cull_test(caches['cull'], 50, 29) def test_zero_cull(self): self._perform_cull_test(caches['zero_cull'], 50, 19) def _perform_invalid_key_test(self, key, expected_warning): """ All the builtin backends (except memcached, see below) should warn on keys that would be refused by memcached. This encourages portable caching code without making it too difficult to use production backends with more liberal key rules. Refs #6447. """ # mimic custom ``make_key`` method being defined since the default will # never show the below warnings def func(key, *args): return key old_func = cache.key_func cache.key_func = func try: with self.assertWarnsMessage(CacheKeyWarning, expected_warning): cache.set(key, 'value') finally: cache.key_func = old_func def test_invalid_key_characters(self): # memcached doesn't allow whitespace or control characters in keys. key = 'key with spaces and 清' self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key) def test_invalid_key_length(self): # memcached limits key length to 250. 
key = ('a' * 250) + '清' expected_warning = ( 'Cache key will cause errors if used with memcached: ' '%r (longer than %s)' % (key, 250) ) self._perform_invalid_key_test(key, expected_warning) def test_cache_versioning_get_set(self): # set, using default version = 1 cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertEqual(cache.get('answer1', version=1), 42) self.assertIsNone(cache.get('answer1', version=2)) self.assertIsNone(caches['v2'].get('answer1')) self.assertEqual(caches['v2'].get('answer1', version=1), 42) self.assertIsNone(caches['v2'].get('answer1', version=2)) # set, default version = 1, but manually override version = 2 cache.set('answer2', 42, version=2) self.assertIsNone(cache.get('answer2')) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) # v2 set, using default version = 2 caches['v2'].set('answer3', 42) self.assertIsNone(cache.get('answer3')) self.assertIsNone(cache.get('answer3', version=1)) self.assertEqual(cache.get('answer3', version=2), 42) self.assertEqual(caches['v2'].get('answer3'), 42) self.assertIsNone(caches['v2'].get('answer3', version=1)) self.assertEqual(caches['v2'].get('answer3', version=2), 42) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set('answer4', 42, version=1) self.assertEqual(cache.get('answer4'), 42) self.assertEqual(cache.get('answer4', version=1), 42) self.assertIsNone(cache.get('answer4', version=2)) self.assertIsNone(caches['v2'].get('answer4')) self.assertEqual(caches['v2'].get('answer4', version=1), 42) self.assertIsNone(caches['v2'].get('answer4', version=2)) def test_cache_versioning_add(self): # add, default version = 1, but manually override version = 2 self.assertIs(cache.add('answer1', 42, version=2), True) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) self.assertIs(cache.add('answer1', 37, version=2), False) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) self.assertIs(cache.add('answer1', 37, version=1), True) self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) # v2 add, using default version = 2 self.assertIs(caches['v2'].add('answer2', 42), True) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) self.assertIs(caches['v2'].add('answer2', 37), False) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) self.assertIs(caches['v2'].add('answer2', 37, version=1), True) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) # v2 add, default version = 2, but manually override version = 1 self.assertIs(caches['v2'].add('answer3', 42, version=1), True) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) self.assertIs(caches['v2'].add('answer3', 37, version=1), False) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) self.assertIs(caches['v2'].add('answer3', 37), True) self.assertEqual(cache.get('answer3', version=1), 42) self.assertEqual(cache.get('answer3', version=2), 37) def test_cache_versioning_has_key(self): 
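        # `cache` uses the default version 1, while caches['v2'] defaults to
        # version 2, so has_key() should mirror the versioned get() behavior
        # exercised above.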
cache.set('answer1', 42) # has_key self.assertIs(cache.has_key('answer1'), True) self.assertIs(cache.has_key('answer1', version=1), True) self.assertIs(cache.has_key('answer1', version=2), False) self.assertIs(caches['v2'].has_key('answer1'), False) self.assertIs(caches['v2'].has_key('answer1', version=1), True) self.assertIs(caches['v2'].has_key('answer1', version=2), False) def test_cache_versioning_delete(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) self.assertIs(cache.delete('answer1'), True) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) self.assertIs(cache.delete('answer2', version=2), True) self.assertEqual(cache.get('answer2', version=1), 37) self.assertIsNone(cache.get('answer2', version=2)) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) self.assertIs(caches['v2'].delete('answer3'), True) self.assertEqual(cache.get('answer3', version=1), 37) self.assertIsNone(cache.get('answer3', version=2)) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) self.assertIs(caches['v2'].delete('answer4', version=1), True) self.assertIsNone(cache.get('answer4', version=1)) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_incr_decr(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) self.assertEqual(cache.incr('answer1'), 38) self.assertEqual(cache.get('answer1', version=1), 38) self.assertEqual(cache.get('answer1', version=2), 42) self.assertEqual(cache.decr('answer1'), 37) self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) self.assertEqual(cache.incr('answer2', version=2), 43) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 43) self.assertEqual(cache.decr('answer2', version=2), 42) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) self.assertEqual(caches['v2'].incr('answer3'), 43) self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 43) self.assertEqual(caches['v2'].decr('answer3'), 42) self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 42) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) self.assertEqual(caches['v2'].incr('answer4', version=1), 38) self.assertEqual(cache.get('answer4', version=1), 38) self.assertEqual(cache.get('answer4', version=2), 42) self.assertEqual(caches['v2'].decr('answer4', version=1), 37) self.assertEqual(cache.get('answer4', version=1), 37) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_get_set_many(self): # set, using default version = 1 cache.set_many({'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(caches['v2'].get_many(['ford1', 
'arthur1'], version=2), {}) # set, default version = 1, but manually override version = 2 cache.set_many({'ford2': 37, 'arthur2': 42}, version=2) self.assertEqual(cache.get_many(['ford2', 'arthur2']), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) # v2 set, using default version = 2 caches['v2'].set_many({'ford3': 37, 'arthur3': 42}) self.assertEqual(cache.get_many(['ford3', 'arthur3']), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1) self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {}) def test_incr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertIsNone(cache.get('answer', version=3)) self.assertEqual(cache.incr_version('answer', version=2), 3) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertIsNone(cache.get('answer', version=2)) self.assertEqual(cache.get('answer', version=3), 42) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertIsNone(caches['v2'].get('answer2', version=3)) self.assertEqual(caches['v2'].incr_version('answer2'), 3) self.assertIsNone(caches['v2'].get('answer2')) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertIsNone(caches['v2'].get('answer2', version=2)) self.assertEqual(caches['v2'].get('answer2', version=3), 42) with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertEqual(cache.decr_version('answer', version=2), 1) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.get('answer', version=1), 42) self.assertIsNone(cache.get('answer', version=2)) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) 
self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertEqual(caches['v2'].decr_version('answer2'), 1) self.assertIsNone(caches['v2'].get('answer2')) self.assertEqual(caches['v2'].get('answer2', version=1), 42) self.assertIsNone(caches['v2'].get('answer2', version=2)) with self.assertRaises(ValueError): cache.decr_version('does_not_exist', version=2) def test_custom_key_func(self): # Two caches with different key functions aren't visible to each other cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertIsNone(caches['custom_key'].get('answer1')) self.assertIsNone(caches['custom_key2'].get('answer1')) caches['custom_key'].set('answer2', 42) self.assertIsNone(cache.get('answer2')) self.assertEqual(caches['custom_key'].get('answer2'), 42) self.assertEqual(caches['custom_key2'].get('answer2'), 42) def test_cache_write_unpicklable_object(self): update_middleware = UpdateCacheMiddleware() update_middleware.cache = cache fetch_middleware = FetchFromCacheMiddleware() fetch_middleware.cache = cache request = self.factory.get('/cache/test') request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) response = HttpResponse() content = 'Testing cookie serialization.' response.content = content response.set_cookie('foo', 'bar') update_middleware.process_response(request, response) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) update_middleware.process_response(request, get_cache_data) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) def test_add_fail_on_pickleerror(self): # Shouldn't fail silently if trying to cache an unpicklable type. with self.assertRaises(pickle.PickleError): cache.add('unpicklable', Unpicklable()) def test_set_fail_on_pickleerror(self): with self.assertRaises(pickle.PickleError): cache.set('unpicklable', Unpicklable()) def test_get_or_set(self): self.assertIsNone(cache.get('projector')) self.assertEqual(cache.get_or_set('projector', 42), 42) self.assertEqual(cache.get('projector'), 42) self.assertIsNone(cache.get_or_set('null', None)) def test_get_or_set_callable(self): def my_callable(): return 'value' self.assertEqual(cache.get_or_set('mykey', my_callable), 'value') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value') def test_get_or_set_callable_returning_none(self): self.assertIsNone(cache.get_or_set('mykey', lambda: None)) # Previous get_or_set() doesn't store None in the cache. 
        self.assertEqual(cache.get('mykey', 'default'), 'default')

    def test_get_or_set_version(self):
        msg = "get_or_set() missing 1 required positional argument: 'default'"
        self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
        with self.assertRaisesMessage(TypeError, msg):
            cache.get_or_set('brian')
        with self.assertRaisesMessage(TypeError, msg):
            cache.get_or_set('brian', version=1)
        self.assertIsNone(cache.get('brian', version=1))
        self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
        self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
        self.assertIsNone(cache.get('brian', version=3))

    def test_get_or_set_racing(self):
        with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
            # Simulate cache.add() failing to add a value. In that case, the
            # default value should be returned.
            cache_add.return_value = False
            self.assertEqual(cache.get_or_set('key', 'default'), 'default')


@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.db.DatabaseCache',
    # Spaces are used in the table name to ensure quoting/escaping is working
    LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):

    available_apps = ['cache']

    def setUp(self):
        # The super call needs to happen first for the settings override.
        super().setUp()
        self.create_table()

    def tearDown(self):
        # The super call needs to happen first because it uses the database.
        super().tearDown()
        self.drop_table()

    def create_table(self):
        management.call_command('createcachetable', verbosity=0)

    def drop_table(self):
        with connection.cursor() as cursor:
            table_name = connection.ops.quote_name('test cache table')
            cursor.execute('DROP TABLE %s' % table_name)

    def test_get_many_num_queries(self):
        cache.set_many({'a': 1, 'b': 2})
        cache.set('expired', 'expired', 0.01)
        with self.assertNumQueries(1):
            self.assertEqual(cache.get_many(['a', 'b']), {'a': 1, 'b': 2})
        time.sleep(0.02)
        with self.assertNumQueries(2):
            self.assertEqual(cache.get_many(['a', 'b', 'expired']), {'a': 1, 'b': 2})

    def test_delete_many_num_queries(self):
        cache.set_many({'a': 1, 'b': 2, 'c': 3})
        with self.assertNumQueries(1):
            cache.delete_many(['a', 'b', 'c'])

    def test_zero_cull(self):
        self._perform_cull_test(caches['zero_cull'], 50, 18)

    def test_second_call_doesnt_crash(self):
        out = io.StringIO()
        management.call_command('createcachetable', stdout=out)
        self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))

    @override_settings(CACHES=caches_setting_for_tests(
        BACKEND='django.core.cache.backends.db.DatabaseCache',
        # Use another table name to avoid the 'table already exists' message.
        LOCATION='createcachetable_dry_run_mode'
    ))
    def test_createcachetable_dry_run_mode(self):
        out = io.StringIO()
        management.call_command('createcachetable', dry_run=True, stdout=out)
        output = out.getvalue()
        self.assertTrue(output.startswith("CREATE TABLE"))

    def test_createcachetable_with_table_argument(self):
        """
        Delete and recreate cache table with legacy behavior (explicitly
        specifying the table name).
""" self.drop_table() out = io.StringIO() management.call_command( 'createcachetable', 'test cache table', verbosity=2, stdout=out, ) self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n") @override_settings(USE_TZ=True) class DBCacheWithTimeZoneTests(DBCacheTests): pass class DBCacheRouter: """A router that puts the cache table on the 'other' database.""" def db_for_read(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def db_for_write(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def allow_migrate(self, db, app_label, **hints): if app_label == 'django_cache': return db == 'other' return None @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', 'LOCATION': 'my_cache_table', }, }, ) class CreateCacheTableForDBCacheTests(TestCase): databases = {'default', 'other'} @override_settings(DATABASE_ROUTERS=[DBCacheRouter()]) def test_createcachetable_observes_database_router(self): # cache table should not be created on 'default' with self.assertNumQueries(0, using='default'): management.call_command('createcachetable', database='default', verbosity=0) # cache table should be created on 'other' # Queries: # 1: check table doesn't already exist # 2: create savepoint (if transactional DDL is supported) # 3: create the table # 4: create the index # 5: release savepoint (if transactional DDL is supported) num = 5 if connections['other'].features.can_rollback_ddl else 3 with self.assertNumQueries(num, using='other'): management.call_command('createcachetable', database='other', verbosity=0) class PicklingSideEffect: def __init__(self, cache): self.cache = cache self.locked = False def __getstate__(self): self.locked = self.cache._lock.locked() return {} limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', OPTIONS={'MAX_ENTRIES': 9}, )) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', )) class LocMemCacheTests(BaseCacheTests, TestCase): def setUp(self): super().setUp() # LocMem requires a hack to make the other caches # share a data store with the 'normal' cache. 
caches['prefix']._cache = cache._cache caches['prefix']._expire_info = cache._expire_info caches['v2']._cache = cache._cache caches['v2']._expire_info = cache._expire_info caches['custom_key']._cache = cache._cache caches['custom_key']._expire_info = cache._expire_info caches['custom_key2']._cache = cache._cache caches['custom_key2']._expire_info = cache._expire_info @override_settings(CACHES={ 'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other' }, }) def test_multiple_caches(self): "Multiple locmem caches are isolated" cache.set('value', 42) self.assertEqual(caches['default'].get('value'), 42) self.assertIsNone(caches['other'].get('value')) def test_locking_on_pickle(self): """#20613/#18541 -- Ensures pickling is done outside of the lock.""" bad_obj = PicklingSideEffect(cache) cache.set('set', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") self.assertIs(cache.add('add', bad_obj), True) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") def test_incr_decr_timeout(self): """incr/decr does not modify expiry time (matches memcached behavior)""" key = 'value' _key = cache.make_key(key) cache.set(key, 1, timeout=cache.default_timeout * 10) expire = cache._expire_info[_key] self.assertEqual(cache.incr(key), 2) self.assertEqual(expire, cache._expire_info[_key]) self.assertEqual(cache.decr(key), 1) self.assertEqual(expire, cache._expire_info[_key]) @limit_locmem_entries def test_lru_get(self): """get() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key) cache.set(9, 9, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) @limit_locmem_entries def test_lru_set(self): """set() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(3, 9): cache.set(key, key, timeout=None) cache.set(9, 9, timeout=None) for key in range(3, 10): self.assertEqual(cache.get(key), key) for key in range(3): self.assertIsNone(cache.get(key)) @limit_locmem_entries def test_lru_incr(self): """incr() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): self.assertEqual(cache.incr(key), key + 1) cache.set(9, 9, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key + 1) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) # memcached backend isn't guaranteed to be available. # To check the memcached backend, the test settings file will # need to contain at least one cache backend setting that points at # your memcache server. configured_caches = {} for _cache_params in settings.CACHES.values(): configured_caches[_cache_params['BACKEND']] = _cache_params MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache') PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache') # The memcached backends don't support cull-related options like `MAX_ENTRIES`. memcached_excluded_caches = {'cull', 'zero_cull'} class BaseMemcachedTests(BaseCacheTests): # By default it's assumed that the client doesn't clean up connections # properly, in which case the backend must do so after each request. 
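    # Subclasses set this to False when the client library manages its own
    # connections (see PyLibMCCacheTests below).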
should_disconnect_on_close = True def test_location_multiple_servers(self): locations = [ ['server1.tld', 'server2:11211'], 'server1.tld;server2:11211', 'server1.tld,server2:11211', ] for location in locations: with self.subTest(location=location): params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location} with self.settings(CACHES={'default': params}): self.assertEqual(cache._servers, ['server1.tld', 'server2:11211']) def test_invalid_key_characters(self): """ On memcached, we don't introduce a duplicate key validation step (for speed reasons), we just let the memcached API library raise its own exception on bad keys. Refs #6447. In order to be memcached-API-library agnostic, we only assert that a generic exception of some kind is raised. """ # memcached does not allow whitespace or control characters in keys # when using the ascii protocol. with self.assertRaises(Exception): cache.set('key with spaces', 'value') def test_invalid_key_length(self): # memcached limits key length to 250 with self.assertRaises(Exception): cache.set('a' * 251, 'value') def test_default_never_expiring_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, TIMEOUT=None)): cache.set('infinite_foo', 'bar') self.assertEqual(cache.get('infinite_foo'), 'bar') def test_default_far_future_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, # 60*60*24*365, 1 year TIMEOUT=31536000)): cache.set('future_foo', 'bar') self.assertEqual(cache.get('future_foo'), 'bar') def test_cull(self): # culling isn't implemented, memcached deals with it. pass def test_zero_cull(self): # culling isn't implemented, memcached deals with it. pass def test_memcached_deletes_key_on_failed_set(self): # By default memcached allows objects up to 1MB. For the cache_db session # backend to always use the current session, memcached needs to delete # the old key if it fails to set. # pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can # tell from a quick check of its source code. This is falling back to # the default value exposed by python-memcached on my system. max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576) cache.set('small_value', 'a') self.assertEqual(cache.get('small_value'), 'a') large_value = 'a' * (max_value_length + 1) try: cache.set('small_value', large_value) except Exception: # Some clients (e.g. pylibmc) raise when the value is too large, # while others (e.g. python-memcached) intentionally return True # indicating success. This test is primarily checking that the key # was deleted, so the return/exception behavior for the set() # itself is not important. pass # small_value should be deleted, or set if configured to accept larger values value = cache.get('small_value') self.assertTrue(value is None or value == large_value) def test_close(self): # For clients that don't manage their connections properly, the # connection is closed when the request is complete. 
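        # Temporarily disconnect close_old_connections so that sending
        # request_finished below exercises only the cache's close handling;
        # it's reconnected in the finally block.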
signals.request_finished.disconnect(close_old_connections) try: with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect: signals.request_finished.send(self.__class__) self.assertIs(mock_disconnect.called, self.should_disconnect_on_close) finally: signals.request_finished.connect(close_old_connections) def test_set_many_returns_failing_keys(self): def fail_set_multi(mapping, *args, **kwargs): return mapping.keys() with mock.patch('%s.Client.set_multi' % self.client_library_name, side_effect=fail_set_multi): failing_keys = cache.set_many({'key': 'value'}) self.assertEqual(failing_keys, ['key']) @unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, )) class MemcachedCacheTests(BaseMemcachedTests, TestCase): base_params = MemcachedCache_params client_library_name = 'memcache' def test_memcached_uses_highest_pickle_version(self): # Regression test for #19810 for cache_key in settings.CACHES: with self.subTest(cache_key=cache_key): self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL) @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, OPTIONS={'server_max_value_length': 9999}, )) def test_memcached_options(self): self.assertEqual(cache._cache.server_max_value_length, 9999) def test_default_used_when_none_is_set(self): """ python-memcached doesn't support default in get() so this test overrides the one in BaseCacheTests. """ cache.set('key_default_none', None) self.assertEqual(cache.get('key_default_none', default='default'), 'default') @unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, )) class PyLibMCCacheTests(BaseMemcachedTests, TestCase): base_params = PyLibMCCache_params client_library_name = 'pylibmc' # libmemcached manages its own connections. should_disconnect_on_close = False # By default, pylibmc/libmemcached don't verify keys client-side and so # this test triggers a server-side bug that causes later tests to fail # (#19914). The `verify_keys` behavior option could be set to True (which # would avoid triggering the server-side bug), however this test would # still fail due to https://github.com/lericson/pylibmc/issues/219. @unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail") def test_invalid_key_characters(self): pass @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, OPTIONS={ 'binary': True, 'behaviors': {'tcp_nodelay': True}, }, )) def test_pylibmc_options(self): self.assertTrue(cache._cache.binary) self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True)) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.filebased.FileBasedCache', )) class FileBasedCacheTests(BaseCacheTests, TestCase): """ Specific test cases for the file-based cache. """ def setUp(self): super().setUp() self.dirname = self.mkdtemp() # Caches location cannot be modified through override_settings / modify_settings, # hence settings are manipulated directly here and the setting_changed signal # is triggered manually. 
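        # Sending setting_changed makes Django's test signal receivers drop
        # the cached cache connections, so the new LOCATION is picked up on
        # the next access of each cache instance.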
for cache_params in settings.CACHES.values(): cache_params['LOCATION'] = self.dirname setting_changed.send(self.__class__, setting='CACHES', enter=False) def tearDown(self): super().tearDown() # Call parent first, as cache.clear() may recreate cache base directory shutil.rmtree(self.dirname) def mkdtemp(self): return tempfile.mkdtemp() def test_ignores_non_cache_files(self): fname = os.path.join(self.dirname, 'not-a-cache-file') with open(fname, 'w'): os.utime(fname, None) cache.clear() self.assertTrue(os.path.exists(fname), 'Expected cache.clear to ignore non cache files') os.remove(fname) def test_clear_does_not_remove_cache_dir(self): cache.clear() self.assertTrue(os.path.exists(self.dirname), 'Expected cache.clear to keep the cache dir') def test_creates_cache_dir_if_nonexistent(self): os.rmdir(self.dirname) cache.set('foo', 'bar') self.assertTrue(os.path.exists(self.dirname)) def test_get_ignores_enoent(self): cache.set('foo', 'bar') os.unlink(cache._key_to_file('foo')) # Returns the default instead of erroring. self.assertEqual(cache.get('foo', 'baz'), 'baz') def test_get_does_not_ignore_non_filenotfound_exceptions(self): with mock.patch('builtins.open', side_effect=OSError): with self.assertRaises(OSError): cache.get('foo') def test_empty_cache_file_considered_expired(self): cache_file = cache._key_to_file('foo') with open(cache_file, 'wb') as fh: fh.write(b'') with open(cache_file, 'rb') as fh: self.assertIs(cache._is_expired(fh), True) class FileBasedCachePathLibTests(FileBasedCacheTests): def mkdtemp(self): tmp_dir = super().mkdtemp() return Path(tmp_dir) @override_settings(CACHES={ 'default': { 'BACKEND': 'cache.liberal_backend.CacheClass', }, }) class CustomCacheKeyValidationTests(SimpleTestCase): """ Tests for the ability to mixin a custom ``validate_key`` method to a custom cache backend that otherwise inherits from a builtin backend, and override the default key validation. Refs #6447. """ def test_custom_key_validation(self): # this key is both longer than 250 characters, and has spaces key = 'some key with spaces' * 15 val = 'a value' cache.set(key, val) self.assertEqual(cache.get(key), val) @override_settings( CACHES={ 'default': { 'BACKEND': 'cache.closeable_cache.CacheClass', } } ) class CacheClosingTests(SimpleTestCase): def test_close(self): self.assertFalse(cache.closed) signals.request_finished.send(self.__class__) self.assertTrue(cache.closed) DEFAULT_MEMORY_CACHES_SETTINGS = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'unique-snowflake', } } NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS) NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None class DefaultNonExpiringCacheKeyTests(SimpleTestCase): """ Settings having Cache arguments with a TIMEOUT=None create Caches that will set non-expiring keys. """ def setUp(self): # The 5 minute (300 seconds) default expiration time for keys is # defined in the implementation of the initializer method of the # BaseCache type. self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout def tearDown(self): del(self.DEFAULT_TIMEOUT) def test_default_expiration_time_for_keys_is_5_minutes(self): """The default expiration time of a cache key is 5 minutes. This value is defined in django.core.cache.backends.base.BaseCache.__init__(). 
""" self.assertEqual(300, self.DEFAULT_TIMEOUT) def test_caches_with_unset_timeout_has_correct_default_timeout(self): """Caches that have the TIMEOUT parameter undefined in the default settings will use the default 5 minute timeout. """ cache = caches[DEFAULT_CACHE_ALIAS] self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout) @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS) def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self): """Memory caches that have the TIMEOUT parameter set to `None` in the default settings with have `None` as the default timeout. This means "no timeout". """ cache = caches[DEFAULT_CACHE_ALIAS] self.assertIsNone(cache.default_timeout) self.assertIsNone(cache.get_backend_timeout()) @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS) def test_caches_with_unset_timeout_set_expiring_key(self): """Memory caches that have the TIMEOUT parameter unset will set cache keys having the default 5 minute timeout. """ key = "my-key" value = "my-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNotNone(cache._expire_info[cache_key]) @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS) def test_caches_set_with_timeout_as_none_set_non_expiring_key(self): """Memory caches that have the TIMEOUT parameter set to `None` will set a non expiring key by default. """ key = "another-key" value = "another-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNone(cache._expire_info[cache_key]) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHE_MIDDLEWARE_SECONDS=1, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, USE_I18N=False, ALLOWED_HOSTS=['.example.com'], ) class CacheUtils(SimpleTestCase): """TestCase for django.utils.cache functions.""" host = 'www.example.com' path = '/cache/test/' factory = RequestFactory(HTTP_HOST=host) def tearDown(self): cache.clear() def _get_request_cache(self, method='GET', query_string=None, update_cache=None): request = self._get_request(self.host, self.path, method, query_string=query_string) request._cache_update_cache = True if not update_cache else update_cache return request def _set_cache(self, request, msg): response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. 
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            # Extra whitespace in the initial Vary header is normalized.
            ('Cookie,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie    ,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('*', ('Accept-Language', 'Cookie'), '*'),
            ('Accept-Language, Cookie', ('*',), '*'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
                response = HttpResponse()
                if initial_vary is not None:
                    response['Vary'] = initial_vary
                patch_vary_headers(response, newheaders)
                self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)

        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )
        # A specified key_prefix is taken into account.
        key_prefix = 'localprefix'
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # The querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_cache_key_varies_by_url(self):
        """
        get_cache_key keys differ by fully-qualified URL instead of path
        """
        request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
        learn_cache_key(request1, HttpResponse())
        request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
        learn_cache_key(request2, HttpResponse())
        self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))

    def test_learn_cache_key(self):
        request = self.factory.head(self.path)
        response = HttpResponse()
        response['Vary'] = 'Pony'
        # Make sure that the Vary header is added to the key hash
        learn_cache_key(request, response)
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_patch_cache_control(self):
        tests = (
            # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
            (None, {'private': True}, {'private'}),
            ('', {'private': True}, {'private'}),
            # no-cache.
('', {'no_cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}), ('', {'no-cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}), ('no-cache=Set-Cookie', {'no_cache': True}, {'no-cache'}), ('no-cache=Set-Cookie,no-cache=Link', {'no_cache': True}, {'no-cache'}), ('no-cache=Set-Cookie', {'no_cache': 'Link'}, {'no-cache=Set-Cookie', 'no-cache=Link'}), ( 'no-cache=Set-Cookie,no-cache=Link', {'no_cache': 'Custom'}, {'no-cache=Set-Cookie', 'no-cache=Link', 'no-cache=Custom'}, ), # Test whether private/public attributes are mutually exclusive ('private', {'private': True}, {'private'}), ('private', {'public': True}, {'public'}), ('public', {'public': True}, {'public'}), ('public', {'private': True}, {'private'}), ('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}), ('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ) cc_delim_re = re.compile(r'\s*,\s*') for initial_cc, newheaders, expected_cc in tests: with self.subTest(initial_cc=initial_cc, newheaders=newheaders): response = HttpResponse() if initial_cc is not None: response['Cache-Control'] = initial_cc patch_cache_control(response, **newheaders) parts = set(cc_delim_re.split(response['Cache-Control'])) self.assertEqual(parts, expected_cc) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix', }, }, ) class PrefixedCacheUtils(CacheUtils): pass @override_settings( CACHE_MIDDLEWARE_SECONDS=60, CACHE_MIDDLEWARE_KEY_PREFIX='test', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, ) class CacheHEADTest(SimpleTestCase): path = '/cache/test/' factory = RequestFactory() def tearDown(self): cache.clear() def _set_cache(self, request, msg): response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) def test_head_caches_correctly(self): test_content = 'test content' request = self.factory.head(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) def test_head_with_cached_get(self): test_content = 'test content' request = self.factory.get(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, LANGUAGES=[ ('en', 'English'), ('es', 'Spanish'), ], ) class CacheI18nTest(SimpleTestCase): path = '/cache/test/' factory = RequestFactory() def tearDown(self): cache.clear() @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False) def test_cache_key_i18n_translation(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") key2 = get_cache_key(request) 
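        # get_cache_key() must reproduce the key that learn_cache_key()
        # generated, including the active language.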
self.assertEqual(key, key2) def check_accept_language_vary(self, accept_language, vary, reference_key): request = self.factory.get(self.path) request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response['Vary'] = vary key = learn_cache_key(request, response) key2 = get_cache_key(request) self.assertEqual(key, reference_key) self.assertEqual(key2, reference_key) @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False) def test_cache_key_i18n_translation_accept_language(self): lang = translation.get_language() self.assertEqual(lang, 'en') request = self.factory.get(self.path) request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response['Vary'] = 'accept-encoding' key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") self.check_accept_language_vary( 'en-us', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'en-US', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'en-US,en;q=0.8', 'accept-encoding, accept-language, cookie', key ) self.check_accept_language_vary( 'en-US,en;q=0.8,ko;q=0.6', 'accept-language, cookie, accept-encoding', key ) self.check_accept_language_vary( 'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ', 'accept-encoding, cookie, accept-language', key ) self.check_accept_language_vary( 'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4', 'accept-language, accept-encoding, cookie', key ) self.check_accept_language_vary( 'ko;q=1.0,en;q=0.5', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'ko, en', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'ko-KR, en-US', 'accept-encoding, accept-language, cookie', key ) @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False) def test_cache_key_i18n_formatting(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when formatting is active") key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True) def test_cache_key_i18n_timezone(self): request = self.factory.get(self.path) tz = timezone.get_current_timezone_name() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active") key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False, USE_L10N=False) def test_cache_key_no_i18n(self): request = self.factory.get(self.path) lang = translation.get_language() tz = timezone.get_current_timezone_name() response = HttpResponse() key = learn_cache_key(request, response) self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active") self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active") @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, USE_I18N=True, ) def test_middleware(self): def set_cache(request, lang, msg): translation.activate(lang) response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) # cache with non empty request.GET request = 
self.factory.get(self.path, {'foo': 'bar', 'other': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) # first access, cache must return None self.assertIsNone(get_cache_data) response = HttpResponse() content = 'Check for cache with QUERY_STRING' response.content = content UpdateCacheMiddleware().process_response(request, response) get_cache_data = FetchFromCacheMiddleware().process_request(request) # cache must return content self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) # different QUERY_STRING, cache must be empty request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) # i18n tests en_message = "Hello world!" es_message = "Hola mundo!" request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'en', en_message) get_cache_data = FetchFromCacheMiddleware().process_request(request) # The cache can be recovered self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, en_message.encode()) # change the session language and set content request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'es', es_message) # change again the language translation.activate('en') # retrieve the content from cache get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data.content, en_message.encode()) # change again the language translation.activate('es') get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data.content, es_message.encode()) # reset the language translation.deactivate() @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, ) def test_middleware_doesnt_cache_streaming_response(self): request = self.factory.get(self.path) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) content = ['Check for cache with streaming content.'] response = StreamingHttpResponse(content) UpdateCacheMiddleware().process_response(request, response) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix' }, }, ) class PrefixedCacheI18nTest(CacheI18nTest): pass def hello_world_view(request, value): return HttpResponse('Hello World %s' % value) def csrf_view(request): return HttpResponse(csrf(request)['csrf_token']) @override_settings( CACHE_MIDDLEWARE_ALIAS='other', CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix', CACHE_MIDDLEWARE_SECONDS=30, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other', 'TIMEOUT': '1', }, }, ) class CacheMiddlewareTest(SimpleTestCase): factory = RequestFactory() def setUp(self): self.default_cache = caches['default'] self.other_cache = caches['other'] def tearDown(self): self.default_cache.clear() self.other_cache.clear() super().tearDown() def test_constructor(self): """ Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as Middleware vs. usage of CacheMiddleware as view decorator and setting attributes appropriately. 
""" # If no arguments are passed in construction, it's being used as middleware. middleware = CacheMiddleware() # Now test object attributes against values defined in setUp above self.assertEqual(middleware.cache_timeout, 30) self.assertEqual(middleware.key_prefix, 'middlewareprefix') self.assertEqual(middleware.cache_alias, 'other') # If arguments are being passed in construction, it's being used as a decorator. # First, test with "defaults": as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None) self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30 self.assertEqual(as_view_decorator.key_prefix, '') # Value of DEFAULT_CACHE_ALIAS from django.core.cache self.assertEqual(as_view_decorator.cache_alias, 'default') # Next, test with custom values: as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo') self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60) self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo') self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other') def test_middleware(self): middleware = CacheMiddleware() prefix_middleware = CacheMiddleware(key_prefix='prefix1') timeout_middleware = CacheMiddleware(cache_timeout=1) request = self.factory.get('/view/') # Put the request through the request middleware result = middleware.process_request(request) self.assertIsNone(result) response = hello_world_view(request, '1') # Now put the response through the response middleware response = middleware.process_response(request, response) # Repeating the request should result in a cache hit result = middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') # The same request through a different middleware won't hit result = prefix_middleware.process_request(request) self.assertIsNone(result) # The same request with a timeout _will_ hit result = timeout_middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') def test_view_decorator(self): # decorate the same view with different cache decorators default_view = cache_page(3)(hello_world_view) default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view) explicit_default_view = cache_page(3, cache='default')(hello_world_view) explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view) other_view = cache_page(1, cache='other')(hello_world_view) other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view) request = self.factory.get('/view/') # Request the view once response = default_view(request, '1') self.assertEqual(response.content, b'Hello World 1') # Request again -- hit the cache response = default_view(request, '2') self.assertEqual(response.content, b'Hello World 1') # Requesting the same view with the explicit cache should yield the same result response = explicit_default_view(request, '3') self.assertEqual(response.content, b'Hello World 1') # Requesting with a prefix will hit a different cache key response = explicit_default_with_prefix_view(request, '4') self.assertEqual(response.content, b'Hello World 4') # Hitting the same view again gives a cache hit response = explicit_default_with_prefix_view(request, '5') self.assertEqual(response.content, b'Hello World 4') # And going back to the implicit cache will hit the same cache response = default_with_prefix_view(request, '6') 
self.assertEqual(response.content, b'Hello World 4') # Requesting from an alternate cache won't hit cache response = other_view(request, '7') self.assertEqual(response.content, b'Hello World 7') # But a repeated hit will hit cache response = other_view(request, '8') self.assertEqual(response.content, b'Hello World 7') # And prefixing the alternate cache yields yet another cache entry response = other_with_prefix_view(request, '9') self.assertEqual(response.content, b'Hello World 9') # But if we wait a couple of seconds... time.sleep(2) # ... the default cache will still hit caches['default'] response = default_view(request, '11') self.assertEqual(response.content, b'Hello World 1') # ... the default cache with a prefix will still hit response = default_with_prefix_view(request, '12') self.assertEqual(response.content, b'Hello World 4') # ... the explicit default cache will still hit response = explicit_default_view(request, '13') self.assertEqual(response.content, b'Hello World 1') # ... the explicit default cache with a prefix will still hit response = explicit_default_with_prefix_view(request, '14') self.assertEqual(response.content, b'Hello World 4') # .. but a rapidly expiring cache won't hit response = other_view(request, '15') self.assertEqual(response.content, b'Hello World 15') # .. even if it has a prefix response = other_with_prefix_view(request, '16') self.assertEqual(response.content, b'Hello World 16') def test_cache_page_timeout(self): # Page timeout takes precedence over the "max-age" section of the # "Cache-Control". tests = [ (1, 3), # max_age < page_timeout. (3, 1), # max_age > page_timeout. ] for max_age, page_timeout in tests: with self.subTest(max_age=max_age, page_timeout=page_timeout): view = cache_page(timeout=page_timeout)( cache_control(max_age=max_age)(hello_world_view) ) request = self.factory.get('/view/') response = view(request, '1') self.assertEqual(response.content, b'Hello World 1') time.sleep(1) response = view(request, '2') self.assertEqual( response.content, b'Hello World 1' if page_timeout > max_age else b'Hello World 2', ) cache.clear() def test_cached_control_private_not_cached(self): """Responses with 'Cache-Control: private' are not cached.""" view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view)) request = self.factory.get('/view/') response = view_with_private_cache(request, '1') self.assertEqual(response.content, b'Hello World 1') response = view_with_private_cache(request, '2') self.assertEqual(response.content, b'Hello World 2') def test_sensitive_cookie_not_cached(self): """ Django must prevent caching of responses that set a user-specific (and maybe security sensitive) cookie in response to a cookie-less request. """ csrf_middleware = CsrfViewMiddleware() cache_middleware = CacheMiddleware() request = self.factory.get('/view/') self.assertIsNone(cache_middleware.process_request(request)) csrf_middleware.process_view(request, csrf_view, (), {}) response = csrf_view(request) response = csrf_middleware.process_response(request, response) response = cache_middleware.process_response(request, response) # Inserting a CSRF cookie in a cookie-less request prevented caching. self.assertIsNone(cache_middleware.process_request(request)) def test_304_response_has_http_caching_headers_but_not_cached(self): original_view = mock.Mock(return_value=HttpResponseNotModified()) view = cache_page(2)(original_view) request = self.factory.get('/view/') # The view shouldn't be cached on the second call. 
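        # (The cache middleware only stores 200 responses, so both calls
        # below reach the original view.)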
        view(request).close()
        response = view(request)
        response.close()
        self.assertEqual(original_view.call_count, 2)
        self.assertIsInstance(response, HttpResponseNotModified)
        self.assertIn('Cache-Control', response)
        self.assertIn('Expires', response)


@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
    """
    Tests various headers w/ TemplateResponse.

    Most are probably redundant since they manipulate the same object
    anyway but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse)
    """
    path = '/cache/test/'
    factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            # Extra whitespace in the initial Vary header is normalized.
            ('Cookie,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie    ,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
                template = engines['django'].from_string("This is a test")
                response = TemplateResponse(HttpRequest(), template)
                if initial_vary is not None:
                    response['Vary'] = initial_vary
                patch_vary_headers(response, newheaders)
                self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )
        # A specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # The querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e' ) class TestMakeTemplateFragmentKey(SimpleTestCase): def test_without_vary_on(self): key = make_template_fragment_key('a.fragment') self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e') def test_with_one_vary_on(self): key = make_template_fragment_key('foo', ['abc']) self.assertEqual(key, 'template.cache.foo.493e283d571a73056196f1a68efd0f66') def test_with_many_vary_on(self): key = make_template_fragment_key('bar', ['abc', 'def']) self.assertEqual(key, 'template.cache.bar.17c1a507a0cb58384f4c639067a93520') def test_proper_escaping(self): key = make_template_fragment_key('spam', ['abc:def%']) self.assertEqual(key, 'template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc') def test_with_ints_vary_on(self): key = make_template_fragment_key('foo', [1, 2, 3, 4, 5]) self.assertEqual(key, 'template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461') def test_with_unicode_vary_on(self): key = make_template_fragment_key('foo', ['42º', '😀']) self.assertEqual(key, 'template.cache.foo.7ced1c94e543668590ba39b3c08b0237') def test_long_vary_on(self): key = make_template_fragment_key('foo', ['x' * 10000]) self.assertEqual(key, 'template.cache.foo.3670b349b5124aa56bdb50678b02b23a') class CacheHandlerTest(SimpleTestCase): def test_same_instance(self): """ Attempting to retrieve the same alias should yield the same instance. """ cache1 = caches['default'] cache2 = caches['default'] self.assertIs(cache1, cache2) def test_per_thread(self): """ Requesting the same alias from separate threads should yield separate instances. """ c = [] def runner(): c.append(caches['default']) for x in range(2): t = threading.Thread(target=runner) t.start() t.join() self.assertIsNot(c[0], c[1])
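

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the fragment keys asserted
# in TestMakeTemplateFragmentKey above share the layout
# 'template.cache.<fragment_name>.<md5 hexdigest over the vary_on values>'.
# The exact hashing scheme below (feeding the string form of each value,
# ':'-terminated, into one MD5 hash) is an assumption inferred from the
# expected keys; it provably reproduces the no-vary_on case, whose digest is
# the hexdigest of an empty MD5.
import hashlib


def sketch_fragment_key(fragment_name, vary_on=()):
    hasher = hashlib.md5()
    for value in vary_on:
        # Hash the string form of each varying value, separated by ':'.
        hasher.update(str(value).encode())
        hasher.update(b':')
    return 'template.cache.%s.%s' % (fragment_name, hasher.hexdigest())


# MD5 of no input is d41d8cd98f00b204e9800998ecf8427e, matching
# test_without_vary_on above.
assert (sketch_fragment_key('a.fragment') ==
        'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')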
import hashlib import unittest from django.test import SimpleTestCase from django.utils.crypto import ( InvalidAlgorithm, constant_time_compare, pbkdf2, salted_hmac, ) class TestUtilsCryptoMisc(SimpleTestCase): def test_constant_time_compare(self): # It's hard to test for constant time, just test the result. self.assertTrue(constant_time_compare(b'spam', b'spam')) self.assertFalse(constant_time_compare(b'spam', b'eggs')) self.assertTrue(constant_time_compare('spam', 'spam')) self.assertFalse(constant_time_compare('spam', 'eggs')) def test_salted_hmac(self): tests = [ ((b'salt', b'value'), {}, 'b51a2e619c43b1ca4f91d15c57455521d71d61eb'), (('salt', 'value'), {}, 'b51a2e619c43b1ca4f91d15c57455521d71d61eb'), ( ('salt', 'value'), {'secret': 'abcdefg'}, '8bbee04ccddfa24772d1423a0ba43bd0c0e24b76', ), ( ('salt', 'value'), {'secret': 'x' * hashlib.sha1().block_size}, 'bd3749347b412b1b0a9ea65220e55767ac8e96b0', ), ( ('salt', 'value'), {'algorithm': 'sha256'}, 'ee0bf789e4e009371a5372c90f73fcf17695a8439c9108b0480f14e347b3f9ec', ), ( ('salt', 'value'), { 'algorithm': 'blake2b', 'secret': 'x' * hashlib.blake2b().block_size, }, 'fc6b9800a584d40732a07fa33fb69c35211269441823bca431a143853c32f' 'e836cf19ab881689528ede647dac412170cd5d3407b44c6d0f44630690c54' 'ad3d58', ), ] for args, kwargs, digest in tests: with self.subTest(args=args, kwargs=kwargs): self.assertEqual(salted_hmac(*args, **kwargs).hexdigest(), digest) def test_invalid_algorithm(self): msg = "'whatever' is not an algorithm accepted by the hashlib module." with self.assertRaisesMessage(InvalidAlgorithm, msg): salted_hmac('salt', 'value', algorithm='whatever') class TestUtilsCryptoPBKDF2(unittest.TestCase): # http://tools.ietf.org/html/draft-josefsson-pbkdf2-test-vectors-06 rfc_vectors = [ { "args": { "password": "password", "salt": "salt", "iterations": 1, "dklen": 20, "digest": hashlib.sha1, }, "result": "0c60c80f961f0e71f3a9b524af6012062fe037a6", }, { "args": { "password": "password", "salt": "salt", "iterations": 2, "dklen": 20, "digest": hashlib.sha1, }, "result": "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957", }, { "args": { "password": "password", "salt": "salt", "iterations": 4096, "dklen": 20, "digest": hashlib.sha1, }, "result": "4b007901b765489abead49d926f721d065a429c1", }, # # this takes way too long :( # { # "args": { # "password": "password", # "salt": "salt", # "iterations": 16777216, # "dklen": 20, # "digest": hashlib.sha1, # }, # "result": "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984", # }, { "args": { "password": "passwordPASSWORDpassword", "salt": "saltSALTsaltSALTsaltSALTsaltSALTsalt", "iterations": 4096, "dklen": 25, "digest": hashlib.sha1, }, "result": "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038", }, { "args": { "password": "pass\0word", "salt": "sa\0lt", "iterations": 4096, "dklen": 16, "digest": hashlib.sha1, }, "result": "56fa6aa75548099dcc37d7f03425e0c3", }, ] regression_vectors = [ { "args": { "password": "password", "salt": "salt", "iterations": 1, "dklen": 20, "digest": hashlib.sha256, }, "result": "120fb6cffcf8b32c43e7225256c4f837a86548c9", }, { "args": { "password": "password", "salt": "salt", "iterations": 1, "dklen": 20, "digest": hashlib.sha512, }, "result": "867f70cf1ade02cff3752599a3a53dc4af34c7a6", }, { "args": { "password": "password", "salt": "salt", "iterations": 1000, "dklen": 0, "digest": hashlib.sha512, }, "result": ("afe6c5530785b6cc6b1c6453384731bd5ee432ee" "549fd42fb6695779ad8a1c5bf59de69c48f774ef" "c4007d5298f9033c0241d5ab69305e7b64eceeb8d" "834cfec"), }, # Check leading zeros are not stripped 
        # (#17481)
        {
            "args": {
                "password": b'\xba',
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": '0053d3b91a7f1e54effebd6d68771e8a6e0b2c5b',
        },
    ]

    def test_public_vectors(self):
        for vector in self.rfc_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(result.hex(), vector['result'])

    def test_regression_vectors(self):
        for vector in self.regression_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(result.hex(), vector['result'])

    def test_default_hmac_alg(self):
        kwargs = {'password': b'password', 'salt': b'salt', 'iterations': 1, 'dklen': 20}
        self.assertEqual(pbkdf2(**kwargs), hashlib.pbkdf2_hmac(hash_name=hashlib.sha256().name, **kwargs))
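

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the RFC vectors above can
# be cross-checked against the standard library's PBKDF2 primitive, which
# django.utils.crypto.pbkdf2 delegates to (see test_default_hmac_alg).
# Shown with the first RFC 6070 vector (P="password", S="salt", c=1, dkLen=20).
import hashlib

derived = hashlib.pbkdf2_hmac('sha1', b'password', b'salt', iterations=1, dklen=20)
assert derived.hex() == '0c60c80f961f0e71f3a9b524af6012062fe037a6'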
import threading from datetime import datetime, timedelta from unittest import mock from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections, models from django.db.models.manager import BaseManager from django.db.models.query import MAX_GET_RESULTS, EmptyQuerySet from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, skipUnlessDBFeature, ) from django.utils.translation import gettext_lazy from .models import ( Article, ArticleSelectOnSave, FeaturedArticle, PrimaryKeyWithDefault, SelfRef, ) class ModelInstanceCreationTests(TestCase): def test_object_is_not_written_to_database_until_save_was_called(self): a = Article( id=None, headline='Parrot programs in Python', pub_date=datetime(2005, 7, 28), ) self.assertIsNone(a.id) self.assertEqual(Article.objects.all().count(), 0) # Save it into the database. You have to call save() explicitly. a.save() self.assertIsNotNone(a.id) self.assertEqual(Article.objects.all().count(), 1) def test_can_initialize_model_instance_using_positional_arguments(self): """ You can initialize a model instance using positional arguments, which should match the field order as defined in the model. """ a = Article(None, 'Second article', datetime(2005, 7, 29)) a.save() self.assertEqual(a.headline, 'Second article') self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0)) def test_can_create_instance_using_kwargs(self): a = Article( id=None, headline='Third article', pub_date=datetime(2005, 7, 30), ) a.save() self.assertEqual(a.headline, 'Third article') self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0)) def test_autofields_generate_different_values_for_each_instance(self): a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0)) a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0)) a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0)) self.assertNotEqual(a3.id, a1.id) self.assertNotEqual(a3.id, a2.id) def test_can_mix_and_match_position_and_kwargs(self): # You can also mix and match position and keyword arguments, but # be sure not to duplicate field information. a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31)) a.save() self.assertEqual(a.headline, 'Fourth article') def test_cannot_create_instance_with_invalid_kwargs(self): with self.assertRaisesMessage(TypeError, "Article() got an unexpected keyword argument 'foo'"): Article( id=None, headline='Some headline', pub_date=datetime(2005, 7, 31), foo='bar', ) def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self): """ You can leave off the value for an AutoField when creating an object, because it'll get filled in automatically when you save(). 
""" a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31)) a.save() self.assertEqual(a.headline, 'Article 5') self.assertIsNotNone(a.id) def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self): a = Article(pub_date=datetime(2005, 7, 31)) a.save() self.assertEqual(a.headline, 'Default headline') def test_for_datetimefields_saves_as_much_precision_as_was_given(self): """as much precision in *seconds*""" a1 = Article( headline='Article 7', pub_date=datetime(2005, 7, 31, 12, 30), ) a1.save() self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30)) a2 = Article( headline='Article 8', pub_date=datetime(2005, 7, 31, 12, 30, 45), ) a2.save() self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date, datetime(2005, 7, 31, 12, 30, 45)) def test_saving_an_object_again_does_not_create_a_new_object(self): a = Article(headline='original', pub_date=datetime(2014, 5, 16)) a.save() current_id = a.id a.save() self.assertEqual(a.id, current_id) a.headline = 'Updated headline' a.save() self.assertEqual(a.id, current_id) def test_querysets_checking_for_membership(self): headlines = [ 'Parrot programs in Python', 'Second article', 'Third article'] some_pub_date = datetime(2014, 5, 16, 12, 1) for headline in headlines: Article(headline=headline, pub_date=some_pub_date).save() a = Article(headline='Some headline', pub_date=some_pub_date) a.save() # You can use 'in' to test for membership... self.assertIn(a, Article.objects.all()) # ... but there will often be more efficient ways if that is all you need: self.assertTrue(Article.objects.filter(id=a.id).exists()) def test_save_primary_with_default(self): # An UPDATE attempt is skipped when a primary key has default. with self.assertNumQueries(1): PrimaryKeyWithDefault().save() class ModelTest(TestCase): def test_objects_attribute_is_only_available_on_the_class_itself(self): with self.assertRaisesMessage(AttributeError, "Manager isn't accessible via Article instances"): getattr(Article(), "objects",) self.assertFalse(hasattr(Article(), 'objects')) self.assertTrue(hasattr(Article, 'objects')) def test_queryset_delete_removes_all_items_in_that_queryset(self): headlines = [ 'An article', 'Article One', 'Amazing article', 'Boring article'] some_pub_date = datetime(2014, 5, 16, 12, 1) for headline in headlines: Article(headline=headline, pub_date=some_pub_date).save() self.assertQuerysetEqual( Article.objects.all().order_by('headline'), ["<Article: Amazing article>", "<Article: An article>", "<Article: Article One>", "<Article: Boring article>"] ) Article.objects.filter(headline__startswith='A').delete() self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Boring article>"]) def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self): some_pub_date = datetime(2014, 5, 16, 12, 1) a1 = Article.objects.create(headline='First', pub_date=some_pub_date) a2 = Article.objects.create(headline='Second', pub_date=some_pub_date) self.assertNotEqual(a1, a2) self.assertEqual(a1, Article.objects.get(id__exact=a1.id)) self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id)) def test_microsecond_precision(self): a9 = Article( headline='Article 9', pub_date=datetime(2005, 7, 31, 12, 30, 45, 180), ) a9.save() self.assertEqual(Article.objects.get(pk=a9.pk).pub_date, datetime(2005, 7, 31, 12, 30, 45, 180)) def test_manually_specify_primary_key(self): # You can manually specify the primary key when creating a new object. 
a101 = Article( id=101, headline='Article 101', pub_date=datetime(2005, 7, 31, 12, 30, 45), ) a101.save() a101 = Article.objects.get(pk=101) self.assertEqual(a101.headline, 'Article 101') def test_create_method(self): # You can create saved objects in a single step a10 = Article.objects.create( headline="Article 10", pub_date=datetime(2005, 7, 31, 12, 30, 45), ) self.assertEqual(Article.objects.get(headline="Article 10"), a10) def test_year_lookup_edge_case(self): # Edge-case test: A year lookup should retrieve all objects in # the given year, including Jan. 1 and Dec. 31. Article.objects.create( headline='Article 11', pub_date=datetime(2008, 1, 1), ) Article.objects.create( headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999), ) self.assertQuerysetEqual( Article.objects.filter(pub_date__year=2008), ["<Article: Article 11>", "<Article: Article 12>"] ) def test_unicode_data(self): # Unicode data works, too. a = Article( headline='\u6797\u539f \u3081\u3050\u307f', pub_date=datetime(2005, 7, 28), ) a.save() self.assertEqual(Article.objects.get(pk=a.id).headline, '\u6797\u539f \u3081\u3050\u307f') def test_hash_function(self): # Model instances have a hash function, so they can be used in sets # or as dictionary keys. Two models compare as equal if their primary # keys are equal. a10 = Article.objects.create( headline="Article 10", pub_date=datetime(2005, 7, 31, 12, 30, 45), ) a11 = Article.objects.create( headline='Article 11', pub_date=datetime(2008, 1, 1), ) a12 = Article.objects.create( headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999), ) s = {a10, a11, a12} self.assertIn(Article.objects.get(headline='Article 11'), s) def test_extra_method_select_argument_with_dashes_and_values(self): # The 'select' argument to extra() supports names with dashes in # them, as long as you use values(). Article.objects.bulk_create([ Article(headline='Article 10', pub_date=datetime(2005, 7, 31, 12, 30, 45)), Article(headline='Article 11', pub_date=datetime(2008, 1, 1)), Article(headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999)), ]) dicts = Article.objects.filter( pub_date__year=2008).extra( select={'dashed-value': '1'}).values('headline', 'dashed-value') self.assertEqual( [sorted(d.items()) for d in dicts], [[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]] ) def test_extra_method_select_argument_with_dashes(self): # If you use 'select' with extra() and names containing dashes on a # query that's *not* a values() query, those extra 'select' values # will silently be ignored. Article.objects.bulk_create([ Article(headline='Article 10', pub_date=datetime(2005, 7, 31, 12, 30, 45)), Article(headline='Article 11', pub_date=datetime(2008, 1, 1)), Article(headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999)), ]) articles = Article.objects.filter( pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'}) self.assertEqual(articles[0].undashedvalue, 2) def test_create_relation_with_gettext_lazy(self): """ gettext_lazy objects work when saving model instances through various methods. Refs #10498. 
""" notlazy = 'test' lazy = gettext_lazy(notlazy) Article.objects.create(headline=lazy, pub_date=datetime.now()) article = Article.objects.get() self.assertEqual(article.headline, notlazy) # test that assign + save works with Promise objects article.headline = lazy article.save() self.assertEqual(article.headline, notlazy) # test .update() Article.objects.update(headline=lazy) article = Article.objects.get() self.assertEqual(article.headline, notlazy) # still test bulk_create() Article.objects.all().delete() Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())]) article = Article.objects.get() self.assertEqual(article.headline, notlazy) def test_emptyqs(self): msg = "EmptyQuerySet can't be instantiated" with self.assertRaisesMessage(TypeError, msg): EmptyQuerySet() self.assertIsInstance(Article.objects.none(), EmptyQuerySet) self.assertNotIsInstance('', EmptyQuerySet) def test_emptyqs_values(self): # test for #15959 Article.objects.create(headline='foo', pub_date=datetime.now()) with self.assertNumQueries(0): qs = Article.objects.none().values_list('pk') self.assertIsInstance(qs, EmptyQuerySet) self.assertEqual(len(qs), 0) def test_emptyqs_customqs(self): # A hacky test for custom QuerySet subclass - refs #17271 Article.objects.create(headline='foo', pub_date=datetime.now()) class CustomQuerySet(models.QuerySet): def do_something(self): return 'did something' qs = Article.objects.all() qs.__class__ = CustomQuerySet qs = qs.none() with self.assertNumQueries(0): self.assertEqual(len(qs), 0) self.assertIsInstance(qs, EmptyQuerySet) self.assertEqual(qs.do_something(), 'did something') def test_emptyqs_values_order(self): # Tests for ticket #17712 Article.objects.create(headline='foo', pub_date=datetime.now()) with self.assertNumQueries(0): self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0) with self.assertNumQueries(0): self.assertEqual(len(Article.objects.none().filter( id__in=Article.objects.values_list('id', flat=True))), 0) @skipUnlessDBFeature('can_distinct_on_fields') def test_emptyqs_distinct(self): # Tests for #19426 Article.objects.create(headline='foo', pub_date=datetime.now()) with self.assertNumQueries(0): self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0) def test_ticket_20278(self): sr = SelfRef.objects.create() with self.assertRaises(ObjectDoesNotExist): SelfRef.objects.get(selfref=sr) def test_eq(self): self.assertEqual(Article(id=1), Article(id=1)) self.assertNotEqual(Article(id=1), object()) self.assertNotEqual(object(), Article(id=1)) a = Article() self.assertEqual(a, a) self.assertEqual(a, mock.ANY) self.assertNotEqual(Article(), a) def test_hash(self): # Value based on PK self.assertEqual(hash(Article(id=1)), hash(1)) msg = 'Model instances without primary key value are unhashable' with self.assertRaisesMessage(TypeError, msg): # No PK value -> unhashable (because save() would then change # hash) hash(Article()) def test_missing_hash_not_inherited(self): class NoHash(models.Model): def __eq__(self, other): return super.__eq__(other) with self.assertRaisesMessage(TypeError, "unhashable type: 'NoHash'"): hash(NoHash(id=1)) def test_specified_parent_hash_inherited(self): class ParentHash(models.Model): def __eq__(self, other): return super.__eq__(other) __hash__ = models.Model.__hash__ self.assertEqual(hash(ParentHash(id=1)), 1) def test_delete_and_access_field(self): # Accessing a field after it's deleted from a model reloads its value. 
pub_date = datetime.now() article = Article.objects.create(headline='foo', pub_date=pub_date) new_pub_date = article.pub_date + timedelta(days=10) article.headline = 'bar' article.pub_date = new_pub_date del article.headline with self.assertNumQueries(1): self.assertEqual(article.headline, 'foo') # Fields that weren't deleted aren't reloaded. self.assertEqual(article.pub_date, new_pub_date) def test_multiple_objects_max_num_fetched(self): max_results = MAX_GET_RESULTS - 1 Article.objects.bulk_create( Article(headline='Area %s' % i, pub_date=datetime(2005, 7, 28)) for i in range(max_results) ) self.assertRaisesMessage( MultipleObjectsReturned, 'get() returned more than one Article -- it returned %d!' % max_results, Article.objects.get, headline__startswith='Area', ) Article.objects.create(headline='Area %s' % max_results, pub_date=datetime(2005, 7, 28)) self.assertRaisesMessage( MultipleObjectsReturned, 'get() returned more than one Article -- it returned more than %d!' % max_results, Article.objects.get, headline__startswith='Area', ) class ModelLookupTest(TestCase): @classmethod def setUpTestData(cls): # Create an Article. cls.a = Article( id=None, headline='Swallow programs in Python', pub_date=datetime(2005, 7, 28), ) # Save it into the database. You have to call save() explicitly. cls.a.save() def test_all_lookup(self): # Change values by changing the attributes, then calling save(). self.a.headline = 'Parrot programs in Python' self.a.save() # Article.objects.all() returns all the articles in the database. self.assertQuerysetEqual(Article.objects.all(), ['<Article: Parrot programs in Python>']) def test_rich_lookup(self): # Django provides a rich database lookup API. self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a) self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a) self.assertEqual(Article.objects.get(pub_date__year=2005), self.a) self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a) self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a) self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a) def test_equal_lookup(self): # The "__exact" lookup type can be omitted, as a shortcut. self.assertEqual(Article.objects.get(id=self.a.id), self.a) self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a) self.assertQuerysetEqual( Article.objects.filter(pub_date__year=2005), ['<Article: Swallow programs in Python>'], ) self.assertQuerysetEqual( Article.objects.filter(pub_date__year=2004), [], ) self.assertQuerysetEqual( Article.objects.filter(pub_date__year=2005, pub_date__month=7), ['<Article: Swallow programs in Python>'], ) self.assertQuerysetEqual( Article.objects.filter(pub_date__week_day=5), ['<Article: Swallow programs in Python>'], ) self.assertQuerysetEqual( Article.objects.filter(pub_date__week_day=6), [], ) def test_does_not_exist(self): # Django raises an Article.DoesNotExist exception for get() if the # parameters don't match any object. with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."): Article.objects.get(id__exact=2000,) # To avoid dict-ordering related errors check only one lookup # in single assert. 
with self.assertRaises(ObjectDoesNotExist): Article.objects.get(pub_date__year=2005, pub_date__month=8) with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."): Article.objects.get(pub_date__week_day=6,) def test_lookup_by_primary_key(self): # Lookup by a primary key is the most common case, so Django # provides a shortcut for primary-key exact lookups. # The following is identical to articles.get(id=a.id). self.assertEqual(Article.objects.get(pk=self.a.id), self.a) # pk can be used as a shortcut for the primary key name in any query. self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]), ["<Article: Swallow programs in Python>"]) # Model instances of the same type and same ID are considered equal. a = Article.objects.get(pk=self.a.id) b = Article.objects.get(pk=self.a.id) self.assertEqual(a, b) def test_too_many(self): # Create a very similar object a = Article( id=None, headline='Swallow bites Python', pub_date=datetime(2005, 7, 28), ) a.save() self.assertEqual(Article.objects.count(), 2) # Django raises an Article.MultipleObjectsReturned exception if the # lookup matches more than one object msg = "get() returned more than one Article -- it returned 2!" with self.assertRaisesMessage(MultipleObjectsReturned, msg): Article.objects.get(headline__startswith='Swallow',) with self.assertRaisesMessage(MultipleObjectsReturned, msg): Article.objects.get(pub_date__year=2005,) with self.assertRaisesMessage(MultipleObjectsReturned, msg): Article.objects.get(pub_date__year=2005, pub_date__month=7) class ConcurrentSaveTests(TransactionTestCase): available_apps = ['basic'] @skipUnlessDBFeature('test_db_allows_multiple_connections') def test_concurrent_delete_with_save(self): """ Test fetching, deleting and finally saving an object - we should get an insert in this case. """ a = Article.objects.create(headline='foo', pub_date=datetime.now()) exceptions = [] def deleter(): try: # Do not delete a directly - doing so alters its state. Article.objects.filter(pk=a.pk).delete() except Exception as e: exceptions.append(e) finally: connections[DEFAULT_DB_ALIAS].close() self.assertEqual(len(exceptions), 0) t = threading.Thread(target=deleter) t.start() t.join() a.save() self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo') class ManagerTest(SimpleTestCase): QUERYSET_PROXY_METHODS = [ 'none', 'count', 'dates', 'datetimes', 'distinct', 'extra', 'get', 'get_or_create', 'update_or_create', 'create', 'bulk_create', 'bulk_update', 'filter', 'aggregate', 'annotate', 'complex_filter', 'exclude', 'in_bulk', 'iterator', 'earliest', 'latest', 'first', 'last', 'order_by', 'select_for_update', 'select_related', 'prefetch_related', 'values', 'values_list', 'update', 'reverse', 'defer', 'only', 'using', 'exists', 'explain', '_insert', '_update', 'raw', 'union', 'intersection', 'difference', ] def test_manager_methods(self): """ This test ensures that the correct set of methods from `QuerySet` are copied onto `Manager`. It's particularly useful to prevent accidentally leaking new methods into `Manager`. New `QuerySet` methods that should also be copied onto `Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`. 
""" self.assertEqual( sorted(BaseManager._get_queryset_methods(models.QuerySet)), sorted(self.QUERYSET_PROXY_METHODS), ) class SelectOnSaveTests(TestCase): def test_select_on_save(self): a1 = Article.objects.create(pub_date=datetime.now()) with self.assertNumQueries(1): a1.save() asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now()) with self.assertNumQueries(2): asos.save() with self.assertNumQueries(1): asos.save(force_update=True) Article.objects.all().delete() with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'): with self.assertNumQueries(1): asos.save(force_update=True) def test_select_on_save_lying_update(self): """ select_on_save works correctly if the database doesn't return correct information about matched rows from UPDATE. """ # Change the manager to not return "row matched" for update(). # We are going to change the Article's _base_manager class # dynamically. This is a bit of a hack, but it seems hard to # test this properly otherwise. Article's manager, because # proxy models use their parent model's _base_manager. orig_class = Article._base_manager._queryset_class class FakeQuerySet(models.QuerySet): # Make sure the _update method below is in fact called. called = False def _update(self, *args, **kwargs): FakeQuerySet.called = True super()._update(*args, **kwargs) return 0 try: Article._base_manager._queryset_class = FakeQuerySet asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now()) with self.assertNumQueries(3): asos.save() self.assertTrue(FakeQuerySet.called) # This is not wanted behavior, but this is how Django has always # behaved for databases that do not return correct information # about matched rows for UPDATE. with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'): asos.save(force_update=True) msg = ( "An error occurred in the current transaction. You can't " "execute queries until the end of the 'atomic' block." ) with self.assertRaisesMessage(DatabaseError, msg): asos.save(update_fields=['pub_date']) finally: Article._base_manager._queryset_class = orig_class class ModelRefreshTests(TestCase): def test_refresh(self): a = Article.objects.create(pub_date=datetime.now()) Article.objects.create(pub_date=datetime.now()) Article.objects.filter(pk=a.pk).update(headline='new headline') with self.assertNumQueries(1): a.refresh_from_db() self.assertEqual(a.headline, 'new headline') orig_pub_date = a.pub_date new_pub_date = a.pub_date + timedelta(10) Article.objects.update(headline='new headline 2', pub_date=new_pub_date) with self.assertNumQueries(1): a.refresh_from_db(fields=['headline']) self.assertEqual(a.headline, 'new headline 2') self.assertEqual(a.pub_date, orig_pub_date) with self.assertNumQueries(1): a.refresh_from_db() self.assertEqual(a.pub_date, new_pub_date) def test_unknown_kwarg(self): s = SelfRef.objects.create() msg = "refresh_from_db() got an unexpected keyword argument 'unknown_kwarg'" with self.assertRaisesMessage(TypeError, msg): s.refresh_from_db(unknown_kwarg=10) def test_lookup_in_fields(self): s = SelfRef.objects.create() msg = 'Found "__" in fields argument. Relations and transforms are not allowed in fields.' 
with self.assertRaisesMessage(ValueError, msg): s.refresh_from_db(fields=['foo__bar']) def test_refresh_fk(self): s1 = SelfRef.objects.create() s2 = SelfRef.objects.create() s3 = SelfRef.objects.create(selfref=s1) s3_copy = SelfRef.objects.get(pk=s3.pk) s3_copy.selfref.touched = True s3.selfref = s2 s3.save() with self.assertNumQueries(1): s3_copy.refresh_from_db() with self.assertNumQueries(1): # The old related instance was thrown away (the selfref_id has # changed). It needs to be reloaded on access, so one query # executed. self.assertFalse(hasattr(s3_copy.selfref, 'touched')) self.assertEqual(s3_copy.selfref, s2) def test_refresh_null_fk(self): s1 = SelfRef.objects.create() s2 = SelfRef.objects.create(selfref=s1) s2.selfref = None s2.refresh_from_db() self.assertEqual(s2.selfref, s1) def test_refresh_unsaved(self): pub_date = datetime.now() a = Article.objects.create(pub_date=pub_date) a2 = Article(id=a.pk) with self.assertNumQueries(1): a2.refresh_from_db() self.assertEqual(a2.pub_date, pub_date) self.assertEqual(a2._state.db, "default") def test_refresh_fk_on_delete_set_null(self): a = Article.objects.create( headline='Parrot programs in Python', pub_date=datetime(2005, 7, 28), ) s1 = SelfRef.objects.create(article=a) a.delete() s1.refresh_from_db() self.assertIsNone(s1.article_id) self.assertIsNone(s1.article) def test_refresh_no_fields(self): a = Article.objects.create(pub_date=datetime.now()) with self.assertNumQueries(0): a.refresh_from_db(fields=[]) def test_refresh_clears_reverse_related(self): """refresh_from_db() clear cached reverse relations.""" article = Article.objects.create( headline='Parrot programs in Python', pub_date=datetime(2005, 7, 28), ) self.assertFalse(hasattr(article, 'featured')) FeaturedArticle.objects.create(article_id=article.pk) article.refresh_from_db() self.assertTrue(hasattr(article, 'featured')) def test_refresh_clears_one_to_one_field(self): article = Article.objects.create( headline='Parrot programs in Python', pub_date=datetime(2005, 7, 28), ) featured = FeaturedArticle.objects.create(article_id=article.pk) self.assertEqual(featured.article.headline, 'Parrot programs in Python') article.headline = 'Parrot programs in Python 2.0' article.save() featured.refresh_from_db() self.assertEqual(featured.article.headline, 'Parrot programs in Python 2.0') def test_prefetched_cache_cleared(self): a = Article.objects.create(pub_date=datetime(2005, 7, 28)) s = SelfRef.objects.create(article=a) # refresh_from_db() without fields=[...] a1_prefetched = Article.objects.prefetch_related('selfref_set').first() self.assertCountEqual(a1_prefetched.selfref_set.all(), [s]) s.article = None s.save() # Relation is cleared and prefetch cache is stale. self.assertCountEqual(a1_prefetched.selfref_set.all(), [s]) a1_prefetched.refresh_from_db() # Cache was cleared and new results are available. self.assertCountEqual(a1_prefetched.selfref_set.all(), []) # refresh_from_db() with fields=[...] a2_prefetched = Article.objects.prefetch_related('selfref_set').first() self.assertCountEqual(a2_prefetched.selfref_set.all(), []) s.article = a s.save() # Relation is added and prefetch cache is stale. self.assertCountEqual(a2_prefetched.selfref_set.all(), []) a2_prefetched.refresh_from_db(fields=['selfref_set']) # Cache was cleared and new results are available. self.assertCountEqual(a2_prefetched.selfref_set.all(), [s])
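

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the equality/hash contract
# exercised by test_eq and test_hash above can be summarized by a plain class
# that mirrors it -- equal iff same type and same pk, hash delegated to the
# pk, and unhashable while the pk is unset. PkIdentity is a hypothetical
# stand-in written for this sketch, not Django code.

class PkIdentity:
    def __init__(self, pk=None):
        self.pk = pk

    def __eq__(self, other):
        if not isinstance(other, PkIdentity):
            return NotImplemented
        if self.pk is None:
            # Instances without a pk only compare equal to themselves,
            # mirroring test_eq above.
            return self is other
        return self.pk == other.pk

    def __hash__(self):
        if self.pk is None:
            # Mirrors test_hash: no pk value means unhashable, because
            # saving would change the hash.
            raise TypeError('instances without a primary key value are unhashable')
        return hash(self.pk)


assert PkIdentity(pk=1) == PkIdentity(pk=1)
assert hash(PkIdentity(pk=1)) == hash(1)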
import datetime from unittest import skipIf, skipUnless from django.db import connection from django.db.models import CASCADE, ForeignKey, Index, Q from django.test import ( TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import override_settings from django.utils import timezone from .models import ( Article, ArticleTranslation, IndexedArticle2, IndexTogetherSingleList, ) class SchemaIndexesTests(TestCase): """ Test index handling by the db.backends.schema infrastructure. """ def test_index_name_hash(self): """ Index names should be deterministic. """ editor = connection.schema_editor() index_name = editor._create_index_name( table_name=Article._meta.db_table, column_names=("c1",), suffix="123", ) self.assertEqual(index_name, "indexes_article_c1_a52bd80b123") def test_index_name(self): """ Index names on the built-in database backends:: * Are truncated as needed. * Include all the column names. * Include a deterministic hash. """ long_name = 'l%sng' % ('o' * 100) editor = connection.schema_editor() index_name = editor._create_index_name( table_name=Article._meta.db_table, column_names=('c1', 'c2', long_name), suffix='ix', ) expected = { 'mysql': 'indexes_article_c1_c2_looooooooooooooooooo_255179b2ix', 'oracle': 'indexes_a_c1_c2_loo_255179b2ix', 'postgresql': 'indexes_article_c1_c2_loooooooooooooooooo_255179b2ix', 'sqlite': 'indexes_article_c1_c2_l%sng_255179b2ix' % ('o' * 100), } if connection.vendor not in expected: self.skipTest('This test is only supported on the built-in database backends.') self.assertEqual(index_name, expected[connection.vendor]) def test_index_together(self): editor = connection.schema_editor() index_sql = [str(statement) for statement in editor._model_indexes_sql(Article)] self.assertEqual(len(index_sql), 1) # Ensure the index name is properly quoted self.assertIn( connection.ops.quote_name( editor._create_index_name(Article._meta.db_table, ['headline', 'pub_date'], suffix='_idx') ), index_sql[0] ) def test_index_together_single_list(self): # Test for using index_together with a single list (#22172) index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList) self.assertEqual(len(index_sql), 1) def test_columns_list_sql(self): index = Index(fields=['headline'], name='whitespace_idx') editor = connection.schema_editor() self.assertIn( '(%s)' % editor.quote_name('headline'), str(index.create_sql(Article, editor)), ) def test_descending_columns_list_sql(self): index = Index(fields=['-headline'], name='whitespace_idx') editor = connection.schema_editor() self.assertIn( '(%s DESC)' % editor.quote_name('headline'), str(index.create_sql(Article, editor)), ) @skipIf(connection.vendor == 'postgresql', 'opclasses are PostgreSQL only') class SchemaIndexesNotPostgreSQLTests(TransactionTestCase): available_apps = ['indexes'] def test_create_index_ignores_opclasses(self): index = Index( name='test_ops_class', fields=['headline'], opclasses=['varchar_pattern_ops'], ) with connection.schema_editor() as editor: # This would error if opclasses weren't ignored. editor.add_index(IndexedArticle2, index) # The `condition` parameter is ignored by databases that don't support partial # indexes. 
@skipIfDBFeature('supports_partial_indexes') class PartialIndexConditionIgnoredTests(TransactionTestCase): available_apps = ['indexes'] def test_condition_ignored(self): index = Index( name='test_condition_ignored', fields=['published'], condition=Q(published=True), ) with connection.schema_editor() as editor: # This would error if condition weren't ignored. editor.add_index(Article, index) self.assertNotIn( 'WHERE %s' % editor.quote_name('published'), str(index.create_sql(Article, editor)) ) @skipUnless(connection.vendor == 'postgresql', 'PostgreSQL tests') class SchemaIndexesPostgreSQLTests(TransactionTestCase): available_apps = ['indexes'] get_opclass_query = ''' SELECT opcname, c.relname FROM pg_opclass AS oc JOIN pg_index as i on oc.oid = ANY(i.indclass) JOIN pg_class as c on c.oid = i.indexrelid WHERE c.relname = '%s' ''' def test_text_indexes(self): """Test creation of PostgreSQL-specific text indexes (#12234)""" from .models import IndexedArticle index_sql = [str(statement) for statement in connection.schema_editor()._model_indexes_sql(IndexedArticle)] self.assertEqual(len(index_sql), 5) self.assertIn('("headline" varchar_pattern_ops)', index_sql[1]) self.assertIn('("body" text_pattern_ops)', index_sql[3]) # unique=True and db_index=True should only create the varchar-specific # index (#19441). self.assertIn('("slug" varchar_pattern_ops)', index_sql[4]) def test_virtual_relation_indexes(self): """Test indexes are not created for related objects""" index_sql = connection.schema_editor()._model_indexes_sql(Article) self.assertEqual(len(index_sql), 1) def test_ops_class(self): index = Index( name='test_ops_class', fields=['headline'], opclasses=['varchar_pattern_ops'], ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % 'test_ops_class') self.assertEqual(cursor.fetchall(), [('varchar_pattern_ops', 'test_ops_class')]) def test_ops_class_multiple_columns(self): index = Index( name='test_ops_class_multiple', fields=['headline', 'body'], opclasses=['varchar_pattern_ops', 'text_pattern_ops'], ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % 'test_ops_class_multiple') expected_ops_classes = ( ('varchar_pattern_ops', 'test_ops_class_multiple'), ('text_pattern_ops', 'test_ops_class_multiple'), ) self.assertCountEqual(cursor.fetchall(), expected_ops_classes) def test_ops_class_partial(self): index = Index( name='test_ops_class_partial', fields=['body'], opclasses=['text_pattern_ops'], condition=Q(headline__contains='China'), ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % 'test_ops_class_partial') self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', 'test_ops_class_partial')]) def test_ops_class_partial_tablespace(self): indexname = 'test_ops_class_tblspace' index = Index( name=indexname, fields=['body'], opclasses=['text_pattern_ops'], condition=Q(headline__contains='China'), db_tablespace='pg_default', ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) self.assertIn('TABLESPACE "pg_default" ', str(index.create_sql(IndexedArticle2, editor))) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % indexname) self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', 
                indexname)])

    def test_ops_class_descending(self):
        indexname = 'test_ops_class_ordered'
        index = Index(
            name=indexname,
            fields=['-body'],
            opclasses=['text_pattern_ops'],
        )
        with connection.schema_editor() as editor:
            editor.add_index(IndexedArticle2, index)
            with editor.connection.cursor() as cursor:
                cursor.execute(self.get_opclass_query % indexname)
                self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', indexname)])

    def test_ops_class_descending_partial(self):
        indexname = 'test_ops_class_ordered_partial'
        index = Index(
            name=indexname,
            fields=['-body'],
            opclasses=['text_pattern_ops'],
            condition=Q(headline__contains='China'),
        )
        with connection.schema_editor() as editor:
            editor.add_index(IndexedArticle2, index)
            with editor.connection.cursor() as cursor:
                cursor.execute(self.get_opclass_query % indexname)
                self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', indexname)])

    def test_ops_class_columns_lists_sql(self):
        index = Index(
            fields=['headline'],
            name='whitespace_idx',
            opclasses=['text_pattern_ops'],
        )
        with connection.schema_editor() as editor:
            self.assertIn(
                '(%s text_pattern_ops)' % editor.quote_name('headline'),
                str(index.create_sql(Article, editor)),
            )

    def test_ops_class_descending_columns_list_sql(self):
        index = Index(
            fields=['-headline'],
            name='whitespace_idx',
            opclasses=['text_pattern_ops'],
        )
        with connection.schema_editor() as editor:
            self.assertIn(
                '(%s text_pattern_ops DESC)' % editor.quote_name('headline'),
                str(index.create_sql(Article, editor)),
            )


@skipUnless(connection.vendor == 'mysql', 'MySQL tests')
class SchemaIndexesMySQLTests(TransactionTestCase):
    available_apps = ['indexes']

    def test_no_index_for_foreignkey(self):
        """
        MySQL on InnoDB already creates indexes automatically for foreign
        keys. (#14180). An index should be created if db_constraint=False
        (#26171).
        """
        storage = connection.introspection.get_storage_engine(
            connection.cursor(), ArticleTranslation._meta.db_table
        )
        if storage != "InnoDB":
            self.skipTest("This test only applies to the InnoDB storage engine")
        index_sql = [str(statement) for statement in
                     connection.schema_editor()._model_indexes_sql(ArticleTranslation)]
        self.assertEqual(index_sql, [
            'CREATE INDEX `indexes_articletranslation_article_no_constraint_id_d6c0806b` '
            'ON `indexes_articletranslation` (`article_no_constraint_id`)'
        ])

        # The index also shouldn't be created if the ForeignKey is added after
        # the model was created.
        field_created = False
        try:
            with connection.schema_editor() as editor:
                new_field = ForeignKey(Article, CASCADE)
                new_field.set_attributes_from_name('new_foreign_key')
                editor.add_field(ArticleTranslation, new_field)
                field_created = True
                # No deferred SQL. The FK constraint is included in the
                # statement to add the field.
                self.assertFalse(editor.deferred_sql)
        finally:
            if field_created:
                with connection.schema_editor() as editor:
                    editor.remove_field(ArticleTranslation, new_field)


@skipUnlessDBFeature('supports_partial_indexes')
# SQLite doesn't support timezone-aware datetimes when USE_TZ is False.
@override_settings(USE_TZ=True)
class PartialIndexTests(TransactionTestCase):
    # Schema editor is used to create the index to test that it works.
    available_apps = ['indexes']

    def test_partial_index(self):
        with connection.schema_editor() as editor:
            index = Index(
                name='recent_article_idx',
                fields=['pub_date'],
                condition=Q(
                    pub_date__gt=datetime.datetime(
                        year=2015, month=1, day=1,
                        # PostgreSQL would otherwise complain about the lookup
                        # being converted to a mutable function (by removing
                        # the timezone in the cast) which is forbidden.
tzinfo=timezone.get_current_timezone(), ), ) ) self.assertIn( 'WHERE %s' % editor.quote_name('pub_date'), str(index.create_sql(Article, schema_editor=editor)) ) editor.add_index(index=index, model=Article) self.assertIn(index.name, connection.introspection.get_constraints( cursor=connection.cursor(), table_name=Article._meta.db_table, )) editor.remove_index(index=index, model=Article) def test_integer_restriction_partial(self): with connection.schema_editor() as editor: index = Index( name='recent_article_idx', fields=['id'], condition=Q(pk__gt=1), ) self.assertIn( 'WHERE %s' % editor.quote_name('id'), str(index.create_sql(Article, schema_editor=editor)) ) editor.add_index(index=index, model=Article) self.assertIn(index.name, connection.introspection.get_constraints( cursor=connection.cursor(), table_name=Article._meta.db_table, )) editor.remove_index(index=index, model=Article) def test_boolean_restriction_partial(self): with connection.schema_editor() as editor: index = Index( name='published_index', fields=['published'], condition=Q(published=True), ) self.assertIn( 'WHERE %s' % editor.quote_name('published'), str(index.create_sql(Article, schema_editor=editor)) ) editor.add_index(index=index, model=Article) self.assertIn(index.name, connection.introspection.get_constraints( cursor=connection.cursor(), table_name=Article._meta.db_table, )) editor.remove_index(index=index, model=Article) @skipUnlessDBFeature('supports_functions_in_partial_indexes') def test_multiple_conditions(self): with connection.schema_editor() as editor: index = Index( name='recent_article_idx', fields=['pub_date', 'headline'], condition=( Q(pub_date__gt=datetime.datetime( year=2015, month=1, day=1, tzinfo=timezone.get_current_timezone(), )) & Q(headline__contains='China') ), ) sql = str(index.create_sql(Article, schema_editor=editor)) where = sql.find('WHERE') self.assertIn( 'WHERE (%s' % editor.quote_name('pub_date'), sql ) # Because each backend has different syntax for the operators, # check ONLY the occurrence of headline in the SQL. self.assertGreater(sql.rfind('headline'), where) editor.add_index(index=index, model=Article) self.assertIn(index.name, connection.introspection.get_constraints( cursor=connection.cursor(), table_name=Article._meta.db_table, )) editor.remove_index(index=index, model=Article) def test_is_null_condition(self): with connection.schema_editor() as editor: index = Index( name='recent_article_idx', fields=['pub_date'], condition=Q(pub_date__isnull=False), ) self.assertIn( 'WHERE %s IS NOT NULL' % editor.quote_name('pub_date'), str(index.create_sql(Article, schema_editor=editor)) ) editor.add_index(index=index, model=Article) self.assertIn(index.name, connection.introspection.get_constraints( cursor=connection.cursor(), table_name=Article._meta.db_table, )) editor.remove_index(index=index, model=Article)
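

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): every partial-index test
# above asserts that Index(condition=Q(...)) contributes a WHERE clause to the
# generated CREATE INDEX statement. On backends with supports_partial_indexes
# that clause is plain DDL; a minimal standalone demonstration with the stdlib
# sqlite3 module (SQLite supports partial indexes since 3.8.0). The table and
# index names here are made up for the example.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE article (id INTEGER PRIMARY KEY, published BOOLEAN)')
conn.execute('CREATE INDEX published_idx ON article (published) WHERE published = 1')
# SQLite keeps the original DDL text in sqlite_master, so the condition is
# visible in the stored statement, mirroring what the tests assert against
# Index.create_sql() output.
sql = conn.execute(
    "SELECT sql FROM sqlite_master WHERE name = 'published_idx'"
).fetchone()[0]
assert 'WHERE published = 1' in sql
conn.close()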
import datetime from copy import deepcopy from django.core.exceptions import FieldError, MultipleObjectsReturned from django.db import IntegrityError, models, transaction from django.test import TestCase from django.utils.translation import gettext_lazy from .models import ( Article, Category, Child, ChildNullableParent, City, Country, District, First, Parent, Record, Relation, Reporter, School, Student, Third, ToFieldChild, ) class ManyToOneTests(TestCase): def setUp(self): # Create a few Reporters. self.r = Reporter(first_name='John', last_name='Smith', email='[email protected]') self.r.save() self.r2 = Reporter(first_name='Paul', last_name='Jones', email='[email protected]') self.r2.save() # Create an Article. self.a = Article(headline="This is a test", pub_date=datetime.date(2005, 7, 27), reporter=self.r) self.a.save() def test_get(self): # Article objects have access to their related Reporter objects. r = self.a.reporter self.assertEqual(r.id, self.r.id) self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith')) def test_create(self): # You can also instantiate an Article by passing the Reporter's ID # instead of a Reporter object. a3 = Article(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id) a3.save() self.assertEqual(a3.reporter.id, self.r.id) # Similarly, the reporter ID can be a string. a4 = Article(headline="Fourth article", pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id)) a4.save() self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>") def test_add(self): # Create an Article via the Reporter object. new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29)) self.assertEqual(repr(new_article), "<Article: John's second story>") self.assertEqual(new_article.reporter.id, self.r.id) # Create a new article, and add it to the article set. new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17)) msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first." with self.assertRaisesMessage(ValueError, msg): self.r.article_set.add(new_article2) self.r.article_set.add(new_article2, bulk=False) self.assertEqual(new_article2.reporter.id, self.r.id) self.assertQuerysetEqual( self.r.article_set.all(), ["<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>"] ) # Add the same article to a different article set - check that it moves. self.r2.article_set.add(new_article2) self.assertEqual(new_article2.reporter.id, self.r2.id) self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"]) # Adding an object of the wrong type raises TypeError. with transaction.atomic(): with self.assertRaisesMessage(TypeError, "'Article' instance expected, got <Reporter:"): self.r.article_set.add(self.r2) self.assertQuerysetEqual( self.r.article_set.all(), ["<Article: John's second story>", "<Article: This is a test>"] ) def test_set(self): new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29)) new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17)) # Assign the article to the reporter. 
new_article2.reporter = self.r new_article2.save() self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>") self.assertEqual(new_article2.reporter.id, self.r.id) self.assertQuerysetEqual(self.r.article_set.all(), [ "<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>", ]) self.assertQuerysetEqual(self.r2.article_set.all(), []) # Set the article back again. self.r2.article_set.set([new_article, new_article2]) self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"]) self.assertQuerysetEqual( self.r2.article_set.all(), ["<Article: John's second story>", "<Article: Paul's story>"] ) # Funny case - because the ForeignKey cannot be null, # existing members of the set must remain. self.r.article_set.set([new_article]) self.assertQuerysetEqual( self.r.article_set.all(), ["<Article: John's second story>", "<Article: This is a test>"] ) self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"]) def test_reverse_assignment_deprecation(self): msg = ( "Direct assignment to the reverse side of a related set is " "prohibited. Use article_set.set() instead." ) with self.assertRaisesMessage(TypeError, msg): self.r2.article_set = [] def test_assign(self): new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29)) new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17)) # Assign the article to the reporter directly using the descriptor. new_article2.reporter = self.r new_article2.save() self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>") self.assertEqual(new_article2.reporter.id, self.r.id) self.assertQuerysetEqual(self.r.article_set.all(), [ "<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>", ]) self.assertQuerysetEqual(self.r2.article_set.all(), []) # Set the article back again using set() method. self.r2.article_set.set([new_article, new_article2]) self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"]) self.assertQuerysetEqual( self.r2.article_set.all(), ["<Article: John's second story>", "<Article: Paul's story>"] ) # Because the ForeignKey cannot be null, existing members of the set # must remain. self.r.article_set.set([new_article]) self.assertQuerysetEqual( self.r.article_set.all(), ["<Article: John's second story>", "<Article: This is a test>"] ) self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"]) # Reporter cannot be null - there should not be a clear or remove method self.assertFalse(hasattr(self.r2.article_set, 'remove')) self.assertFalse(hasattr(self.r2.article_set, 'clear')) def test_assign_fk_id_value(self): parent = Parent.objects.create(name='jeff') child1 = Child.objects.create(name='frank', parent=parent) child2 = Child.objects.create(name='randy', parent=parent) parent.bestchild = child1 parent.save() parent.bestchild_id = child2.pk parent.save() self.assertEqual(parent.bestchild_id, child2.pk) self.assertFalse(Parent.bestchild.is_cached(parent)) self.assertEqual(parent.bestchild, child2) self.assertTrue(Parent.bestchild.is_cached(parent)) # Reassigning the same value doesn't clear cached instance. 
parent.bestchild_id = child2.pk self.assertTrue(Parent.bestchild.is_cached(parent)) def test_assign_fk_id_none(self): parent = Parent.objects.create(name='jeff') child = Child.objects.create(name='frank', parent=parent) parent.bestchild = child parent.save() parent.bestchild_id = None parent.save() self.assertIsNone(parent.bestchild_id) self.assertFalse(Parent.bestchild.is_cached(parent)) self.assertIsNone(parent.bestchild) self.assertTrue(Parent.bestchild.is_cached(parent)) def test_selects(self): self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29)) self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17)) # Reporter objects have access to their related Article objects. self.assertQuerysetEqual(self.r.article_set.all(), [ "<Article: John's second story>", "<Article: This is a test>", ]) self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'), ["<Article: This is a test>"]) self.assertEqual(self.r.article_set.count(), 2) self.assertEqual(self.r2.article_set.count(), 1) # Get articles by id self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id), ["<Article: This is a test>"]) self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id), ["<Article: This is a test>"]) # Query on an article property self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'), ["<Article: This is a test>"]) # The API automatically follows relationships as far as you need. # Use double underscores to separate relationships. # This works as many levels deep as you want. There's no limit. # Find all Articles for any Reporter whose first name is "John". self.assertQuerysetEqual( Article.objects.filter(reporter__first_name__exact='John'), ["<Article: John's second story>", "<Article: This is a test>"] ) # Implied __exact also works self.assertQuerysetEqual( Article.objects.filter(reporter__first_name='John'), ["<Article: John's second story>", "<Article: This is a test>"] ) # Query twice over the related field. self.assertQuerysetEqual( Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith'), ["<Article: John's second story>", "<Article: This is a test>"] ) # The underlying query only makes one join when a related table is referenced twice. queryset = Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith') self.assertNumQueries(1, list, queryset) self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1) # The automatically joined table has a predictable name. self.assertQuerysetEqual( Article.objects.filter(reporter__first_name__exact='John').extra( where=["many_to_one_reporter.last_name='Smith'"]), ["<Article: John's second story>", "<Article: This is a test>"] ) # ... and should work fine with the string that comes out of forms.Form.cleaned_data self.assertQuerysetEqual( (Article.objects .filter(reporter__first_name__exact='John') .extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])), ["<Article: John's second story>", "<Article: This is a test>"] ) # Find all Articles for a Reporter. 
# Use direct ID check, pk check, and object comparison self.assertQuerysetEqual( Article.objects.filter(reporter__id__exact=self.r.id), [ "<Article: John's second story>", "<Article: This is a test>", ]) self.assertQuerysetEqual( Article.objects.filter(reporter__pk=self.r.id), [ "<Article: John's second story>", "<Article: This is a test>", ]) self.assertQuerysetEqual( Article.objects.filter(reporter=self.r.id), [ "<Article: John's second story>", "<Article: This is a test>", ]) self.assertQuerysetEqual( Article.objects.filter(reporter=self.r), [ "<Article: John's second story>", "<Article: This is a test>", ]) self.assertQuerysetEqual( Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(), [ "<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>", ]) self.assertQuerysetEqual( Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(), [ "<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>", ]) # You can also use a queryset instead of a literal list of instances. # The queryset must be reduced to a list of values using values(), # then converted into a query self.assertQuerysetEqual( Article.objects.filter( reporter__in=Reporter.objects.filter(first_name='John').values('pk').query ).distinct(), [ "<Article: John's second story>", "<Article: This is a test>", ]) def test_reverse_selects(self): a3 = Article.objects.create( headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id, ) Article.objects.create( headline="Fourth article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id, ) john_smith = ["<Reporter: John Smith>"] # Reporters can be queried self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id), john_smith) self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id), john_smith) self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'), john_smith) # Reporters can query in opposite direction of ForeignKey definition self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id), john_smith) self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id), john_smith) self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id), john_smith) self.assertQuerysetEqual(Reporter.objects.filter(article=self.a), john_smith) self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(), john_smith) self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(), john_smith) self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a, a3]).distinct(), john_smith) self.assertQuerysetEqual( Reporter.objects.filter(article__headline__startswith='T'), ["<Reporter: John Smith>", "<Reporter: John Smith>"], ordered=False ) self.assertQuerysetEqual(Reporter.objects.filter(article__headline__startswith='T').distinct(), john_smith) # Counting in the opposite direction works in conjunction with distinct() self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').count(), 2) self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1) # Queries can go round in circles. 
self.assertQuerysetEqual( Reporter.objects.filter(article__reporter__first_name__startswith='John'), [ "<Reporter: John Smith>", "<Reporter: John Smith>", "<Reporter: John Smith>", ], ordered=False ) self.assertQuerysetEqual( Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(), john_smith ) self.assertQuerysetEqual(Reporter.objects.filter(article__reporter__exact=self.r).distinct(), john_smith) # Implied __exact also works. self.assertQuerysetEqual(Reporter.objects.filter(article__reporter=self.r).distinct(), john_smith) # It's possible to use values() calls across many-to-one relations. # (Note, too, that we clear the ordering here so as not to drag the # 'headline' field into the columns being used to determine uniqueness) d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'} qs = Article.objects.filter( reporter=self.r, ).distinct().order_by().values('reporter__first_name', 'reporter__last_name') self.assertEqual([d], list(qs)) def test_select_related(self): # Article.objects.select_related().dates() works properly when there # are multiple Articles with the same date but different foreign-key # objects (Reporters). r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='[email protected]') r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='[email protected]') Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1) Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2) self.assertEqual( list(Article.objects.select_related().dates('pub_date', 'day')), [datetime.date(1980, 4, 23), datetime.date(2005, 7, 27)] ) self.assertEqual( list(Article.objects.select_related().dates('pub_date', 'month')), [datetime.date(1980, 4, 1), datetime.date(2005, 7, 1)] ) self.assertEqual( list(Article.objects.select_related().dates('pub_date', 'year')), [datetime.date(1980, 1, 1), datetime.date(2005, 1, 1)] ) def test_delete(self): self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29)) self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17)) Article.objects.create(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id) Article.objects.create( headline="Fourth article", pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id), ) # If you delete a reporter, his articles will be deleted. self.assertQuerysetEqual( Article.objects.all(), [ "<Article: Fourth article>", "<Article: John's second story>", "<Article: Paul's story>", "<Article: Third article>", "<Article: This is a test>", ] ) self.assertQuerysetEqual( Reporter.objects.order_by('first_name'), ["<Reporter: John Smith>", "<Reporter: Paul Jones>"] ) self.r2.delete() self.assertQuerysetEqual( Article.objects.all(), [ "<Article: Fourth article>", "<Article: John's second story>", "<Article: Third article>", "<Article: This is a test>", ] ) self.assertQuerysetEqual(Reporter.objects.order_by('first_name'), ["<Reporter: John Smith>"]) # You can delete using a JOIN in the query. Reporter.objects.filter(article__headline__startswith='This').delete() self.assertQuerysetEqual(Reporter.objects.all(), []) self.assertQuerysetEqual(Article.objects.all(), []) def test_explicit_fk(self): # Create a new Article with get_or_create using an explicit value # for a ForeignKey. 
        a2, created = Article.objects.get_or_create(
            headline="John's second test",
            pub_date=datetime.date(2011, 5, 7),
            reporter_id=self.r.id,
        )
        self.assertTrue(created)
        self.assertEqual(a2.reporter.id, self.r.id)

        # You can specify filters containing the explicit FK value.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter_id__exact=self.r.id),
            ["<Article: John's second test>", "<Article: This is a test>"]
        )

        # Create an Article by Paul for the same date.
        a3 = Article.objects.create(
            headline="Paul's commentary",
            pub_date=datetime.date(2011, 5, 7),
            reporter_id=self.r2.id,
        )
        self.assertEqual(a3.reporter.id, self.r2.id)

        # Get should respect explicit foreign keys as well.
        msg = 'get() returned more than one Article -- it returned 2!'
        with self.assertRaisesMessage(MultipleObjectsReturned, msg):
            Article.objects.get(reporter_id=self.r.id)
        self.assertEqual(
            repr(a3),
            repr(Article.objects.get(reporter_id=self.r2.id, pub_date=datetime.date(2011, 5, 7)))
        )

    def test_deepcopy_and_circular_references(self):
        # Regression for #12876 -- Model methods that include queries that
        # recurse don't cause recursion depth problems under deepcopy.
        self.r.cached_query = Article.objects.filter(reporter=self.r)
        self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")

    def test_manager_class_caching(self):
        r1 = Reporter.objects.create(first_name='Mike')
        r2 = Reporter.objects.create(first_name='John')

        # Same twice
        self.assertIs(r1.article_set.__class__, r1.article_set.__class__)

        # Same as each other
        self.assertIs(r1.article_set.__class__, r2.article_set.__class__)

    def test_create_relation_with_gettext_lazy(self):
        reporter = Reporter.objects.create(first_name='John', last_name='Smith', email='[email protected]')
        lazy = gettext_lazy('test')
        reporter.article_set.create(headline=lazy, pub_date=datetime.date(2011, 6, 10))
        notlazy = str(lazy)
        article = reporter.article_set.get()
        self.assertEqual(article.headline, notlazy)

    def test_values_list_exception(self):
        expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
        reporter_fields = ', '.join(sorted(f.name for f in Reporter._meta.get_fields()))
        with self.assertRaisesMessage(FieldError, expected_message % reporter_fields):
            Article.objects.values_list('reporter__notafield')
        article_fields = ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields()))
        with self.assertRaisesMessage(FieldError, expected_message % article_fields):
            Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list('notafield')

    def test_fk_assignment_and_related_object_cache(self):
        # Tests of ForeignKey assignment and the related-object cache (see #6886).
        p = Parent.objects.create(name="Parent")
        c = Child.objects.create(name="Child", parent=p)

        # Look up the object again so that we get a "fresh" object.
        c = Child.objects.get(name="Child")
        p = c.parent
        # Accessing the related object again returns exactly the same object.
        self.assertIs(c.parent, p)
        # But if we kill the cache, we get a new object.
        del c._state.fields_cache['parent']
        self.assertIsNot(c.parent, p)

        # Assigning a new object results in that object getting cached immediately.
        p2 = Parent.objects.create(name="Parent 2")
        c.parent = p2
        self.assertIs(c.parent, p2)

        # Assigning None succeeds if the field is null=True.
        p.bestchild = None
        self.assertIsNone(p.bestchild)

        # bestchild should still be None after saving.
        p.save()
        self.assertIsNone(p.bestchild)

        # bestchild should still be None after fetching the object again.
        p = Parent.objects.get(name="Parent")
        self.assertIsNone(p.bestchild)

        # Assigning None doesn't raise immediately, even though Child.parent
        # is null=False; the constraint is only enforced when the object is
        # saved.
        setattr(c, "parent", None)

        # You can't assign an object of the wrong type here, either.
        msg = (
            'Cannot assign "<First: First object (1)>": "Child.parent" must '
            'be a "Parent" instance.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            setattr(c, "parent", First(id=1, second=1))

        # You can assign None to Child.parent during object creation.
        Child(name='xyzzy', parent=None)

        # But when trying to save a Child with parent=None, the database will
        # raise IntegrityError.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Child.objects.create(name='xyzzy', parent=None)

        # Creation using keyword argument should cache the related object.
        p = Parent.objects.get(name="Parent")
        c = Child(parent=p)
        self.assertIs(c.parent, p)

        # Creation using keyword argument and unsaved related instance (#8070).
        p = Parent()
        msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
        with self.assertRaisesMessage(ValueError, msg):
            Child.objects.create(parent=p)

        msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
        with self.assertRaisesMessage(ValueError, msg):
            ToFieldChild.objects.create(parent=p)

        # Creation using attname keyword argument and an id will cause the
        # related object to be fetched.
        p = Parent.objects.get(name="Parent")
        c = Child(parent_id=p.id)
        self.assertIsNot(c.parent, p)
        self.assertEqual(c.parent, p)

    def test_save_nullable_fk_after_parent(self):
        parent = Parent()
        child = ChildNullableParent(parent=parent)
        parent.save()
        child.save()
        child.refresh_from_db()
        self.assertEqual(child.parent, parent)

    def test_save_nullable_fk_after_parent_with_to_field(self):
        parent = Parent(name='jeff')
        child = ToFieldChild(parent=parent)
        parent.save()
        child.save()
        child.refresh_from_db()
        self.assertEqual(child.parent, parent)
        self.assertEqual(child.parent_id, parent.name)

    def test_fk_to_bigautofield(self):
        ch = City.objects.create(name='Chicago')
        District.objects.create(city=ch, name='Far South')
        District.objects.create(city=ch, name='North')

        ny = City.objects.create(name='New York', id=2 ** 33)
        District.objects.create(city=ny, name='Brooklyn')
        District.objects.create(city=ny, name='Manhattan')

    def test_fk_to_smallautofield(self):
        us = Country.objects.create(name='United States')
        City.objects.create(country=us, name='Chicago')
        City.objects.create(country=us, name='New York')

        uk = Country.objects.create(name='United Kingdom', id=2 ** 11)
        City.objects.create(country=uk, name='London')
        City.objects.create(country=uk, name='Edinburgh')

    def test_multiple_foreignkeys(self):
        # Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name='First') c2 = Category.objects.create(name='Second') c3 = Category.objects.create(name='Third') r1 = Record.objects.create(category=c1) r2 = Record.objects.create(category=c1) r3 = Record.objects.create(category=c2) r4 = Record.objects.create(category=c2) r5 = Record.objects.create(category=c3) Relation.objects.create(left=r1, right=r2) Relation.objects.create(left=r3, right=r4) Relation.objects.create(left=r1, right=r3) Relation.objects.create(left=r5, right=r2) Relation.objects.create(left=r3, right=r2) q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second']) self.assertQuerysetEqual(q1, ["<Relation: First - Second>"]) q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name') self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"]) p = Parent.objects.create(name="Parent") c = Child.objects.create(name="Child", parent=p) msg = 'Cannot assign "%r": "Child.parent" must be a "Parent" instance.' % c with self.assertRaisesMessage(ValueError, msg): Child.objects.create(name="Grandchild", parent=c) def test_fk_instantiation_outside_model(self): # Regression for #12190 -- Should be able to instantiate a FK outside # of a model, and interrogate its related field. cat = models.ForeignKey(Category, models.CASCADE) self.assertEqual('id', cat.remote_field.get_related_field().name) def test_relation_unsaved(self): # The <field>_set manager does not join on Null value fields (#17541) Third.objects.create(name='Third 1') Third.objects.create(name='Third 2') th = Third(name="testing") # The object isn't saved and thus the relation field is null - we won't even # execute a query in this case. with self.assertNumQueries(0): self.assertEqual(th.child_set.count(), 0) th.save() # Now the model is saved, so we will need to execute a query. with self.assertNumQueries(1): self.assertEqual(th.child_set.count(), 0) def test_related_object(self): public_school = School.objects.create(is_public=True) public_student = Student.objects.create(school=public_school) private_school = School.objects.create(is_public=False) private_student = Student.objects.create(school=private_school) # Only one school is available via all() due to the custom default manager. self.assertSequenceEqual(School.objects.all(), [public_school]) self.assertEqual(public_student.school, public_school) # Make sure the base manager is used so that a student can still access # its related school even if the default manager doesn't normally # allow it. 
self.assertEqual(private_student.school, private_school) School._meta.base_manager_name = 'objects' School._meta._expire_cache() try: private_student = Student.objects.get(pk=private_student.pk) with self.assertRaises(School.DoesNotExist): private_student.school finally: School._meta.base_manager_name = None School._meta._expire_cache() def test_hasattr_related_object(self): # The exception raised on attribute access when a related object # doesn't exist should be an instance of a subclass of `AttributeError` # refs #21563 self.assertFalse(hasattr(Article(), 'reporter')) def test_clear_after_prefetch(self): c = City.objects.create(name='Musical City') District.objects.create(name='Ladida', city=c) city = City.objects.prefetch_related('districts').get(id=c.id) self.assertQuerysetEqual(city.districts.all(), ['<District: Ladida>']) city.districts.clear() self.assertQuerysetEqual(city.districts.all(), []) def test_remove_after_prefetch(self): c = City.objects.create(name='Musical City') d = District.objects.create(name='Ladida', city=c) city = City.objects.prefetch_related('districts').get(id=c.id) self.assertQuerysetEqual(city.districts.all(), ['<District: Ladida>']) city.districts.remove(d) self.assertQuerysetEqual(city.districts.all(), []) def test_add_after_prefetch(self): c = City.objects.create(name='Musical City') District.objects.create(name='Ladida', city=c) d2 = District.objects.create(name='Ladidu') city = City.objects.prefetch_related('districts').get(id=c.id) self.assertEqual(city.districts.count(), 1) city.districts.add(d2) self.assertEqual(city.districts.count(), 2) def test_set_after_prefetch(self): c = City.objects.create(name='Musical City') District.objects.create(name='Ladida', city=c) d2 = District.objects.create(name='Ladidu') city = City.objects.prefetch_related('districts').get(id=c.id) self.assertEqual(city.districts.count(), 1) city.districts.set([d2]) self.assertQuerysetEqual(city.districts.all(), ['<District: Ladidu>']) def test_add_then_remove_after_prefetch(self): c = City.objects.create(name='Musical City') District.objects.create(name='Ladida', city=c) d2 = District.objects.create(name='Ladidu') city = City.objects.prefetch_related('districts').get(id=c.id) self.assertEqual(city.districts.count(), 1) city.districts.add(d2) self.assertEqual(city.districts.count(), 2) city.districts.remove(d2) self.assertEqual(city.districts.count(), 1) def test_cached_relation_invalidated_on_save(self): """ Model.save() invalidates stale ForeignKey relations after a primary key assignment. """ self.assertEqual(self.a.reporter, self.r) # caches a.reporter self.a.reporter_id = self.r2.pk self.a.save() self.assertEqual(self.a.reporter, self.r2) def test_cached_foreign_key_with_to_field_not_cleared_by_save(self): parent = Parent.objects.create(name='a') child = ToFieldChild.objects.create(parent=parent) with self.assertNumQueries(0): self.assertIs(child.parent, parent) def test_reverse_foreign_key_instance_to_field_caching(self): parent = Parent.objects.create(name='a') ToFieldChild.objects.create(parent=parent) child = parent.to_field_children.get() with self.assertNumQueries(0): self.assertIs(child.parent, parent)
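# ---------------------------------------------------------------------------
# Hedged sketch (not part of the test module above): the related-object
# cache behaviour that test_fk_assignment_and_related_object_cache walks
# through, condensed into one function. Parent and Child mirror the test
# models (Child.parent is a non-nullable ForeignKey), and the import path
# is assumed to match the test module's own models.
# ---------------------------------------------------------------------------
from .models import Child, Parent  # assumed import path, as in the tests


def related_object_cache_sketch():
    parent = Parent.objects.create(name='demo parent')
    child = Child.objects.create(name='demo child', parent=parent)
    child = Child.objects.get(pk=child.pk)   # fresh instance, empty cache
    first = child.parent                     # one query; result is cached
    assert child.parent is first             # served from the cache, no query
    del child._state.fields_cache['parent']  # drop the cached relation
    assert child.parent is not first         # next access re-fetches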
"""Tests related to django.db.backends that haven't been organized.""" import datetime import threading import unittest import warnings from django.core.management.color import no_style from django.db import ( DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections, reset_queries, transaction, ) from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.signals import connection_created from django.db.backends.utils import CursorWrapper from django.db.models.sql.constants import CURSOR from django.test import ( TestCase, TransactionTestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature, ) from .models import ( Article, Object, ObjectReference, Person, Post, RawData, Reporter, ReporterProxy, SchoolClass, Square, VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ, ) class DateQuotingTest(TestCase): def test_django_date_trunc(self): """ Test the custom ``django_date_trunc method``, in particular against fields which clash with strings passed to it (e.g. 'year') (#12818). """ updated = datetime.datetime(2010, 2, 20) SchoolClass.objects.create(year=2009, last_updated=updated) years = SchoolClass.objects.dates('last_updated', 'year') self.assertEqual(list(years), [datetime.date(2010, 1, 1)]) def test_django_date_extract(self): """ Test the custom ``django_date_extract method``, in particular against fields which clash with strings passed to it (e.g. 'day') (#12818). """ updated = datetime.datetime(2010, 2, 20) SchoolClass.objects.create(year=2009, last_updated=updated) classes = SchoolClass.objects.filter(last_updated__day=20) self.assertEqual(len(classes), 1) @override_settings(DEBUG=True) class LastExecutedQueryTest(TestCase): def test_last_executed_query_without_previous_query(self): """ last_executed_query should not raise an exception even if no previous query has been run. """ with connection.cursor() as cursor: connection.ops.last_executed_query(cursor, '', ()) def test_debug_sql(self): list(Reporter.objects.filter(first_name="test")) sql = connection.queries[-1]['sql'].lower() self.assertIn("select", sql) self.assertIn(Reporter._meta.db_table, sql) def test_query_encoding(self): """last_executed_query() returns a string.""" data = RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1}) sql, params = data.query.sql_with_params() cursor = data.query.get_compiler('default').execute_sql(CURSOR) last_sql = cursor.db.ops.last_executed_query(cursor, sql, params) self.assertIsInstance(last_sql, str) def test_last_executed_query(self): # last_executed_query() interpolate all parameters, in most cases it is # not equal to QuerySet.query. 
for qs in ( Article.objects.filter(pk=1), Article.objects.filter(pk__in=(1, 2), reporter__pk=3), ): sql, params = qs.query.sql_with_params() cursor = qs.query.get_compiler(DEFAULT_DB_ALIAS).execute_sql(CURSOR) self.assertEqual( cursor.db.ops.last_executed_query(cursor, sql, params), str(qs.query), ) @skipUnlessDBFeature('supports_paramstyle_pyformat') def test_last_executed_query_dict(self): square_opts = Square._meta sql = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % ( connection.introspection.identifier_converter(square_opts.db_table), connection.ops.quote_name(square_opts.get_field('root').column), connection.ops.quote_name(square_opts.get_field('square').column), ) with connection.cursor() as cursor: params = {'root': 2, 'square': 4} cursor.execute(sql, params) self.assertEqual( cursor.db.ops.last_executed_query(cursor, sql, params), sql % params, ) class ParameterHandlingTest(TestCase): def test_bad_parameter_count(self): "An executemany call with too many/not enough parameters will raise an exception (Refs #12612)" with connection.cursor() as cursor: query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % ( connection.introspection.identifier_converter('backends_square'), connection.ops.quote_name('root'), connection.ops.quote_name('square') )) with self.assertRaises(Exception): cursor.executemany(query, [(1, 2, 3)]) with self.assertRaises(Exception): cursor.executemany(query, [(1,)]) class LongNameTest(TransactionTestCase): """Long primary keys and model names can result in a sequence name that exceeds the database limits, which will result in truncation on certain databases (e.g., Postgres). The backend needs to use the correct sequence name in last_insert_id and other places, so check it is. Refs #8901. """ available_apps = ['backends'] def test_sequence_name_length_limits_create(self): """Test creation of model with long name and long pk name doesn't error. Ref #8901""" VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create() def test_sequence_name_length_limits_m2m(self): """ An m2m save of a model with a long name and a long m2m field name doesn't error (#8901). """ obj = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create() rel_obj = Person.objects.create(first_name='Django', last_name='Reinhardt') obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj) def test_sequence_name_length_limits_flush(self): """ Sequence resetting as part of a flush with model with long name and long pk name doesn't error (#8901). 
""" # A full flush is expensive to the full test, so we dig into the # internals to generate the likely offending SQL and run it manually # Some convenience aliases VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through tables = [ VLM._meta.db_table, VLM_m2m._meta.db_table, ] sequences = [ { 'column': VLM._meta.pk.column, 'table': VLM._meta.db_table }, ] sql_list = connection.ops.sql_flush(no_style(), tables, sequences) with connection.cursor() as cursor: for statement in sql_list: cursor.execute(statement) class SequenceResetTest(TestCase): def test_generic_relation(self): "Sequence names are correct when resetting generic relations (Ref #13941)" # Create an object with a manually specified PK Post.objects.create(id=10, name='1st post', text='hello world') # Reset the sequences for the database commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [Post]) with connection.cursor() as cursor: for sql in commands: cursor.execute(sql) # If we create a new object now, it should have a PK greater # than the PK we specified manually. obj = Post.objects.create(name='New post', text='goodbye world') self.assertGreater(obj.pk, 10) # This test needs to run outside of a transaction, otherwise closing the # connection would implicitly rollback and cause problems during teardown. class ConnectionCreatedSignalTest(TransactionTestCase): available_apps = [] # Unfortunately with sqlite3 the in-memory test database cannot be closed, # and so it cannot be re-opened during testing. @skipUnlessDBFeature('test_db_allows_multiple_connections') def test_signal(self): data = {} def receiver(sender, connection, **kwargs): data["connection"] = connection connection_created.connect(receiver) connection.close() connection.cursor() self.assertIs(data["connection"].connection, connection.connection) connection_created.disconnect(receiver) data.clear() connection.cursor() self.assertEqual(data, {}) class EscapingChecks(TestCase): """ All tests in this test case are also run with settings.DEBUG=True in EscapingChecksDebug test case, to also test CursorDebugWrapper. 
""" bare_select_suffix = connection.features.bare_select_suffix def test_paramless_no_escaping(self): with connection.cursor() as cursor: cursor.execute("SELECT '%s'" + self.bare_select_suffix) self.assertEqual(cursor.fetchall()[0][0], '%s') def test_parameter_escaping(self): with connection.cursor() as cursor: cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',)) self.assertEqual(cursor.fetchall()[0], ('%', '%d')) @override_settings(DEBUG=True) class EscapingChecksDebug(EscapingChecks): pass class BackendTestCase(TransactionTestCase): available_apps = ['backends'] def create_squares_with_executemany(self, args): self.create_squares(args, 'format', True) def create_squares(self, args, paramstyle, multiple): opts = Square._meta tbl = connection.introspection.identifier_converter(opts.db_table) f1 = connection.ops.quote_name(opts.get_field('root').column) f2 = connection.ops.quote_name(opts.get_field('square').column) if paramstyle == 'format': query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2) elif paramstyle == 'pyformat': query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2) else: raise ValueError("unsupported paramstyle in test") with connection.cursor() as cursor: if multiple: cursor.executemany(query, args) else: cursor.execute(query, args) def test_cursor_executemany(self): # Test cursor.executemany #4896 args = [(i, i ** 2) for i in range(-5, 6)] self.create_squares_with_executemany(args) self.assertEqual(Square.objects.count(), 11) for i in range(-5, 6): square = Square.objects.get(root=i) self.assertEqual(square.square, i ** 2) def test_cursor_executemany_with_empty_params_list(self): # Test executemany with params=[] does nothing #4765 args = [] self.create_squares_with_executemany(args) self.assertEqual(Square.objects.count(), 0) def test_cursor_executemany_with_iterator(self): # Test executemany accepts iterators #10320 args = ((i, i ** 2) for i in range(-3, 2)) self.create_squares_with_executemany(args) self.assertEqual(Square.objects.count(), 5) args = ((i, i ** 2) for i in range(3, 7)) with override_settings(DEBUG=True): # same test for DebugCursorWrapper self.create_squares_with_executemany(args) self.assertEqual(Square.objects.count(), 9) @skipUnlessDBFeature('supports_paramstyle_pyformat') def test_cursor_execute_with_pyformat(self): # Support pyformat style passing of parameters #10070 args = {'root': 3, 'square': 9} self.create_squares(args, 'pyformat', multiple=False) self.assertEqual(Square.objects.count(), 1) @skipUnlessDBFeature('supports_paramstyle_pyformat') def test_cursor_executemany_with_pyformat(self): # Support pyformat style passing of parameters #10070 args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)] self.create_squares(args, 'pyformat', multiple=True) self.assertEqual(Square.objects.count(), 11) for i in range(-5, 6): square = Square.objects.get(root=i) self.assertEqual(square.square, i ** 2) @skipUnlessDBFeature('supports_paramstyle_pyformat') def test_cursor_executemany_with_pyformat_iterator(self): args = ({'root': i, 'square': i ** 2} for i in range(-3, 2)) self.create_squares(args, 'pyformat', multiple=True) self.assertEqual(Square.objects.count(), 5) args = ({'root': i, 'square': i ** 2} for i in range(3, 7)) with override_settings(DEBUG=True): # same test for DebugCursorWrapper self.create_squares(args, 'pyformat', multiple=True) self.assertEqual(Square.objects.count(), 9) def test_unicode_fetches(self): # fetchone, fetchmany, fetchall return strings as unicode objects #6254 qn = 
connection.ops.quote_name Person(first_name="John", last_name="Doe").save() Person(first_name="Jane", last_name="Doe").save() Person(first_name="Mary", last_name="Agnelline").save() Person(first_name="Peter", last_name="Parker").save() Person(first_name="Clark", last_name="Kent").save() opts2 = Person._meta f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name') with connection.cursor() as cursor: cursor.execute( 'SELECT %s, %s FROM %s ORDER BY %s' % ( qn(f3.column), qn(f4.column), connection.introspection.identifier_converter(opts2.db_table), qn(f3.column), ) ) self.assertEqual(cursor.fetchone(), ('Clark', 'Kent')) self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')]) self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')]) def test_unicode_password(self): old_password = connection.settings_dict['PASSWORD'] connection.settings_dict['PASSWORD'] = "françois" try: connection.cursor() except DatabaseError: # As password is probably wrong, a database exception is expected pass except Exception as e: self.fail("Unexpected error raised with unicode password: %s" % e) finally: connection.settings_dict['PASSWORD'] = old_password def test_database_operations_helper_class(self): # Ticket #13630 self.assertTrue(hasattr(connection, 'ops')) self.assertTrue(hasattr(connection.ops, 'connection')) self.assertEqual(connection, connection.ops.connection) def test_database_operations_init(self): """ DatabaseOperations initialization doesn't query the database. See #17656. """ with self.assertNumQueries(0): connection.ops.__class__(connection) def test_cached_db_features(self): self.assertIn(connection.features.supports_transactions, (True, False)) self.assertIn(connection.features.can_introspect_foreign_keys, (True, False)) def test_duplicate_table_error(self): """ Creating an existing table returns a DatabaseError """ query = 'CREATE TABLE %s (id INTEGER);' % Article._meta.db_table with connection.cursor() as cursor: with self.assertRaises(DatabaseError): cursor.execute(query) def test_cursor_contextmanager(self): """ Cursors can be used as a context manager """ with connection.cursor() as cursor: self.assertIsInstance(cursor, CursorWrapper) # Both InterfaceError and ProgrammingError seem to be used when # accessing closed cursor (psycopg2 has InterfaceError, rest seem # to use ProgrammingError). with self.assertRaises(connection.features.closed_cursor_error_class): # cursor should be closed, so no queries should be possible. cursor.execute("SELECT 1" + connection.features.bare_select_suffix) @unittest.skipUnless(connection.vendor == 'postgresql', "Psycopg2 specific cursor.closed attribute needed") def test_cursor_contextmanager_closing(self): # There isn't a generic way to test that cursors are closed, but # psycopg2 offers us a way to check that by closed attribute. # So, run only on psycopg2 for that reason. with connection.cursor() as cursor: self.assertIsInstance(cursor, CursorWrapper) self.assertTrue(cursor.closed) # Unfortunately with sqlite3 the in-memory test database cannot be closed. @skipUnlessDBFeature('test_db_allows_multiple_connections') def test_is_usable_after_database_disconnects(self): """ is_usable() doesn't crash when the database disconnects (#21553). """ # Open a connection to the database. with connection.cursor(): pass # Emulate a connection close by the database. connection._close() # Even then is_usable() should not raise an exception. 
try: self.assertFalse(connection.is_usable()) finally: # Clean up the mess created by connection._close(). Since the # connection is already closed, this crashes on some backends. try: connection.close() except Exception: pass @override_settings(DEBUG=True) def test_queries(self): """ Test the documented API of connection.queries. """ sql = 'SELECT 1' + connection.features.bare_select_suffix with connection.cursor() as cursor: reset_queries() cursor.execute(sql) self.assertEqual(1, len(connection.queries)) self.assertIsInstance(connection.queries, list) self.assertIsInstance(connection.queries[0], dict) self.assertEqual(list(connection.queries[0]), ['sql', 'time']) self.assertEqual(connection.queries[0]['sql'], sql) reset_queries() self.assertEqual(0, len(connection.queries)) sql = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % ( connection.introspection.identifier_converter('backends_square'), connection.ops.quote_name('root'), connection.ops.quote_name('square'), )) with connection.cursor() as cursor: cursor.executemany(sql, [(1, 1), (2, 4)]) self.assertEqual(1, len(connection.queries)) self.assertIsInstance(connection.queries, list) self.assertIsInstance(connection.queries[0], dict) self.assertEqual(list(connection.queries[0]), ['sql', 'time']) self.assertEqual(connection.queries[0]['sql'], '2 times: %s' % sql) # Unfortunately with sqlite3 the in-memory test database cannot be closed. @skipUnlessDBFeature('test_db_allows_multiple_connections') @override_settings(DEBUG=True) def test_queries_limit(self): """ The backend doesn't store an unlimited number of queries (#12581). """ old_queries_limit = BaseDatabaseWrapper.queries_limit BaseDatabaseWrapper.queries_limit = 3 new_connection = connection.copy() # Initialize the connection and clear initialization statements. with new_connection.cursor(): pass new_connection.queries_log.clear() try: with new_connection.cursor() as cursor: cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix) cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix) with warnings.catch_warnings(record=True) as w: self.assertEqual(2, len(new_connection.queries)) self.assertEqual(0, len(w)) with new_connection.cursor() as cursor: cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix) cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix) msg = "Limit for query logging exceeded, only the last 3 queries will be returned." with self.assertWarnsMessage(UserWarning, msg): self.assertEqual(3, len(new_connection.queries)) finally: BaseDatabaseWrapper.queries_limit = old_queries_limit new_connection.close() def test_timezone_none_use_tz_false(self): connection.ensure_connection() with self.settings(TIME_ZONE=None, USE_TZ=False): connection.init_connection_state() # These tests aren't conditional because it would require differentiating # between MySQL+InnoDB and MySQL+MYISAM (something we currently can't do). class FkConstraintsTests(TransactionTestCase): available_apps = ['backends'] def setUp(self): # Create a Reporter. self.r = Reporter.objects.create(first_name='John', last_name='Smith') def test_integrity_checks_on_creation(self): """ Try to create a model instance that violates a FK constraint. If it fails it should fail with IntegrityError. 
""" a1 = Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30) try: a1.save() except IntegrityError: pass else: self.skipTest("This backend does not support integrity checks.") # Now that we know this backend supports integrity checks we make sure # constraints are also enforced for proxy Refs #17519 a2 = Article( headline='This is another test', reporter=self.r, pub_date=datetime.datetime(2012, 8, 3), reporter_proxy_id=30, ) with self.assertRaises(IntegrityError): a2.save() def test_integrity_checks_on_update(self): """ Try to update a model instance introducing a FK constraint violation. If it fails it should fail with IntegrityError. """ # Create an Article. Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r) # Retrieve it from the DB a1 = Article.objects.get(headline="Test article") a1.reporter_id = 30 try: a1.save() except IntegrityError: pass else: self.skipTest("This backend does not support integrity checks.") # Now that we know this backend supports integrity checks we make sure # constraints are also enforced for proxy Refs #17519 # Create another article r_proxy = ReporterProxy.objects.get(pk=self.r.pk) Article.objects.create( headline='Another article', pub_date=datetime.datetime(1988, 5, 15), reporter=self.r, reporter_proxy=r_proxy, ) # Retrieve the second article from the DB a2 = Article.objects.get(headline='Another article') a2.reporter_proxy_id = 30 with self.assertRaises(IntegrityError): a2.save() def test_disable_constraint_checks_manually(self): """ When constraint checks are disabled, should be able to write bad data without IntegrityErrors. """ with transaction.atomic(): # Create an Article. Article.objects.create( headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r, ) # Retrieve it from the DB a = Article.objects.get(headline="Test article") a.reporter_id = 30 try: connection.disable_constraint_checking() a.save() connection.enable_constraint_checking() except IntegrityError: self.fail("IntegrityError should not have occurred.") transaction.set_rollback(True) def test_disable_constraint_checks_context_manager(self): """ When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors. """ with transaction.atomic(): # Create an Article. Article.objects.create( headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r, ) # Retrieve it from the DB a = Article.objects.get(headline="Test article") a.reporter_id = 30 try: with connection.constraint_checks_disabled(): a.save() except IntegrityError: self.fail("IntegrityError should not have occurred.") transaction.set_rollback(True) def test_check_constraints(self): """ Constraint checks should raise an IntegrityError when bad data is in the DB. """ with transaction.atomic(): # Create an Article. Article.objects.create( headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r, ) # Retrieve it from the DB a = Article.objects.get(headline="Test article") a.reporter_id = 30 with connection.constraint_checks_disabled(): a.save() with self.assertRaises(IntegrityError): connection.check_constraints() transaction.set_rollback(True) class ThreadTests(TransactionTestCase): available_apps = ['backends'] def test_default_connection_thread_local(self): """ The default connection (i.e. django.db.connection) is different for each thread (#17258). 
""" # Map connections by id because connections with identical aliases # have the same hash. connections_dict = {} connection.cursor() connections_dict[id(connection)] = connection def runner(): # Passing django.db.connection between threads doesn't work while # connections[DEFAULT_DB_ALIAS] does. from django.db import connections connection = connections[DEFAULT_DB_ALIAS] # Allow thread sharing so the connection can be closed by the # main thread. connection.inc_thread_sharing() connection.cursor() connections_dict[id(connection)] = connection try: for x in range(2): t = threading.Thread(target=runner) t.start() t.join() # Each created connection got different inner connection. self.assertEqual(len({conn.connection for conn in connections_dict.values()}), 3) finally: # Finish by closing the connections opened by the other threads # (the connection opened in the main thread will automatically be # closed on teardown). for conn in connections_dict.values(): if conn is not connection: if conn.allow_thread_sharing: conn.close() conn.dec_thread_sharing() def test_connections_thread_local(self): """ The connections are different for each thread (#17258). """ # Map connections by id because connections with identical aliases # have the same hash. connections_dict = {} for conn in connections.all(): connections_dict[id(conn)] = conn def runner(): from django.db import connections for conn in connections.all(): # Allow thread sharing so the connection can be closed by the # main thread. conn.inc_thread_sharing() connections_dict[id(conn)] = conn try: num_new_threads = 2 for x in range(num_new_threads): t = threading.Thread(target=runner) t.start() t.join() self.assertEqual( len(connections_dict), len(connections.all()) * (num_new_threads + 1), ) finally: # Finish by closing the connections opened by the other threads # (the connection opened in the main thread will automatically be # closed on teardown). for conn in connections_dict.values(): if conn is not connection: if conn.allow_thread_sharing: conn.close() conn.dec_thread_sharing() def test_pass_connection_between_threads(self): """ A connection can be passed from one thread to the other (#17258). """ Person.objects.create(first_name="John", last_name="Doe") def do_thread(): def runner(main_thread_connection): from django.db import connections connections['default'] = main_thread_connection try: Person.objects.get(first_name="John", last_name="Doe") except Exception as e: exceptions.append(e) t = threading.Thread(target=runner, args=[connections['default']]) t.start() t.join() # Without touching thread sharing, which should be False by default. exceptions = [] do_thread() # Forbidden! self.assertIsInstance(exceptions[0], DatabaseError) # After calling inc_thread_sharing() on the connection. connections['default'].inc_thread_sharing() try: exceptions = [] do_thread() # All good self.assertEqual(exceptions, []) finally: connections['default'].dec_thread_sharing() def test_closing_non_shared_connections(self): """ A connection that is not explicitly shareable cannot be closed by another thread (#17258). """ # First, without explicitly enabling the connection for sharing. 
exceptions = set() def runner1(): def runner2(other_thread_connection): try: other_thread_connection.close() except DatabaseError as e: exceptions.add(e) t2 = threading.Thread(target=runner2, args=[connections['default']]) t2.start() t2.join() t1 = threading.Thread(target=runner1) t1.start() t1.join() # The exception was raised self.assertEqual(len(exceptions), 1) # Then, with explicitly enabling the connection for sharing. exceptions = set() def runner1(): def runner2(other_thread_connection): try: other_thread_connection.close() except DatabaseError as e: exceptions.add(e) # Enable thread sharing connections['default'].inc_thread_sharing() try: t2 = threading.Thread(target=runner2, args=[connections['default']]) t2.start() t2.join() finally: connections['default'].dec_thread_sharing() t1 = threading.Thread(target=runner1) t1.start() t1.join() # No exception was raised self.assertEqual(len(exceptions), 0) def test_thread_sharing_count(self): self.assertIs(connection.allow_thread_sharing, False) connection.inc_thread_sharing() self.assertIs(connection.allow_thread_sharing, True) connection.inc_thread_sharing() self.assertIs(connection.allow_thread_sharing, True) connection.dec_thread_sharing() self.assertIs(connection.allow_thread_sharing, True) connection.dec_thread_sharing() self.assertIs(connection.allow_thread_sharing, False) msg = 'Cannot decrement the thread sharing count below zero.' with self.assertRaisesMessage(RuntimeError, msg): connection.dec_thread_sharing() class MySQLPKZeroTests(TestCase): """ Zero as id for AutoField should raise exception in MySQL, because MySQL does not allow zero for autoincrement primary key. """ @skipIfDBFeature('allows_auto_pk_0') def test_zero_as_autoval(self): with self.assertRaises(ValueError): Square.objects.create(id=0, root=0, square=1) class DBConstraintTestCase(TestCase): def test_can_reference_existent(self): obj = Object.objects.create() ref = ObjectReference.objects.create(obj=obj) self.assertEqual(ref.obj, obj) ref = ObjectReference.objects.get(obj=obj) self.assertEqual(ref.obj, obj) def test_can_reference_non_existent(self): self.assertFalse(Object.objects.filter(id=12345).exists()) ref = ObjectReference.objects.create(obj_id=12345) ref_new = ObjectReference.objects.get(obj_id=12345) self.assertEqual(ref, ref_new) with self.assertRaises(Object.DoesNotExist): ref.obj def test_many_to_many(self): obj = Object.objects.create() obj.related_objects.create() self.assertEqual(Object.objects.count(), 2) self.assertEqual(obj.related_objects.count(), 1) intermediary_model = Object._meta.get_field("related_objects").remote_field.through intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345) self.assertEqual(obj.related_objects.count(), 1) self.assertEqual(intermediary_model.objects.count(), 2)
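# ---------------------------------------------------------------------------
# Hedged usage sketch (standalone, not part of the test module above): the
# documented connection.queries API that BackendTestCase.test_queries
# exercises. With DEBUG=True, every executed statement is appended to the
# connection's log as a {'sql': ..., 'time': ...} dict, and reset_queries()
# empties the log. bare_select_suffix keeps the statement valid on backends
# (e.g. Oracle) that require a FROM clause.
# ---------------------------------------------------------------------------
from django.db import connection, reset_queries


def query_log_sketch():
    reset_queries()                          # start from an empty log
    with connection.cursor() as cursor:
        cursor.execute('SELECT 1' + connection.features.bare_select_suffix)
    entry = connection.queries[-1]           # the statement just executed
    return entry['sql'], entry['time']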
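# ---------------------------------------------------------------------------
# Hedged sketch placed ahead of the schema tests that follow: the basic
# schema_editor() workflow they build on -- create a table, add a column,
# drop the table. DemoModel and its app_label are hypothetical and exist
# only for illustration; the real tests use the models imported below.
# ---------------------------------------------------------------------------
from django.db import connection, models


class DemoModel(models.Model):
    name = models.CharField(max_length=50)

    class Meta:
        app_label = 'schema_sketch'  # hypothetical app label


def schema_editor_sketch():
    new_field = models.IntegerField(null=True)
    new_field.set_attributes_from_name('age')   # as the tests below do
    with connection.schema_editor() as editor:
        editor.create_model(DemoModel)          # CREATE TABLE
        editor.add_field(DemoModel, new_field)  # ALTER TABLE ... ADD COLUMN
        editor.delete_model(DemoModel)          # DROP TABLE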
import datetime import itertools import unittest from copy import copy from unittest import mock from django.core.management.color import no_style from django.db import ( DatabaseError, DataError, IntegrityError, OperationalError, connection, ) from django.db.models import ( CASCADE, PROTECT, AutoField, BigAutoField, BigIntegerField, BinaryField, BooleanField, CharField, CheckConstraint, DateField, DateTimeField, ForeignKey, ForeignObject, Index, IntegerField, ManyToManyField, Model, OneToOneField, PositiveIntegerField, Q, SlugField, SmallAutoField, SmallIntegerField, TextField, TimeField, UniqueConstraint, UUIDField, ) from django.db.transaction import TransactionManagementError, atomic from django.test import ( TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import CaptureQueriesContext, isolate_apps from django.utils import timezone from .fields import ( CustomManyToManyField, InheritedManyToManyField, MediumBlobField, ) from .models import ( Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, AuthorWithIndexedName, AuthorWithIndexedNameAndBirthday, AuthorWithUniqueName, AuthorWithUniqueNameAndBirthday, Book, BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps, ) class SchemaTests(TransactionTestCase): """ Tests for the schema-alteration code. Be aware that these tests are more liable than most to false results, as sometimes the code to check if a test has worked is almost as complex as the code it is testing. """ available_apps = [] models = [ Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Node, Note, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, ] # Utility functions def setUp(self): # local_models should contain test dependent model classes that will be # automatically removed from the app cache on test tear down. self.local_models = [] # isolated_local_models contains models that are in test methods # decorated with @isolate_apps. 
        self.isolated_local_models = []

    def tearDown(self):
        # Delete any tables made for our models
        self.delete_tables()
        new_apps.clear_cache()
        for model in new_apps.get_models():
            model._meta._expire_cache()
        if 'schema' in new_apps.all_models:
            for model in self.local_models:
                for many_to_many in model._meta.many_to_many:
                    through = many_to_many.remote_field.through
                    if through and through._meta.auto_created:
                        del new_apps.all_models['schema'][through._meta.model_name]
                del new_apps.all_models['schema'][model._meta.model_name]
        if self.isolated_local_models:
            with connection.schema_editor() as editor:
                for model in self.isolated_local_models:
                    editor.delete_model(model)

    def delete_tables(self):
        "Deletes all model tables for our models for a clean test environment"
        converter = connection.introspection.identifier_converter
        with connection.schema_editor() as editor:
            connection.disable_constraint_checking()
            table_names = connection.introspection.table_names()
            for model in itertools.chain(SchemaTests.models, self.local_models):
                tbl = converter(model._meta.db_table)
                if tbl in table_names:
                    editor.delete_model(model)
                    table_names.remove(tbl)
            connection.enable_constraint_checking()

    def column_classes(self, model):
        with connection.cursor() as cursor:
            columns = {
                d[0]: (connection.introspection.get_field_type(d[1], d), d)
                for d in connection.introspection.get_table_description(
                    cursor,
                    model._meta.db_table,
                )
            }
        # SQLite has a different format for field_type
        for name, (type, desc) in columns.items():
            if isinstance(type, tuple):
                columns[name] = (type[0], desc)
        # SQLite also doesn't error properly
        if not columns:
            raise DatabaseError("Table does not exist (empty pragma)")
        return columns

    def get_primary_key(self, table):
        with connection.cursor() as cursor:
            return connection.introspection.get_primary_key_column(cursor, table)

    def get_indexes(self, table):
        """
        Get the indexes on the table using a new cursor.
        """
        with connection.cursor() as cursor:
            return [
                c['columns'][0]
                for c in connection.introspection.get_constraints(cursor, table).values()
                if c['index'] and len(c['columns']) == 1
            ]

    def get_uniques(self, table):
        with connection.cursor() as cursor:
            return [
                c['columns'][0]
                for c in connection.introspection.get_constraints(cursor, table).values()
                if c['unique'] and len(c['columns']) == 1
            ]

    def get_constraints(self, table):
        """
        Get the constraints on a table using a new cursor.
        """
        with connection.cursor() as cursor:
            return connection.introspection.get_constraints(cursor, table)

    def get_constraints_for_column(self, model, column_name):
        constraints = self.get_constraints(model._meta.db_table)
        constraints_for_column = []
        for name, details in constraints.items():
            if details['columns'] == [column_name]:
                constraints_for_column.append(name)
        return sorted(constraints_for_column)

    def check_added_field_default(self, schema_editor, model, field, field_name, expected_default, cast_function=None):
        with connection.cursor() as cursor:
            schema_editor.add_field(model, field)
            cursor.execute("SELECT {} FROM {};".format(field_name, model._meta.db_table))
            database_default = cursor.fetchall()[0][0]
            if cast_function and type(database_default) is not type(expected_default):
                database_default = cast_function(database_default)
            self.assertEqual(database_default, expected_default)

    def get_constraints_count(self, table, column, fk_to):
        """
        Return a dict with keys 'fks', 'uniques', and 'indexes' indicating the
        number of foreign keys, unique constraints, and indexes on
        `table`.`column`.
The `fk_to` argument is a 2-tuple specifying the expected foreign key relationship's (table, column). """ with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, table) counts = {'fks': 0, 'uniques': 0, 'indexes': 0} for c in constraints.values(): if c['columns'] == [column]: if c['foreign_key'] == fk_to: counts['fks'] += 1 if c['unique']: counts['uniques'] += 1 elif c['index']: counts['indexes'] += 1 return counts def assertIndexOrder(self, table, index, order): constraints = self.get_constraints(table) self.assertIn(index, constraints) index_orders = constraints[index]['orders'] self.assertTrue(all(val == expected for val, expected in zip(index_orders, order))) def assertForeignKeyExists(self, model, column, expected_fk_table, field='id'): """ Fail if the FK constraint on `model.Meta.db_table`.`column` to `expected_fk_table`.id doesn't exist. """ constraints = self.get_constraints(model._meta.db_table) constraint_fk = None for details in constraints.values(): if details['columns'] == [column] and details['foreign_key']: constraint_fk = details['foreign_key'] break self.assertEqual(constraint_fk, (expected_fk_table, field)) def assertForeignKeyNotExists(self, model, column, expected_fk_table): with self.assertRaises(AssertionError): self.assertForeignKeyExists(model, column, expected_fk_table) # Tests def test_creation_deletion(self): """ Tries creating a model's table, and then deleting it. """ with connection.schema_editor() as editor: # Create the table editor.create_model(Author) # The table is there list(Author.objects.all()) # Clean up that table editor.delete_model(Author) # No deferred SQL should be left over. self.assertEqual(editor.deferred_sql, []) # The table is gone with self.assertRaises(DatabaseError): list(Author.objects.all()) @skipUnlessDBFeature('supports_foreign_keys') def test_fk(self): "Creating tables out of FK order, then repointing, works" # Create the table with connection.schema_editor() as editor: editor.create_model(Book) editor.create_model(Author) editor.create_model(Tag) # Initial tables are there list(Author.objects.all()) list(Book.objects.all()) # Make sure the FK constraint is present with self.assertRaises(IntegrityError): Book.objects.create( author_id=1, title="Much Ado About Foreign Keys", pub_date=datetime.datetime.now(), ) # Repoint the FK constraint old_field = Book._meta.get_field("author") new_field = ForeignKey(Tag, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) self.assertForeignKeyExists(Book, 'author_id', 'schema_tag') @skipUnlessDBFeature('can_create_inline_fk') def test_inline_fk(self): # Create some tables. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) editor.create_model(Note) self.assertForeignKeyNotExists(Note, 'book_id', 'schema_book') # Add a foreign key from one to the other. with connection.schema_editor() as editor: new_field = ForeignKey(Book, CASCADE) new_field.set_attributes_from_name('book') editor.add_field(Note, new_field) self.assertForeignKeyExists(Note, 'book_id', 'schema_book') # Creating a FK field with a constraint uses a single statement without # a deferred ALTER TABLE. 
self.assertFalse([ sql for sql in (str(statement) for statement in editor.deferred_sql) if sql.startswith('ALTER TABLE') and 'ADD CONSTRAINT' in sql ]) @skipUnlessDBFeature('can_create_inline_fk') def test_add_inline_fk_update_data(self): with connection.schema_editor() as editor: editor.create_model(Node) # Add an inline foreign key and update data in the same transaction. new_field = ForeignKey(Node, CASCADE, related_name='new_fk', null=True) new_field.set_attributes_from_name('new_parent_fk') parent = Node.objects.create() with connection.schema_editor() as editor: editor.add_field(Node, new_field) editor.execute('UPDATE schema_node SET new_parent_fk_id = %s;', [parent.pk]) self.assertIn('new_parent_fk_id', self.get_indexes(Node._meta.db_table)) @skipUnlessDBFeature( 'can_create_inline_fk', 'allows_multiple_constraints_on_same_fields', ) @isolate_apps('schema') def test_add_inline_fk_index_update_data(self): class Node(Model): class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Node) # Add an inline foreign key, update data, and an index in the same # transaction. new_field = ForeignKey(Node, CASCADE, related_name='new_fk', null=True) new_field.set_attributes_from_name('new_parent_fk') parent = Node.objects.create() with connection.schema_editor() as editor: editor.add_field(Node, new_field) Node._meta.add_field(new_field) editor.execute('UPDATE schema_node SET new_parent_fk_id = %s;', [parent.pk]) editor.add_index(Node, Index(fields=['new_parent_fk'], name='new_parent_inline_fk_idx')) self.assertIn('new_parent_fk_id', self.get_indexes(Node._meta.db_table)) @skipUnlessDBFeature('supports_foreign_keys') def test_char_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorCharFieldWithIndex) # Change CharField to FK old_field = AuthorCharFieldWithIndex._meta.get_field('char_field') new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name('char_field') with connection.schema_editor() as editor: editor.alter_field(AuthorCharFieldWithIndex, old_field, new_field, strict=True) self.assertForeignKeyExists(AuthorCharFieldWithIndex, 'char_field_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') @skipUnlessDBFeature('supports_index_on_text_field') def test_text_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorTextFieldWithIndex) # Change TextField to FK old_field = AuthorTextFieldWithIndex._meta.get_field('text_field') new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name('text_field') with connection.schema_editor() as editor: editor.alter_field(AuthorTextFieldWithIndex, old_field, new_field, strict=True) self.assertForeignKeyExists(AuthorTextFieldWithIndex, 'text_field_id', 'schema_author') @isolate_apps('schema') def test_char_field_pk_to_auto_field(self): class Foo(Model): id = CharField(max_length=255, primary_key=True) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field('id') new_field = AutoField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Foo with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) @skipUnlessDBFeature('supports_foreign_keys') def test_fk_to_proxy(self): "Creating a FK 
to a proxy model creates database constraints." class AuthorProxy(Author): class Meta: app_label = 'schema' apps = new_apps proxy = True class AuthorRef(Model): author = ForeignKey(AuthorProxy, on_delete=CASCADE) class Meta: app_label = 'schema' apps = new_apps self.local_models = [AuthorProxy, AuthorRef] # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorRef) self.assertForeignKeyExists(AuthorRef, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_fk_db_constraint(self): "The db_constraint parameter is respected" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Author) editor.create_model(BookWeak) # Initial tables are there list(Author.objects.all()) list(Tag.objects.all()) list(BookWeak.objects.all()) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') # Make a db_constraint=False FK new_field = ForeignKey(Tag, CASCADE, db_constraint=False) new_field.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.add_field(Author, new_field) self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag') # Alter to one with a constraint new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) self.assertForeignKeyExists(Author, 'tag_id', 'schema_tag') # Alter to one without a constraint again new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field2, new_field, strict=True) self.assertForeignKeyNotExists(Author, 'tag_id', 'schema_tag') @isolate_apps('schema') def test_no_db_constraint_added_during_primary_key_change(self): """ When a primary key that's pointed to by a ForeignKey with db_constraint=False is altered, a foreign key constraint isn't added. """ class Author(Model): class Meta: app_label = 'schema' class BookWeak(Model): author = ForeignKey(Author, CASCADE, db_constraint=False) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWeak) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') old_field = Author._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.model = Author new_field.set_attributes_from_name('id') # @isolate_apps() and inner models are needed to have the model # relations populated, otherwise this doesn't act as a regression test. 
self.assertEqual(len(new_field.model._meta.related_objects), 1) with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertForeignKeyNotExists(BookWeak, 'author_id', 'schema_author') def _test_m2m_db_constraint(self, M2MFieldClass): class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(LocalAuthorWithM2M) # Initial tables are there list(LocalAuthorWithM2M.objects.all()) list(Tag.objects.all()) # Make a db_constraint=False FK new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False) new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) self.assertForeignKeyNotExists(new_field.remote_field.through, 'tag_id', 'schema_tag') @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint(self): self._test_m2m_db_constraint(ManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_custom(self): self._test_m2m_db_constraint(CustomManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_inherited(self): self._test_m2m_db_constraint(InheritedManyToManyField) def test_add_field(self): """ Tests adding fields to models """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add the new field new_field = IntegerField(null=True) new_field.set_attributes_from_name("age") with CaptureQueriesContext(connection) as ctx, connection.schema_editor() as editor: editor.add_field(Author, new_field) drop_default_sql = editor.sql_alter_column_no_default % { 'column': editor.quote_name(new_field.name), } self.assertFalse(any(drop_default_sql in query['sql'] for query in ctx.captured_queries)) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['age'][0], "IntegerField") self.assertTrue(columns['age'][1][6]) def test_add_field_remove_field(self): """ Adding a field and removing it removes all deferred sql referring to it. """ with connection.schema_editor() as editor: # Create a table with a unique constraint on the slug field. editor.create_model(Tag) # Remove the slug column. 
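            # Removing the column must also discard the deferred ALTER TABLE
            # statement that would have added the unique constraint on it;
            # the assertion below checks that deferred_sql ends up empty.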
editor.remove_field(Tag, Tag._meta.get_field('slug'))
        self.assertEqual(editor.deferred_sql, [])

    def test_add_field_temp_default(self):
        """
        Tests adding fields to models with a temporary default
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field
        new_field = CharField(max_length=30, default="Godwin")
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['surname'][0], "CharField")
        self.assertEqual(columns['surname'][1][6],
                         connection.features.interprets_empty_strings_as_nulls)

    def test_add_field_temp_default_boolean(self):
        """
        Tests adding fields to models with a temporary default where
        the default is False. (#21783)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field
        new_field = BooleanField(default=False)
        new_field.set_attributes_from_name("awesome")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        # BooleanFields are stored as TINYINT(1) on MySQL.
        field_type = columns['awesome'][0]
        self.assertEqual(field_type, connection.features.introspected_boolean_field_type)

    def test_add_field_default_transform(self):
        """
        Tests adding fields to models with a default that is not directly
        valid in the database (#22581)
        """
        class TestTransformField(IntegerField):

            # Weird field that saves the count of items in its value
            def get_default(self):
                return self.default

            def get_prep_value(self, value):
                if value is None:
                    return 0
                return len(value)

        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add the field with a default that must be cast (the dict default is
        # stored as its item count)
        new_field = TestTransformField(default={1: 2})
        new_field.set_attributes_from_name("thing")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is there
        columns = self.column_classes(Author)
        field_type, field_info = columns['thing']
        self.assertEqual(field_type, 'IntegerField')
        # Make sure the values were transformed correctly
        self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)

    def test_add_field_binary(self):
        """
        Tests binary fields get a sane default (#22851)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the new field
        new_field = BinaryField(blank=True)
        new_field.set_attributes_from_name("bits")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        # MySQL annoyingly uses the same backend, so it'll come back as one of
        # these two types.
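        # (A rough sketch of what introspection reports, not an exhaustive
        # list: SQLite and PostgreSQL both come back as BinaryField, while
        # MySQL's blob types are introspected as TextField.)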
self.assertIn(columns['bits'][0], ("BinaryField", "TextField")) @unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific") def test_add_binaryfield_mediumblob(self): """ Test adding a custom-sized binary field on MySQL (#24846). """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field with default new_field = MediumBlobField(blank=True, default=b'123') new_field.set_attributes_from_name('bits') with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # Introspection treats BLOBs as TextFields self.assertEqual(columns['bits'][0], "TextField") def test_alter(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) # Alter the name field to a TextField old_field = Author._meta.get_field("name") new_field = TextField(null=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertTrue(columns['name'][1][6]) # Change nullability again new_field2 = TextField(null=False) new_field2.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) def test_alter_auto_field_to_integer_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to IntegerField old_field = Author._meta.get_field('id') new_field = IntegerField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) def test_alter_auto_field_to_char_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to CharField old_field = Author._meta.get_field('id') new_field = CharField(primary_key=True, max_length=50) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @isolate_apps('schema') def test_alter_auto_field_quoted_db_column(self): class Foo(Model): id = AutoField(primary_key=True, db_column='"quoted_id"') class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.model = Foo new_field.db_column = '"quoted_id"' new_field.set_attributes_from_name('id') with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) Foo.objects.create() def test_alter_not_unique_field_to_primary_key(self): # Create the table. 
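        # Promoting 'uuid' to primary key requires dropping the implicit
        # 'id' column first, so both operations run in one schema_editor
        # block below.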
with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Change UUIDField to primary key.
        old_field = Author._meta.get_field('uuid')
        new_field = UUIDField(primary_key=True)
        new_field.set_attributes_from_name('uuid')
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.remove_field(Author, Author._meta.get_field('id'))
            editor.alter_field(Author, old_field, new_field, strict=True)

    @isolate_apps('schema')
    def test_alter_primary_key_quoted_db_table(self):
        class Foo(Model):
            class Meta:
                app_label = 'schema'
                db_table = '"foo"'

        with connection.schema_editor() as editor:
            editor.create_model(Foo)
        self.isolated_local_models = [Foo]
        old_field = Foo._meta.get_field('id')
        new_field = BigAutoField(primary_key=True)
        new_field.model = Foo
        new_field.set_attributes_from_name('id')
        with connection.schema_editor() as editor:
            editor.alter_field(Foo, old_field, new_field, strict=True)
        Foo.objects.create()

    def test_alter_text_field(self):
        # Regression for "BLOB/TEXT column 'info' can't have a default value"
        # on MySQL.
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        old_field = Note._meta.get_field("info")
        new_field = TextField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)

    @skipUnlessDBFeature('can_defer_constraint_checks', 'can_rollback_ddl')
    def test_alter_fk_checks_deferred_constraints(self):
        """
        #25492 - Altering a foreign key's structure and data in the same
        transaction.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Node)
        old_field = Node._meta.get_field('parent')
        new_field = ForeignKey(Node, CASCADE)
        new_field.set_attributes_from_name('parent')
        parent = Node.objects.create()
        with connection.schema_editor() as editor:
            # Update the parent FK to create a deferred constraint check.
            Node.objects.update(parent=parent)
            editor.alter_field(Node, old_field, new_field, strict=True)

    def test_alter_text_field_to_date_field(self):
        """
        #25002 - Test conversion of text field to date field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info='1988-05-05')
        old_field = Note._meta.get_field('info')
        new_field = DateField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])

    def test_alter_text_field_to_datetime_field(self):
        """
        #25002 - Test conversion of text field to datetime field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info='1988-05-05 3:16:17.4567')
        old_field = Note._meta.get_field('info')
        new_field = DateTimeField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])

    def test_alter_text_field_to_time_field(self):
        """
        #25002 - Test conversion of text field to time field.
""" with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='3:16:17.4567') old_field = Note._meta.get_field('info') new_field = TimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_alter_textual_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = CharField(max_length=50) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): Note.objects.create(info=None) def test_alter_numeric_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(UniqueTest) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='aaa') old_field = UniqueTest._meta.get_field("year") new_field = BigIntegerField() new_field.set_attributes_from_name("year") with connection.schema_editor() as editor: editor.alter_field(UniqueTest, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='bbb') def test_alter_null_to_not_null(self): """ #23609 - Tests handling of default values when altering from NULL to NOT NULL. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertTrue(columns['height'][1][6]) # Create some test data Author.objects.create(name='Not null author', height=12) Author.objects.create(name='Null author') # Verify null value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertIsNone(Author.objects.get(name='Null author').height) # Alter the height field to NOT NULL with default old_field = Author._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertFalse(columns['height'][1][6]) # Verify default value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertEqual(Author.objects.get(name='Null author').height, 42) def test_alter_charfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a CharField to null. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change the CharField to null old_field = Author._meta.get_field('name') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific') def test_alter_char_field_decrease_length(self): # Create the table. 
with connection.schema_editor() as editor:
            editor.create_model(Author)
        Author.objects.create(name='x' * 255)
        # Change max_length of CharField.
        old_field = Author._meta.get_field('name')
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name('name')
        with connection.schema_editor() as editor:
            msg = 'value too long for type character varying(254)'
            with self.assertRaisesMessage(DataError, msg):
                editor.alter_field(Author, old_field, new_field, strict=True)

    @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific')
    def test_alter_field_with_custom_db_type(self):
        from django.contrib.postgres.fields import ArrayField

        class Foo(Model):
            field = ArrayField(CharField(max_length=255))

            class Meta:
                app_label = 'schema'

        with connection.schema_editor() as editor:
            editor.create_model(Foo)
        self.isolated_local_models = [Foo]
        old_field = Foo._meta.get_field('field')
        new_field = ArrayField(CharField(max_length=16))
        new_field.set_attributes_from_name('field')
        new_field.model = Foo
        with connection.schema_editor() as editor:
            editor.alter_field(Foo, old_field, new_field, strict=True)

    @isolate_apps('schema')
    @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific')
    def test_alter_array_field_decrease_base_field_length(self):
        from django.contrib.postgres.fields import ArrayField

        class ArrayModel(Model):
            field = ArrayField(CharField(max_length=16))

            class Meta:
                app_label = 'schema'

        with connection.schema_editor() as editor:
            editor.create_model(ArrayModel)
        self.isolated_local_models = [ArrayModel]
        ArrayModel.objects.create(field=['x' * 16])
        old_field = ArrayModel._meta.get_field('field')
        new_field = ArrayField(CharField(max_length=15))
        new_field.set_attributes_from_name('field')
        new_field.model = ArrayModel
        with connection.schema_editor() as editor:
            msg = 'value too long for type character varying(15)'
            with self.assertRaisesMessage(DataError, msg):
                editor.alter_field(ArrayModel, old_field, new_field, strict=True)

    @isolate_apps('schema')
    @unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific')
    def test_alter_array_field_decrease_nested_base_field_length(self):
        from django.contrib.postgres.fields import ArrayField

        class ArrayModel(Model):
            field = ArrayField(ArrayField(CharField(max_length=16)))

            class Meta:
                app_label = 'schema'

        with connection.schema_editor() as editor:
            editor.create_model(ArrayModel)
        self.isolated_local_models = [ArrayModel]
        ArrayModel.objects.create(field=[['x' * 16]])
        old_field = ArrayModel._meta.get_field('field')
        new_field = ArrayField(ArrayField(CharField(max_length=15)))
        new_field.set_attributes_from_name('field')
        new_field.model = ArrayModel
        with connection.schema_editor() as editor:
            msg = 'value too long for type character varying(15)'
            with self.assertRaisesMessage(DataError, msg):
                editor.alter_field(ArrayModel, old_field, new_field, strict=True)

    def test_alter_textfield_to_null(self):
        """
        #24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_nulls when changing a TextField to null.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Change the TextField to null
        old_field = Note._meta.get_field('info')
        new_field = copy(old_field)
        new_field.null = True
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)

    @skipUnlessDBFeature('supports_combined_alters')
    def test_alter_null_to_not_null_keeping_default(self):
        """
        #23738 - Can change a nullable field with default to non-nullable
        with the same default.
""" # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) # Ensure the field is right to begin with columns = self.column_classes(AuthorWithDefaultHeight) self.assertTrue(columns['height'][1][6]) # Alter the height field to NOT NULL keeping the previous default old_field = AuthorWithDefaultHeight._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(AuthorWithDefaultHeight) self.assertFalse(columns['height'][1][6]) @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk(self): """ Tests altering of FKs """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") self.assertForeignKeyExists(Book, 'author_id', 'schema_author') # Alter the FK old_field = Book._meta.get_field("author") new_field = ForeignKey(Author, CASCADE, editable=False) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") self.assertForeignKeyExists(Book, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_alter_to_fk(self): """ #24447 - Tests adding a FK constraint for an existing column """ class LocalBook(Model): author = IntegerField() title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBook] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBook) # Ensure no FK constraint exists constraints = self.get_constraints(LocalBook._meta.db_table) for details in constraints.values(): if details['foreign_key']: self.fail('Found an unexpected FK constraint to %s' % details['columns']) old_field = LocalBook._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(LocalBook, old_field, new_field, strict=True) self.assertForeignKeyExists(LocalBook, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_alter_o2o_to_fk(self): """ #24163 - Tests altering of OneToOneField to ForeignKey """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) # Ensure the field is right to begin with columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is unique author = Author.objects.create(name="Joe") BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) BookWithO2O.objects.all().delete() self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author') # Alter the OneToOneField to ForeignKey old_field = BookWithO2O._meta.get_field("author") new_field = ForeignKey(Author, 
CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is not unique anymore Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) self.assertForeignKeyExists(Book, 'author_id', 'schema_author') @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk_to_o2o(self): """ #24163 - Tests altering of ForeignKey to OneToOneField """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is not unique author = Author.objects.create(name="Joe") Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) Book.objects.all().delete() self.assertForeignKeyExists(Book, 'author_id', 'schema_author') # Alter the ForeignKey to OneToOneField old_field = Book._meta.get_field("author") new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is unique now BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) self.assertForeignKeyExists(BookWithO2O, 'author_id', 'schema_author') def test_alter_field_fk_to_o2o(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the index is right to begin with. counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) old_field = Book._meta.get_field('author') new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The index on ForeignKey is replaced with a unique constraint for OneToOneField. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) def test_alter_field_fk_keeps_index(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the index is right to begin with. 
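        # A plain ForeignKey is backed by an index (plus the FK constraint
        # where the backend supports one) but no unique constraint; the
        # expected counts below encode exactly that.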
counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) old_field = Book._meta.get_field('author') # on_delete changed from CASCADE. new_field = ForeignKey(Author, PROTECT) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The index remains. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) def test_alter_field_o2o_to_fk(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the unique constraint is right to begin with. counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) old_field = BookWithO2O._meta.get_field('author') new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The unique constraint on OneToOneField is replaced with an index for ForeignKey. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) def test_alter_field_o2o_keeps_unique(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the unique constraint is right to begin with. counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) old_field = BookWithO2O._meta.get_field('author') # on_delete changed from CASCADE. new_field = OneToOneField(Author, PROTECT) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The unique constraint remains. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) @skipUnlessDBFeature('ignores_table_name_case') def test_alter_db_table_case(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Alter the case of the table old_table_name = Author._meta.db_table with connection.schema_editor() as editor: editor.alter_db_table(Author, old_table_name, old_table_name.upper()) def test_alter_implicit_id_to_explicit(self): """ Should be able to convert an implicit "id" field to an explicit "id" primary key field. 
""" with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("id") new_field = AutoField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # This will fail if DROP DEFAULT is inadvertently executed on this # field which drops the id sequence, at least on PostgreSQL. Author.objects.create(name='Foo') Author.objects.create(name='Bar') def test_alter_autofield_pk_to_bigautofield_pk_sequence_owner(self): """ Converting an implicit PK to BigAutoField(primary_key=True) should keep a sequence owner on PostgreSQL. """ with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field('id') new_field = BigAutoField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) Author.objects.create(name='Foo', pk=1) with connection.cursor() as cursor: sequence_reset_sqls = connection.ops.sequence_reset_sql(no_style(), [Author]) if sequence_reset_sqls: cursor.execute(sequence_reset_sqls[0]) # Fail on PostgreSQL if sequence is missing an owner. self.assertIsNotNone(Author.objects.create(name='Bar')) def test_alter_autofield_pk_to_smallautofield_pk_sequence_owner(self): """ Converting an implicit PK to SmallAutoField(primary_key=True) should keep a sequence owner on PostgreSQL. """ with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field('id') new_field = SmallAutoField(primary_key=True) new_field.set_attributes_from_name('id') new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) Author.objects.create(name='Foo', pk=1) with connection.cursor() as cursor: sequence_reset_sqls = connection.ops.sequence_reset_sql(no_style(), [Author]) if sequence_reset_sqls: cursor.execute(sequence_reset_sqls[0]) # Fail on PostgreSQL if sequence is missing an owner. self.assertIsNotNone(Author.objects.create(name='Bar')) def test_alter_int_pk_to_autofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to AutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field('i') new_field = AutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) def test_alter_int_pk_to_bigautofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to BigAutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field('i') new_field = BigAutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) @isolate_apps('schema') def test_alter_smallint_pk_to_smallautofield_pk(self): """ Should be able to rename an SmallIntegerField(primary_key=True) to SmallAutoField(primary_key=True). 
""" class SmallIntegerPK(Model): i = SmallIntegerField(primary_key=True) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(SmallIntegerPK) self.isolated_local_models = [SmallIntegerPK] old_field = SmallIntegerPK._meta.get_field('i') new_field = SmallAutoField(primary_key=True) new_field.model = SmallIntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True) def test_alter_int_pk_to_int_unique(self): """ Should be able to rename an IntegerField(primary_key=True) to IntegerField(unique=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) # Delete the old PK old_field = IntegerPK._meta.get_field('i') new_field = IntegerField(unique=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # The primary key constraint is gone. Result depends on database: # 'id' for SQLite, None for others (must not be 'i'). self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ('id', None)) # Set up a model class as it currently stands. The original IntegerPK # class is now out of date and some backends make use of the whole # model class when modifying a field (such as sqlite3 when remaking a # table) so an outdated model class leads to incorrect results. class Transitional(Model): i = IntegerField(unique=True) j = IntegerField(unique=True) class Meta: app_label = 'schema' apps = new_apps db_table = 'INTEGERPK' # model requires a new PK old_field = Transitional._meta.get_field('j') new_field = IntegerField(primary_key=True) new_field.model = Transitional new_field.set_attributes_from_name('j') with connection.schema_editor() as editor: editor.alter_field(Transitional, old_field, new_field, strict=True) # Create a model class representing the updated model. class IntegerUnique(Model): i = IntegerField(unique=True) j = IntegerField(primary_key=True) class Meta: app_label = 'schema' apps = new_apps db_table = 'INTEGERPK' # Ensure unique constraint works. 
IntegerUnique.objects.create(i=1, j=1)
        with self.assertRaises(IntegrityError):
            IntegerUnique.objects.create(i=1, j=2)

    def test_rename(self):
        """
        Tests renaming a field.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        self.assertNotIn("display_name", columns)
        # Rename the 'name' field to 'display_name'
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name("display_name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['display_name'][0], "CharField")
        self.assertNotIn("name", columns)

    @isolate_apps('schema')
    def test_rename_referenced_field(self):
        class Author(Model):
            name = CharField(max_length=255, unique=True)

            class Meta:
                app_label = 'schema'

        class Book(Model):
            author = ForeignKey(Author, CASCADE, to_field='name')

            class Meta:
                app_label = 'schema'

        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        new_field = CharField(max_length=255, unique=True)
        new_field.set_attributes_from_name('renamed')
        with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor:
            editor.alter_field(Author, Author._meta.get_field('name'), new_field)
        # Ensure the foreign key reference was updated.
        self.assertForeignKeyExists(Book, 'author_id', 'schema_author', 'renamed')

    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_rename_keep_null_status(self):
        """
        Renaming a field shouldn't affect the not null status.
""" with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = TextField() new_field.set_attributes_from_name("detail_info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns['detail_info'][0], "TextField") self.assertNotIn("info", columns) with self.assertRaises(IntegrityError): NoteRename.objects.create(detail_info=None) def _test_m2m_create(self, M2MFieldClass): """ Tests M2M fields on models during creation """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBookWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2M) # Ensure there is now an m2m table there columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through) self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField") def test_m2m_create(self): self._test_m2m_create(ManyToManyField) def test_m2m_create_custom(self): self._test_m2m_create(CustomManyToManyField) def test_m2m_create_inherited(self): self._test_m2m_create(InheritedManyToManyField) def _test_m2m_create_through(self, M2MFieldClass): """ Tests M2M fields on models during creation with through models """ class LocalTagThrough(Model): book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalBookWithM2MThrough(Model): tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalTagThrough, LocalBookWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalTagThrough) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2MThrough) # Ensure there is now an m2m table there columns = self.column_classes(LocalTagThrough) self.assertEqual(columns['book_id'][0], "IntegerField") self.assertEqual(columns['tag_id'][0], "IntegerField") def test_m2m_create_through(self): self._test_m2m_create_through(ManyToManyField) def test_m2m_create_through_custom(self): self._test_m2m_create_through(CustomManyToManyField) def test_m2m_create_through_inherited(self): self._test_m2m_create_through(InheritedManyToManyField) def _test_m2m(self, M2MFieldClass): """ Tests adding/removing M2M fields on models """ class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorWithM2M) editor.create_model(TagM2MTest) # Create an M2M field new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors") new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) # Ensure there is now an m2m table there columns = 
self.column_classes(new_field.remote_field.through) self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField") # "Alter" the field. This should not rename the DB table to itself. with connection.schema_editor() as editor: editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True) # Remove the M2M table again with connection.schema_editor() as editor: editor.remove_field(LocalAuthorWithM2M, new_field) # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Make sure the model state is coherent with the table one now that # we've removed the tags field. opts = LocalAuthorWithM2M._meta opts.local_many_to_many.remove(new_field) del new_apps.all_models['schema'][new_field.remote_field.through._meta.model_name] opts._expire_cache() def test_m2m(self): self._test_m2m(ManyToManyField) def test_m2m_custom(self): self._test_m2m(CustomManyToManyField) def test_m2m_inherited(self): self._test_m2m(InheritedManyToManyField) def _test_m2m_through_alter(self, M2MFieldClass): """ Tests altering M2Ms with explicit through models (should no-op) """ class LocalAuthorTag(Model): author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalAuthorWithM2MThrough(Model): name = CharField(max_length=255) tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorTag) editor.create_model(LocalAuthorWithM2MThrough) editor.create_model(TagM2MTest) # Ensure the m2m table is there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) # "Alter" the field's blankness. This should not actually do anything. 
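        # With an explicit through model Django doesn't manage the join
        # table itself, so alter_field() must leave the schema untouched.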
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags") new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag) new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags") with connection.schema_editor() as editor: editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field, strict=True) # Ensure the m2m table is still there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) def test_m2m_through_alter(self): self._test_m2m_through_alter(ManyToManyField) def test_m2m_through_alter_custom(self): self._test_m2m_through_alter(CustomManyToManyField) def test_m2m_through_alter_inherited(self): self._test_m2m_through_alter(InheritedManyToManyField) def _test_m2m_repoint(self, M2MFieldClass): """ Tests repointing M2M fields """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBookWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBookWithM2M) editor.create_model(TagM2MTest) editor.create_model(UniqueTest) # Ensure the M2M exists and points to TagM2MTest if connection.features.supports_foreign_keys: self.assertForeignKeyExists( LocalBookWithM2M._meta.get_field("tags").remote_field.through, 'tagm2mtest_id', 'schema_tagm2mtest', ) # Repoint the M2M old_field = LocalBookWithM2M._meta.get_field("tags") new_field = M2MFieldClass(UniqueTest) new_field.contribute_to_class(LocalBookWithM2M, "uniques") with connection.schema_editor() as editor: editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True) # Ensure old M2M is gone with self.assertRaises(DatabaseError): self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through) # This model looks like the new model and is used for teardown. opts = LocalBookWithM2M._meta opts.local_many_to_many.remove(old_field) # Ensure the new M2M exists and points to UniqueTest if connection.features.supports_foreign_keys: self.assertForeignKeyExists(new_field.remote_field.through, 'uniquetest_id', 'schema_uniquetest') def test_m2m_repoint(self): self._test_m2m_repoint(ManyToManyField) def test_m2m_repoint_custom(self): self._test_m2m_repoint(CustomManyToManyField) def test_m2m_repoint_inherited(self): self._test_m2m_repoint(InheritedManyToManyField) @isolate_apps('schema') def test_m2m_rename_field_in_target_model(self): class LocalTagM2MTest(Model): title = CharField(max_length=255) class Meta: app_label = 'schema' class LocalM2M(Model): tags = ManyToManyField(LocalTagM2MTest) class Meta: app_label = 'schema' # Create the tables. with connection.schema_editor() as editor: editor.create_model(LocalM2M) editor.create_model(LocalTagM2MTest) self.isolated_local_models = [LocalM2M, LocalTagM2MTest] # Ensure the m2m table is there. self.assertEqual(len(self.column_classes(LocalM2M)), 1) # Alter a field in LocalTagM2MTest. old_field = LocalTagM2MTest._meta.get_field('title') new_field = CharField(max_length=254) new_field.contribute_to_class(LocalTagM2MTest, 'title1') # @isolate_apps() and inner models are needed to have the model # relations populated, otherwise this doesn't act as a regression test. 
self.assertEqual(len(new_field.model._meta.related_objects), 1) with connection.schema_editor() as editor: editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True) # Ensure the m2m table is still there. self.assertEqual(len(self.column_classes(LocalM2M)), 1) @skipUnlessDBFeature('supports_column_check_constraints', 'can_introspect_check_constraints') def test_check_constraints(self): """ Tests creating/deleting CHECK constraints """ # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the constraint exists constraints = self.get_constraints(Author._meta.db_table) if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()): self.fail("No check constraint for height found") # Alter the column to remove it old_field = Author._meta.get_field("height") new_field = IntegerField(null=True, blank=True) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) constraints = self.get_constraints(Author._meta.db_table) for details in constraints.values(): if details['columns'] == ["height"] and details['check']: self.fail("Check constraint for height found") # Alter the column to re-add it new_field2 = Author._meta.get_field("height") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) constraints = self.get_constraints(Author._meta.db_table) if not any(details['columns'] == ['height'] and details['check'] for details in constraints.values()): self.fail("No check constraint for height found") @skipUnlessDBFeature('supports_column_check_constraints', 'can_introspect_check_constraints') def test_remove_field_check_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(Author) # Add the custom check constraint constraint = CheckConstraint(check=Q(height__gte=0), name='author_height_gte_0_check') custom_constraint_name = constraint.name Author._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) # Ensure the constraints exist constraints = self.get_constraints(Author._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Alter the column to remove field check old_field = Author._meta.get_field('height') new_field = IntegerField(null=True, blank=True) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) constraints = self.get_constraints(Author._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['height'] and details['check'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Alter the column to re-add field check new_field2 = Author._meta.get_field('height') with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) constraints = self.get_constraints(Author._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['height'] and details['check'] and name 
!= custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the check constraint with connection.schema_editor() as editor: Author._meta.constraints = [] editor.remove_constraint(Author, constraint) def test_unique(self): """ Tests removing and adding unique constraints to a single column. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the field is unique to begin with Tag.objects.create(title="foo", slug="foo") with self.assertRaises(IntegrityError): Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Alter the slug field to be non-unique old_field = Tag._meta.get_field("slug") new_field = SlugField(unique=False) new_field.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) # Ensure the field is no longer unique Tag.objects.create(title="foo", slug="foo") Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Alter the slug field to be unique new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field, new_field2, strict=True) # Ensure the field is unique again Tag.objects.create(title="foo", slug="foo") with self.assertRaises(IntegrityError): Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Rename the field new_field3 = SlugField(unique=True) new_field3.set_attributes_from_name("slug2") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field2, new_field3, strict=True) # Ensure the field is still unique TagUniqueRename.objects.create(title="foo", slug2="foo") with self.assertRaises(IntegrityError): TagUniqueRename.objects.create(title="bar", slug2="foo") Tag.objects.all().delete() def test_unique_name_quoting(self): old_table_name = TagUniqueRename._meta.db_table try: with connection.schema_editor() as editor: editor.create_model(TagUniqueRename) editor.alter_db_table(TagUniqueRename, old_table_name, 'unique-table') TagUniqueRename._meta.db_table = 'unique-table' # This fails if the unique index name isn't quoted. editor.alter_unique_together(TagUniqueRename, [], (('title', 'slug2'),)) finally: TagUniqueRename._meta.db_table = old_table_name @isolate_apps('schema') @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') @skipUnlessDBFeature('supports_foreign_keys') def test_unique_no_unnecessary_fk_drops(self): """ If AlterField isn't selective about dropping foreign key constraints when modifying a field with a unique constraint, the AlterField incorrectly drops and recreates the Book.author foreign key even though it doesn't restrict the field being changed (#29193). """ class Author(Model): name = CharField(max_length=254, unique=True) class Meta: app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) new_field = CharField(max_length=255, unique=True) new_field.model = Author new_field.set_attributes_from_name('name') with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm: with connection.schema_editor() as editor: editor.alter_field(Author, Author._meta.get_field('name'), new_field) # One SQL statement is executed to alter the field. 
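        # If the Book.author FK had been dropped and recreated, the captured
        # log would contain the extra DDL statements, so a single record
        # proves the constraint was left alone.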
self.assertEqual(len(cm.records), 1) @isolate_apps('schema') @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite remakes the table on field alteration.') def test_unique_and_reverse_m2m(self): """ AlterField can modify a unique field when there's a reverse M2M relation on the model. """ class Tag(Model): title = CharField(max_length=255) slug = SlugField(unique=True) class Meta: app_label = 'schema' class Book(Model): tags = ManyToManyField(Tag, related_name='books') class Meta: app_label = 'schema' self.isolated_local_models = [Book._meta.get_field('tags').remote_field.through] with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Book) new_field = SlugField(max_length=75, unique=True) new_field.model = Tag new_field.set_attributes_from_name('slug') with self.assertLogs('django.db.backends.schema', 'DEBUG') as cm: with connection.schema_editor() as editor: editor.alter_field(Tag, Tag._meta.get_field('slug'), new_field) # One SQL statement is executed to alter the field. self.assertEqual(len(cm.records), 1) # Ensure that the field is still unique. Tag.objects.create(title='foo', slug='foo') with self.assertRaises(IntegrityError): Tag.objects.create(title='bar', slug='foo') @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_field_unique_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithUniqueName) # Add the custom unique constraint constraint = UniqueConstraint(fields=['name'], name='author_name_uniq') custom_constraint_name = constraint.name AuthorWithUniqueName._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(AuthorWithUniqueName, constraint) # Ensure the constraints exist constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Alter the column to remove field uniqueness old_field = AuthorWithUniqueName._meta.get_field('name') new_field = CharField(max_length=255) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True) constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Alter the column to re-add field uniqueness new_field2 = AuthorWithUniqueName._meta.get_field('name') with connection.schema_editor() as editor: editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True) constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the unique constraint with connection.schema_editor() as editor: AuthorWithUniqueName._meta.constraints = [] editor.remove_constraint(AuthorWithUniqueName, constraint) def test_unique_together(self): """ Tests removing and adding unique_together 
constraints on a model.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(UniqueTest)
        # Ensure the fields are unique to begin with
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2011, slug="foo")
        UniqueTest.objects.create(year=2011, slug="bar")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter the model to its non-unique-together companion
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
        # Ensure the fields are no longer unique
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
        # Ensure the fields are unique again
        UniqueTest.objects.create(year=2012, slug="foo")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()

    def test_unique_together_with_fk(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the fields aren't unique to begin with
        self.assertEqual(Book._meta.unique_together, ())
        # Add the unique_together constraint
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [['author', 'title']], [])

    def test_unique_together_with_fk_with_existing_index(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key, where the foreign key is added after the model is
        created.
""" # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithoutAuthor) new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name('author') editor.add_field(BookWithoutAuthor, new_field) # Ensure the fields aren't unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [['author', 'title']], []) @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_unique_together_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithUniqueNameAndBirthday) # Add the custom unique constraint constraint = UniqueConstraint(fields=['name', 'birthday'], name='author_name_birthday_uniq') custom_constraint_name = constraint.name AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint) # Ensure the constraints exist constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Remove unique together unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together with connection.schema_editor() as editor: editor.alter_unique_together(AuthorWithUniqueNameAndBirthday, unique_together, []) constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Re-add unique together with connection.schema_editor() as editor: editor.alter_unique_together(AuthorWithUniqueNameAndBirthday, [], unique_together) constraints = self.get_constraints(AuthorWithUniqueNameAndBirthday._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['unique'] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the unique constraint with connection.schema_editor() as editor: AuthorWithUniqueNameAndBirthday._meta.constraints = [] editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint) def test_index_together(self): """ Tests removing and adding index_together constraints on a model. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure there's no index on the year/slug columns first self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), False, ) # Alter the model to add an index with connection.schema_editor() as editor: editor.alter_index_together(Tag, [], [("slug", "title")]) # Ensure there is now an index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), True, ) # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_index_together(Tag, [("slug", "title")], []) # Ensure there's no index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), False, ) def test_index_together_with_fk(self): """ Tests removing and adding index_together constraints that include a foreign key. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.index_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_index_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_index_together(Book, [['author', 'title']], []) def test_create_index_together(self): """ Tests creating models with index_together already defined """ # Create the table with connection.schema_editor() as editor: editor.create_model(TagIndexed) # Ensure there is an index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tagindexed").values() if c['columns'] == ["slug", "title"] ), True, ) @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields') def test_remove_index_together_does_not_remove_meta_indexes(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedNameAndBirthday) # Add the custom index index = Index(fields=['name', 'birthday'], name='author_name_birthday_idx') custom_index_name = index.name AuthorWithIndexedNameAndBirthday._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedNameAndBirthday, index) # Ensure the indexes exist constraints = self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Remove index together index_together = AuthorWithIndexedNameAndBirthday._meta.index_together with connection.schema_editor() as editor: editor.alter_index_together(AuthorWithIndexedNameAndBirthday, index_together, []) constraints = self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 0) # Re-add index together with connection.schema_editor() as editor: editor.alter_index_together(AuthorWithIndexedNameAndBirthday, [], index_together) constraints = 
self.get_constraints(AuthorWithIndexedNameAndBirthday._meta.db_table) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details['columns'] == ['name', 'birthday'] and details['index'] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Drop the index with connection.schema_editor() as editor: AuthorWithIndexedNameAndBirthday._meta.indexes = [] editor.remove_index(AuthorWithIndexedNameAndBirthday, index) @isolate_apps('schema') def test_db_table(self): """ Tests renaming of the table """ class Author(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = 'schema' # Create the table and one referring it. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") # Alter the table with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor: editor.alter_db_table(Author, "schema_author", "schema_otherauthor") # Ensure the table is there afterwards Author._meta.db_table = "schema_otherauthor" columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") # Ensure the foreign key reference was updated self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor") # Alter the table again with connection.schema_editor(atomic=connection.features.supports_atomic_references_rename) as editor: editor.alter_db_table(Author, "schema_otherauthor", "schema_author") # Ensure the table is still there Author._meta.db_table = "schema_author" columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") def test_add_remove_index(self): """ Tests index addition and removal """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the table is there and has no index self.assertNotIn('title', self.get_indexes(Author._meta.db_table)) # Add the index index = Index(fields=['name'], name='author_title_idx') with connection.schema_editor() as editor: editor.add_index(Author, index) self.assertIn('name', self.get_indexes(Author._meta.db_table)) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn('name', self.get_indexes(Author._meta.db_table)) def test_remove_db_index_doesnt_remove_custom_indexes(self): """ Changing db_index to False doesn't remove indexes from Meta.indexes. 
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedName) # Ensure the table has its index self.assertIn('name', self.get_indexes(AuthorWithIndexedName._meta.db_table)) # Add the custom index index = Index(fields=['-name'], name='author_name_idx') author_index_name = index.name with connection.schema_editor() as editor: db_index_name = editor._create_index_name( table_name=AuthorWithIndexedName._meta.db_table, column_names=('name',), ) try: AuthorWithIndexedName._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedName, index) old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertIn(author_index_name, old_constraints) self.assertIn(db_index_name, old_constraints) # Change name field to db_index=False old_field = AuthorWithIndexedName._meta.get_field('name') new_field = CharField(max_length=255) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(AuthorWithIndexedName, old_field, new_field, strict=True) new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertNotIn(db_index_name, new_constraints) # The index from Meta.indexes is still in the database. self.assertIn(author_index_name, new_constraints) # Drop the index with connection.schema_editor() as editor: editor.remove_index(AuthorWithIndexedName, index) finally: AuthorWithIndexedName._meta.indexes = [] def test_order_index(self): """ Indexes defined with ordering (ASC/DESC) defined on column """ with connection.schema_editor() as editor: editor.create_model(Author) # The table doesn't have an index self.assertNotIn('title', self.get_indexes(Author._meta.db_table)) index_name = 'author_name_idx' # Add the index index = Index(fields=['name', '-weight'], name=index_name) with connection.schema_editor() as editor: editor.add_index(Author, index) if connection.features.supports_index_column_ordering: self.assertIndexOrder(Author._meta.db_table, index_name, ['ASC', 'DESC']) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) def test_indexes(self): """ Tests creation/altering of indexes """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there and has the right index self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to remove the index old_field = Book._meta.get_field("title") new_field = CharField(max_length=100, db_index=False) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the table is there and has no index self.assertNotIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to re-add the index new_field2 = Book._meta.get_field("title") with connection.schema_editor() as editor: editor.alter_field(Book, new_field, new_field2, strict=True) # Ensure the table is there and has the index again self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Add a unique column, verify that creates an implicit index new_field3 = BookWithSlug._meta.get_field("slug") with connection.schema_editor() as editor: editor.add_field(Book, new_field3) self.assertIn( "slug", self.get_uniques(Book._meta.db_table), ) # Remove the unique, check the index goes with it new_field4 = CharField(max_length=20, unique=False) new_field4.set_attributes_from_name("slug") with connection.schema_editor() 
as editor: editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True) self.assertNotIn( "slug", self.get_uniques(Book._meta.db_table), ) def test_text_field_with_db_index(self): with connection.schema_editor() as editor: editor.create_model(AuthorTextFieldWithIndex) # The text_field index is present if the database supports it. assertion = self.assertIn if connection.features.supports_index_on_text_field else self.assertNotIn assertion('text_field', self.get_indexes(AuthorTextFieldWithIndex._meta.db_table)) def test_primary_key(self): """ Tests altering of the primary key """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the table is there and has the right PK self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'id') # Alter to change the PK id_field = Tag._meta.get_field("id") old_field = Tag._meta.get_field("slug") new_field = SlugField(primary_key=True) new_field.set_attributes_from_name("slug") new_field.model = Tag with connection.schema_editor() as editor: editor.remove_field(Tag, id_field) editor.alter_field(Tag, old_field, new_field) # Ensure the PK changed self.assertNotIn( 'id', self.get_indexes(Tag._meta.db_table), ) self.assertEqual(self.get_primary_key(Tag._meta.db_table), 'slug') def test_context_manager_exit(self): """ Ensures transaction is correctly closed when an error occurs inside a SchemaEditor context. """ class SomeError(Exception): pass try: with connection.schema_editor(): raise SomeError except SomeError: self.assertFalse(connection.in_atomic_block) @skipIfDBFeature('can_rollback_ddl') def test_unsupported_transactional_ddl_disallowed(self): message = ( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) with atomic(), connection.schema_editor() as editor: with self.assertRaisesMessage(TransactionManagementError, message): editor.execute(editor.sql_create_table % {'table': 'foo', 'definition': ''}) @skipUnlessDBFeature('supports_foreign_keys') def test_foreign_key_index_long_names_regression(self): """ Regression test for #21497. Only affects databases that supports foreign keys. """ # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Find the properly shortened column name column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id") column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase # Ensure the table is there and has an index on the column self.assertIn( column_name, self.get_indexes(BookWithLongName._meta.db_table), ) @skipUnlessDBFeature('supports_foreign_keys') def test_add_foreign_key_long_names(self): """ Regression test for #23009. Only affects databases that supports foreign keys. 
""" # Create the initial tables with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Add a second FK, this would fail due to long ref name before the fix new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something") new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk") with connection.schema_editor() as editor: editor.add_field(BookWithLongName, new_field) @isolate_apps('schema') @skipUnlessDBFeature('supports_foreign_keys') def test_add_foreign_key_quoted_db_table(self): class Author(Model): class Meta: db_table = '"table_author_double_quoted"' app_label = 'schema' class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = 'schema' with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) if connection.vendor == 'mysql': self.assertForeignKeyExists(Book, 'author_id', '"table_author_double_quoted"') else: self.assertForeignKeyExists(Book, 'author_id', 'table_author_double_quoted') def test_add_foreign_object(self): with connection.schema_editor() as editor: editor.create_model(BookForeignObj) new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id']) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.add_field(BookForeignObj, new_field) def test_creation_deletion_reserved_names(self): """ Tries creating a model's table, and then deleting it when it has a SQL reserved name. """ # Create the table with connection.schema_editor() as editor: try: editor.create_model(Thing) except OperationalError as e: self.fail("Errors when applying initial migration for a model " "with a table named after an SQL reserved word: %s" % e) # The table is there list(Thing.objects.all()) # Clean up that table with connection.schema_editor() as editor: editor.delete_model(Thing) # The table is gone with self.assertRaises(DatabaseError): list(Thing.objects.all()) def test_remove_constraints_capital_letters(self): """ #23065 - Constraint names must be quoted if they contain capital letters. 
""" def get_field(*args, field_class=IntegerField, **kwargs): kwargs['db_column'] = "CamelCase" field = field_class(*args, **kwargs) field.set_attributes_from_name("CamelCase") return field model = Author field = get_field() table = model._meta.db_table column = field.column identifier_converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: editor.create_model(model) editor.add_field(model, field) constraint_name = 'CamelCaseIndex' expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_index % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "using": "", "columns": editor.quote_name(column), "extra": "", "condition": "", } ) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(db_index=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) constraint_name = 'CamelCaseUniqConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor._create_unique_sql(model, [field.column], constraint_name)) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(unique=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) if editor.sql_create_fk: constraint_name = 'CamelCaseFKConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_fk % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "column": editor.quote_name(column), "to_table": editor.quote_name(table), "to_column": editor.quote_name(model._meta.auto_field.column), "deferrable": connection.ops.deferrable_sql(), } ) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) def test_add_field_use_effective_default(self): """ #23987 - effective_default() should be used as the field default when adding a new field. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField to ensure default will be used from effective_default new_field = CharField(max_length=15, blank=True) new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '') def test_add_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField with a default new_field = CharField(max_length=15, blank=True, default='surname default') new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], 'surname default') # And that the default is no longer set in the database. field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "surname" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_alter_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') self.assertIsNone(Author.objects.get().height) old_field = Author._meta.get_field('height') # The default from the new field is used in updating existing rows. new_field = IntegerField(blank=True, default=42) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual(Author.objects.get().height, 42) # The database default should be removed. with connection.cursor() as cursor: field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "height" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') def test_alter_field_default_doesnt_perform_queries(self): """ No queries are performed if a field default changes and the field's not changing from null to non-null. 
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) old_field = AuthorWithDefaultHeight._meta.get_field('height') new_default = old_field.default * 2 new_field = PositiveIntegerField(null=True, blank=True, default=new_default) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True) def test_add_textfield_unhashable_default(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') # Create a field that has an unhashable default new_field = TextField(default={}) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.add_field(Author, new_field) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_indexed_charfield(self): field = CharField(max_length=255, db_index=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851', 'schema_author_nom_de_plume_7570a851_like'], ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_unique_charfield(self): field = CharField(max_length=255, unique=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851_like', 'schema_author_nom_de_plume_key'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_index_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add db_index=True and create 2 indexes. old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, db_index=True) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, 'name'), ['schema_author_name_1fbc5617', 'schema_author_name_1fbc5617_like'] ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_unique_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add unique=True and create 2 indexes. 
old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, 'name'), ['schema_author_name_1fbc5617_like', 'schema_author_name_1fbc5617_uniq'] ) # Remove unique=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_index_to_textfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Note) self.assertEqual(self.get_constraints_for_column(Note, 'info'), []) # Alter to add db_index=True and create 2 indexes. old_field = Note._meta.get_field('info') new_field = TextField(db_index=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Note, 'info'), ['schema_note_info_4b0ea695', 'schema_note_info_4b0ea695_like'] ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Note, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Note, 'info'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_unique_to_charfield_with_db_index(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to remove unique=True (should drop unique index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_remove_unique_and_db_index_from_charfield(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to remove both unique=True and db_index=True (should drop all indexes) new_field2 = CharField(max_length=100) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual(self.get_constraints_for_column(BookWithoutAuthor, 'title'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_swap_unique_and_db_index_with_charfield(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to set unique=True and remove db_index=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to set db_index=True and remove unique=True (should restore index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_db_index_to_charfield_with_unique(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(Tag) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) # Alter to add db_index=True old_field = Tag._meta.get_field('slug') new_field = SlugField(db_index=True, unique=True) new_field.set_attributes_from_name('slug') with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) # Alter to remove db_index=True new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name('slug') with connection.schema_editor() as editor: editor.alter_field(Tag, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) def test_alter_field_add_index_to_integerfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'weight'), []) # Alter to add db_index=True and create index. old_field = Author._meta.get_field('weight') new_field = IntegerField(null=True, db_index=True) new_field.set_attributes_from_name('weight') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'weight'), ['schema_author_weight_587740f9']) # Remove db_index=True to drop index. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'weight'), []) def test_alter_pk_with_self_referential_field(self): """ Changing the primary key field name of a model with a self-referential foreign key (#26384). """ with connection.schema_editor() as editor: editor.create_model(Node) old_field = Node._meta.get_field('node_id') new_field = AutoField(primary_key=True) new_field.set_attributes_from_name('id') with connection.schema_editor() as editor: editor.alter_field(Node, old_field, new_field, strict=True) self.assertForeignKeyExists(Node, 'parent_id', Node._meta.db_table) @mock.patch('django.db.backends.base.schema.datetime') @mock.patch('django.db.backends.base.schema.timezone') def test_add_datefield_and_datetimefield_use_effective_default(self, mocked_datetime, mocked_tz): """ effective_default() should be used for DateField, DateTimeField, and TimeField if auto_now or auto_add_now is set (#25005). 
""" now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1) now_tz = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1, tzinfo=timezone.utc) mocked_datetime.now = mock.MagicMock(return_value=now) mocked_tz.now = mock.MagicMock(return_value=now_tz) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Check auto_now/auto_now_add attributes are not defined columns = self.column_classes(Author) self.assertNotIn("dob_auto_now", columns) self.assertNotIn("dob_auto_now_add", columns) self.assertNotIn("dtob_auto_now", columns) self.assertNotIn("dtob_auto_now_add", columns) self.assertNotIn("tob_auto_now", columns) self.assertNotIn("tob_auto_now_add", columns) # Create a row Author.objects.create(name='Anonymous1') # Ensure fields were added with the correct defaults dob_auto_now = DateField(auto_now=True) dob_auto_now.set_attributes_from_name('dob_auto_now') self.check_added_field_default( editor, Author, dob_auto_now, 'dob_auto_now', now.date(), cast_function=lambda x: x.date(), ) dob_auto_now_add = DateField(auto_now_add=True) dob_auto_now_add.set_attributes_from_name('dob_auto_now_add') self.check_added_field_default( editor, Author, dob_auto_now_add, 'dob_auto_now_add', now.date(), cast_function=lambda x: x.date(), ) dtob_auto_now = DateTimeField(auto_now=True) dtob_auto_now.set_attributes_from_name('dtob_auto_now') self.check_added_field_default( editor, Author, dtob_auto_now, 'dtob_auto_now', now, ) dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True) dt_tm_of_birth_auto_now_add.set_attributes_from_name('dtob_auto_now_add') self.check_added_field_default( editor, Author, dt_tm_of_birth_auto_now_add, 'dtob_auto_now_add', now, ) tob_auto_now = TimeField(auto_now=True) tob_auto_now.set_attributes_from_name('tob_auto_now') self.check_added_field_default( editor, Author, tob_auto_now, 'tob_auto_now', now.time(), cast_function=lambda x: x.time(), ) tob_auto_now_add = TimeField(auto_now_add=True) tob_auto_now_add.set_attributes_from_name('tob_auto_now_add') self.check_added_field_default( editor, Author, tob_auto_now_add, 'tob_auto_now_add', now.time(), cast_function=lambda x: x.time(), ) def test_namespaced_db_table_create_index_name(self): """ Table names are stripped of their namespace/schema before being used to generate index names. 
""" with connection.schema_editor() as editor: max_name_length = connection.ops.max_name_length() or 200 namespace = 'n' * max_name_length table_name = 't' * max_name_length namespaced_table_name = '"%s"."%s"' % (namespace, table_name) self.assertEqual( editor._create_index_name(table_name, []), editor._create_index_name(namespaced_table_name, []), ) @unittest.skipUnless(connection.vendor == 'oracle', 'Oracle specific db_table syntax') def test_creation_with_db_table_double_quotes(self): oracle_user = connection.creation._test_database_user() class Student(Model): name = CharField(max_length=30) class Meta: app_label = 'schema' apps = new_apps db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user class Document(Model): name = CharField(max_length=30) students = ManyToManyField(Student) class Meta: app_label = 'schema' apps = new_apps db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user self.local_models = [Student, Document] with connection.schema_editor() as editor: editor.create_model(Student) editor.create_model(Document) doc = Document.objects.create(name='Test Name') student = Student.objects.create(name='Some man') doc.students.add(student) def test_rename_table_renames_deferred_sql_references(self): atomic_rename = connection.features.supports_atomic_references_rename with connection.schema_editor(atomic=atomic_rename) as editor: editor.create_model(Author) editor.create_model(Book) editor.alter_db_table(Author, 'schema_author', 'schema_renamed_author') editor.alter_db_table(Author, 'schema_book', 'schema_renamed_book') self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_table('schema_author'), False) self.assertIs(statement.references_table('schema_book'), False) @unittest.skipIf(connection.vendor == 'sqlite', 'SQLite naively remakes the table on field alteration.') def test_rename_column_renames_deferred_sql_references(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) old_title = Book._meta.get_field('title') new_title = CharField(max_length=100, db_index=True) new_title.set_attributes_from_name('renamed_title') editor.alter_field(Book, old_title, new_title) old_author = Book._meta.get_field('author') new_author = ForeignKey(Author, CASCADE) new_author.set_attributes_from_name('renamed_author') editor.alter_field(Book, old_author, new_author) self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_column('book', 'title'), False) self.assertIs(statement.references_column('book', 'author_id'), False) @isolate_apps('schema') def test_referenced_field_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the field they reference from being renamed in an atomic block. 
""" class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = 'schema' class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False) class Meta: app_label = 'schema' self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('renamed') with connection.schema_editor(atomic=True) as editor: editor.alter_field(Foo, Foo._meta.get_field('field'), new_field) @isolate_apps('schema') def test_referenced_table_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the table they reference from being renamed in an atomic block. """ class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = 'schema' class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field='field', db_constraint=False) class Meta: app_label = 'schema' self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('renamed') with connection.schema_editor(atomic=True) as editor: editor.alter_db_table(Foo, Foo._meta.db_table, 'renamed_table') Foo._meta.db_table = 'renamed_table'
18ac1100b225dad1d90b4307a6c92fb45992ccf99fab18d3b720b65d3326129a
from django.db import models


class Classification(models.Model):
    code = models.CharField(max_length=10)


class Employee(models.Model):
    name = models.CharField(max_length=40, blank=False, null=False)
    salary = models.PositiveIntegerField()
    department = models.CharField(max_length=40, blank=False, null=False)
    hire_date = models.DateField(blank=False, null=False)
    age = models.IntegerField(blank=False, null=False)
    classification = models.ForeignKey('Classification', on_delete=models.CASCADE, null=True)

    def __str__(self):
        return '{}, {}, {}, {}'.format(self.name, self.department, self.salary, self.hire_date)
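
# A small usage sketch for the models above (illustrative only; the data and
# the annotation name are made up). These models are typically queried with
# query expressions such as F():
def _employee_usage_sketch():
    import datetime

    from django.db.models import F

    Employee.objects.create(
        name='Jane Doe',
        salary=50000,
        department='IT',
        hire_date=datetime.date(2020, 1, 1),
        age=30,
    )
    # Combine columns in the database rather than in Python.
    projected = Employee.objects.annotate(projected_salary=F('salary') * 2)
    return list(projected.values_list('name', 'projected_salary'))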
0d659c0e776649b28fca9da85f888fe94d9ded142f6f754605ff71a60b79f136
""" Specifying ordering Specify default ordering for a model using the ``ordering`` attribute, which should be a list or tuple of field names. This tells Django how to order ``QuerySet`` results. If a field name in ``ordering`` starts with a hyphen, that field will be ordered in descending order. Otherwise, it'll be ordered in ascending order. The special-case field name ``"?"`` specifies random order. The ordering attribute is not required. If you leave it off, ordering will be undefined -- not random, just undefined. """ from django.db import models class Author(models.Model): name = models.CharField(max_length=63, null=True, blank=True) class Meta: ordering = ('-pk',) class Article(models.Model): author = models.ForeignKey(Author, models.SET_NULL, null=True) second_author = models.ForeignKey(Author, models.SET_NULL, null=True, related_name='+') headline = models.CharField(max_length=100) pub_date = models.DateTimeField() class Meta: ordering = ( '-pub_date', models.F('headline'), models.F('author__name').asc(), models.OrderBy(models.F('second_author__name')), ) def __str__(self): return self.headline class OrderedByAuthorArticle(Article): class Meta: proxy = True ordering = ('author', 'second_author') class OrderedByFArticle(Article): class Meta: proxy = True ordering = (models.F('author').asc(nulls_first=True), 'id') class ChildArticle(Article): pass class Reference(models.Model): article = models.ForeignKey(OrderedByAuthorArticle, models.CASCADE) class Meta: ordering = ('article',)
54f4109b787200f9d886520b6e8ff6ebaa217df5eacf532f8ec0c6cad5d209ff
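# The test module below exercises Django's on_delete handlers (CASCADE,
# PROTECT, RESTRICT, SET_NULL, SET_DEFAULT, SET(), DO_NOTHING). As a quick
# reference, a minimal sketch of declaring such handlers (model and field
# names here are hypothetical, not taken from the test models):
#
#     from django.db import models
#
#     class Ticket(models.Model):
#         # Deleting the owner cascades and deletes the ticket.
#         owner = models.ForeignKey('auth.User', models.CASCADE)
#         # Deleting the backup just nulls the column (requires null=True).
#         backup = models.ForeignKey(
#             'auth.User', models.SET_NULL, null=True, related_name='+',
#         )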
from math import ceil from django.db import connection, models from django.db.models import ProtectedError, RestrictedError from django.db.models.deletion import Collector from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from .models import ( B1, B2, B3, MR, A, Avatar, B, Base, Child, DeleteBottom, DeleteTop, GenericB1, GenericB2, GenericDeleteBottom, HiddenUser, HiddenUserProfile, M, M2MFrom, M2MTo, MRNull, Origin, P, Parent, R, RChild, RChildChild, Referrer, S, T, User, create_a, get_default_r, ) class OnDeleteTests(TestCase): def setUp(self): self.DEFAULT = get_default_r() def test_auto(self): a = create_a('auto') a.auto.delete() self.assertFalse(A.objects.filter(name='auto').exists()) def test_non_callable(self): msg = 'on_delete must be callable.' with self.assertRaisesMessage(TypeError, msg): models.ForeignKey('self', on_delete=None) with self.assertRaisesMessage(TypeError, msg): models.OneToOneField('self', on_delete=None) def test_auto_nullable(self): a = create_a('auto_nullable') a.auto_nullable.delete() self.assertFalse(A.objects.filter(name='auto_nullable').exists()) def test_setvalue(self): a = create_a('setvalue') a.setvalue.delete() a = A.objects.get(pk=a.pk) self.assertEqual(self.DEFAULT, a.setvalue.pk) def test_setnull(self): a = create_a('setnull') a.setnull.delete() a = A.objects.get(pk=a.pk) self.assertIsNone(a.setnull) def test_setdefault(self): a = create_a('setdefault') a.setdefault.delete() a = A.objects.get(pk=a.pk) self.assertEqual(self.DEFAULT, a.setdefault.pk) def test_setdefault_none(self): a = create_a('setdefault_none') a.setdefault_none.delete() a = A.objects.get(pk=a.pk) self.assertIsNone(a.setdefault_none) def test_cascade(self): a = create_a('cascade') a.cascade.delete() self.assertFalse(A.objects.filter(name='cascade').exists()) def test_cascade_nullable(self): a = create_a('cascade_nullable') a.cascade_nullable.delete() self.assertFalse(A.objects.filter(name='cascade_nullable').exists()) def test_protect(self): a = create_a('protect') msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through protected foreign keys: 'A.protect'." ) with self.assertRaisesMessage(ProtectedError, msg): a.protect.delete() def test_protect_multiple(self): a = create_a('protect') B.objects.create(protect=a.protect) msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through protected foreign keys: 'A.protect', " "'B.protect'." ) with self.assertRaisesMessage(ProtectedError, msg): a.protect.delete() def test_protect_path(self): a = create_a('protect') a.protect.p = P.objects.create() a.protect.save() msg = ( "Cannot delete some instances of model 'P' because they are " "referenced through protected foreign keys: 'R.p'." ) with self.assertRaisesMessage(ProtectedError, msg): a.protect.p.delete() def test_do_nothing(self): # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model, # so we connect to pre_delete and set the fk to a known value. replacement_r = R.objects.create() def check_do_nothing(sender, **kwargs): obj = kwargs['instance'] obj.donothing_set.update(donothing=replacement_r) models.signals.pre_delete.connect(check_do_nothing) a = create_a('do_nothing') a.donothing.delete() a = A.objects.get(pk=a.pk) self.assertEqual(replacement_r, a.donothing) models.signals.pre_delete.disconnect(check_do_nothing) def test_do_nothing_qscount(self): """ A models.DO_NOTHING relation doesn't trigger a query. 
""" b = Base.objects.create() with self.assertNumQueries(1): # RelToBase should not be queried. b.delete() self.assertEqual(Base.objects.count(), 0) def test_inheritance_cascade_up(self): child = RChild.objects.create() child.delete() self.assertFalse(R.objects.filter(pk=child.pk).exists()) def test_inheritance_cascade_down(self): child = RChild.objects.create() parent = child.r_ptr parent.delete() self.assertFalse(RChild.objects.filter(pk=child.pk).exists()) def test_cascade_from_child(self): a = create_a('child') a.child.delete() self.assertFalse(A.objects.filter(name='child').exists()) self.assertFalse(R.objects.filter(pk=a.child_id).exists()) def test_cascade_from_parent(self): a = create_a('child') R.objects.get(pk=a.child_id).delete() self.assertFalse(A.objects.filter(name='child').exists()) self.assertFalse(RChild.objects.filter(pk=a.child_id).exists()) def test_setnull_from_child(self): a = create_a('child_setnull') a.child_setnull.delete() self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists()) a = A.objects.get(pk=a.pk) self.assertIsNone(a.child_setnull) def test_setnull_from_parent(self): a = create_a('child_setnull') R.objects.get(pk=a.child_setnull_id).delete() self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists()) a = A.objects.get(pk=a.pk) self.assertIsNone(a.child_setnull) def test_o2o_setnull(self): a = create_a('o2o_setnull') a.o2o_setnull.delete() a = A.objects.get(pk=a.pk) self.assertIsNone(a.o2o_setnull) def test_restrict(self): a = create_a('restrict') msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through restricted foreign keys: 'A.restrict'." ) with self.assertRaisesMessage(RestrictedError, msg): a.restrict.delete() def test_restrict_multiple(self): a = create_a('restrict') B3.objects.create(restrict=a.restrict) msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through restricted foreign keys: 'A.restrict', " "'B3.restrict'." ) with self.assertRaisesMessage(RestrictedError, msg): a.restrict.delete() def test_restrict_path_cascade_indirect(self): a = create_a('restrict') a.restrict.p = P.objects.create() a.restrict.save() msg = ( "Cannot delete some instances of model 'P' because they are " "referenced through restricted foreign keys: 'A.restrict'." ) with self.assertRaisesMessage(RestrictedError, msg): a.restrict.p.delete() # Object referenced also with CASCADE relationship can be deleted. a.cascade.p = a.restrict.p a.cascade.save() a.restrict.p.delete() self.assertFalse(A.objects.filter(name='restrict').exists()) self.assertFalse(R.objects.filter(pk=a.restrict_id).exists()) def test_restrict_path_cascade_direct(self): a = create_a('restrict') a.restrict.p = P.objects.create() a.restrict.save() a.cascade_p = a.restrict.p a.save() a.restrict.p.delete() self.assertFalse(A.objects.filter(name='restrict').exists()) self.assertFalse(R.objects.filter(pk=a.restrict_id).exists()) def test_restrict_path_cascade_indirect_diamond(self): delete_top = DeleteTop.objects.create() b1 = B1.objects.create(delete_top=delete_top) b2 = B2.objects.create(delete_top=delete_top) DeleteBottom.objects.create(b1=b1, b2=b2) msg = ( "Cannot delete some instances of model 'B1' because they are " "referenced through restricted foreign keys: 'DeleteBottom.b1'." 
        )
        with self.assertRaisesMessage(RestrictedError, msg):
            b1.delete()
        self.assertTrue(DeleteTop.objects.exists())
        self.assertTrue(B1.objects.exists())
        self.assertTrue(B2.objects.exists())
        self.assertTrue(DeleteBottom.objects.exists())
        # Object referenced also with CASCADE relationship can be deleted.
        delete_top.delete()
        self.assertFalse(DeleteTop.objects.exists())
        self.assertFalse(B1.objects.exists())
        self.assertFalse(B2.objects.exists())
        self.assertFalse(DeleteBottom.objects.exists())

    def test_restrict_gfk_no_fast_delete(self):
        delete_top = DeleteTop.objects.create()
        generic_b1 = GenericB1.objects.create(generic_delete_top=delete_top)
        generic_b2 = GenericB2.objects.create(generic_delete_top=delete_top)
        GenericDeleteBottom.objects.create(generic_b1=generic_b1, generic_b2=generic_b2)
        msg = (
            "Cannot delete some instances of model 'GenericB1' because they "
            "are referenced through restricted foreign keys: "
            "'GenericDeleteBottom.generic_b1'."
        )
        with self.assertRaisesMessage(RestrictedError, msg):
            generic_b1.delete()
        self.assertTrue(DeleteTop.objects.exists())
        self.assertTrue(GenericB1.objects.exists())
        self.assertTrue(GenericB2.objects.exists())
        self.assertTrue(GenericDeleteBottom.objects.exists())
        # Object referenced also with CASCADE relationship can be deleted.
        delete_top.delete()
        self.assertFalse(DeleteTop.objects.exists())
        self.assertFalse(GenericB1.objects.exists())
        self.assertFalse(GenericB2.objects.exists())
        self.assertFalse(GenericDeleteBottom.objects.exists())


class DeletionTests(TestCase):
    def test_m2m(self):
        m = M.objects.create()
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        r.delete()
        self.assertFalse(MR.objects.exists())

        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        m.delete()
        self.assertFalse(MR.objects.exists())

        m = M.objects.create()
        r = R.objects.create()
        m.m2m.add(r)
        r.delete()
        through = M._meta.get_field('m2m').remote_field.through
        self.assertFalse(through.objects.exists())

        r = R.objects.create()
        m.m2m.add(r)
        m.delete()
        self.assertFalse(through.objects.exists())

        m = M.objects.create()
        r = R.objects.create()
        MRNull.objects.create(m=m, r=r)
        r.delete()
        # The through row survives because MRNull's foreign key to R is
        # nullable, but the null rows no longer show up via the m2m manager.
        self.assertTrue(MRNull.objects.exists())
        self.assertFalse(m.m2m_through_null.exists())

    def test_bulk(self):
        s = S.objects.create(r=R.objects.create())
        for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
            T.objects.create(s=s)
        # 1 (select related `T` instances)
        # + 1 (select related `U` instances)
        # + 2 (delete `T` instances in batches)
        # + 1 (delete `s`)
        self.assertNumQueries(5, s.delete)
        self.assertFalse(S.objects.exists())

    def test_instance_update(self):
        deleted = []
        related_setnull_sets = []

        def pre_delete(sender, **kwargs):
            obj = kwargs['instance']
            deleted.append(obj)
            if isinstance(obj, R):
                related_setnull_sets.append([a.pk for a in obj.setnull_set.all()])

        models.signals.pre_delete.connect(pre_delete)
        a = create_a('update_setnull')
        a.setnull.delete()

        a = create_a('update_cascade')
        a.cascade.delete()

        for obj in deleted:
            self.assertIsNone(obj.pk)

        for pk_list in related_setnull_sets:
            for a in A.objects.filter(id__in=pk_list):
                self.assertIsNone(a.setnull)

        models.signals.pre_delete.disconnect(pre_delete)

    def test_deletion_order(self):
        pre_delete_order = []
        post_delete_order = []

        def log_post_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs['instance'].pk))

        def log_pre_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs['instance'].pk))

        models.signals.post_delete.connect(log_post_delete)
        models.signals.pre_delete.connect(log_pre_delete)

        r = R.objects.create(pk=1)
        s1 = S.objects.create(pk=1, r=r)
        s2 = S.objects.create(pk=2, r=r)
        T.objects.create(pk=1, s=s1)
        T.objects.create(pk=2, s=s2)
        RChild.objects.create(r_ptr=r)
        r.delete()
        self.assertEqual(
            pre_delete_order, [(T, 1), (T, 2), (RChild, 1), (S, 1), (S, 2), (R, 1)]
        )
        self.assertEqual(
            post_delete_order, [(T, 2), (T, 1), (RChild, 1), (S, 2), (S, 1), (R, 1)]
        )

        models.signals.post_delete.disconnect(log_post_delete)
        models.signals.pre_delete.disconnect(log_pre_delete)

    def test_relational_post_delete_signals_happen_before_parent_object(self):
        deletions = []

        def log_post_delete(instance, **kwargs):
            self.assertTrue(R.objects.filter(pk=instance.r_id))
            self.assertIs(type(instance), S)
            deletions.append(instance.id)

        r = R.objects.create(pk=1)
        S.objects.create(pk=1, r=r)

        models.signals.post_delete.connect(log_post_delete, sender=S)

        try:
            r.delete()
        finally:
            models.signals.post_delete.disconnect(log_post_delete)

        self.assertEqual(len(deletions), 1)
        self.assertEqual(deletions[0], 1)

    @skipUnlessDBFeature("can_defer_constraint_checks")
    def test_can_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to delete the avatar
        # The important thing is that when we can defer constraint checks there
        # is no need to do an UPDATE on User.avatar to null it out.

        # Attach a signal to make sure we will not do fast_deletes.
        calls = []

        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)

        self.assertNumQueries(3, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    @skipIfDBFeature("can_defer_constraint_checks")
    def test_cannot_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []

        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)

        a = Avatar.objects.get(pk=u.avatar_id)
        # The below doesn't make sense... Why do we need to null out
        # user.avatar if we are going to delete the user immediately after it,
        # and there are no more cascades.
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to null out user.avatar, because we can't defer the constraint
        # 1 query to delete the avatar
        self.assertNumQueries(4, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    def test_hidden_related(self):
        r = R.objects.create()
        h = HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h)

        r.delete()
        self.assertEqual(HiddenUserProfile.objects.count(), 0)

    def test_large_delete(self):
        TEST_SIZE = 2000
        objs = [Avatar() for i in range(0, TEST_SIZE)]
        Avatar.objects.bulk_create(objs)
        # Calculate the number of queries needed.
        batch_size = connection.ops.bulk_batch_size(['pk'], objs)
        # The related fetches are done in batches.
        batches = ceil(len(objs) / batch_size)
        # One query for Avatar.objects.all() and then one related fast delete for
        # each batch.
        fetches_to_mem = 1 + batches
        # The Avatar objects are going to be deleted in batches of
        # GET_ITERATOR_CHUNK_SIZE.
        queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
        self.assertNumQueries(queries, Avatar.objects.all().delete)
        self.assertFalse(Avatar.objects.exists())

    def test_large_delete_related(self):
        TEST_SIZE = 2000
        s = S.objects.create(r=R.objects.create())
        for i in range(TEST_SIZE):
            T.objects.create(s=s)

        batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)

        # TEST_SIZE / batch_size (select related `T` instances)
        # + 1 (select related `U` instances)
        # + TEST_SIZE / GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
        # + 1 (delete `s`)
        expected_num_queries = ceil(TEST_SIZE / batch_size)
        expected_num_queries += ceil(TEST_SIZE / GET_ITERATOR_CHUNK_SIZE) + 2

        self.assertNumQueries(expected_num_queries, s.delete)
        self.assertFalse(S.objects.exists())
        self.assertFalse(T.objects.exists())

    def test_delete_with_keeping_parents(self):
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())

    def test_delete_with_keeping_parents_relationships(self):
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        parent_referent_id = S.objects.create(r=child.r_ptr).pk
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())
        self.assertTrue(S.objects.filter(pk=parent_referent_id).exists())

        childchild = RChildChild.objects.create()
        parent_id = childchild.rchild_ptr.r_ptr_id
        child_id = childchild.rchild_ptr_id
        parent_referent_id = S.objects.create(r=childchild.rchild_ptr.r_ptr).pk
        childchild.delete(keep_parents=True)
        self.assertFalse(RChildChild.objects.filter(id=childchild.id).exists())
        self.assertTrue(RChild.objects.filter(id=child_id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())
        self.assertTrue(S.objects.filter(pk=parent_referent_id).exists())

    def test_queryset_delete_returns_num_rows(self):
        """
        QuerySet.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
        avatars_count = Avatar.objects.count()
        deleted, rows_count = Avatar.objects.all().delete()
        self.assertEqual(deleted, avatars_count)

        # more complex example with multiple object types
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
        }
        deleted, deleted_objs = R.objects.all().delete()
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)

    def test_model_delete_returns_num_rows(self):
        """
        Model.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
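
        For example (illustrative values only), deleting an R that cascades to
        a single HiddenUser returns (2, {'delete.R': 1, 'delete.HiddenUser': 1});
        zero counts may also appear for other models the collector visited.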
""" r = R.objects.create() h1 = HiddenUser.objects.create(r=r) h2 = HiddenUser.objects.create(r=r) HiddenUser.objects.create(r=r) HiddenUserProfile.objects.create(user=h1) HiddenUserProfile.objects.create(user=h2) m1 = M.objects.create() m2 = M.objects.create() MR.objects.create(r=r, m=m1) r.m_set.add(m1) r.m_set.add(m2) r.save() existed_objs = { R._meta.label: R.objects.count(), HiddenUser._meta.label: HiddenUser.objects.count(), A._meta.label: A.objects.count(), MR._meta.label: MR.objects.count(), HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(), M.m2m.through._meta.label: M.m2m.through.objects.count(), } deleted, deleted_objs = r.delete() self.assertEqual(deleted, sum(existed_objs.values())) for k, v in existed_objs.items(): self.assertEqual(deleted_objs[k], v) def test_proxied_model_duplicate_queries(self): """ #25685 - Deleting instances of a model with existing proxy classes should not issue multiple queries during cascade deletion of referring models. """ avatar = Avatar.objects.create() # One query for the Avatar table and a second for the User one. with self.assertNumQueries(2): avatar.delete() def test_only_referenced_fields_selected(self): """ Only referenced fields are selected during cascade deletion SELECT unless deletion signals are connected. """ origin = Origin.objects.create() expected_sql = str( Referrer.objects.only( # Both fields are referenced by SecondReferrer. 'id', 'unique_field', ).filter(origin__in=[origin]).query ) with self.assertNumQueries(2) as ctx: origin.delete() self.assertEqual(ctx.captured_queries[0]['sql'], expected_sql) def receiver(instance, **kwargs): pass # All fields are selected if deletion signals are connected. for signal_name in ('pre_delete', 'post_delete'): with self.subTest(signal=signal_name): origin = Origin.objects.create() signal = getattr(models.signals, signal_name) signal.connect(receiver, sender=Referrer) with self.assertNumQueries(2) as ctx: origin.delete() self.assertIn( connection.ops.quote_name('large_field'), ctx.captured_queries[0]['sql'], ) signal.disconnect(receiver, sender=Referrer) class FastDeleteTests(TestCase): def test_fast_delete_fk(self): u = User.objects.create( avatar=Avatar.objects.create() ) a = Avatar.objects.get(pk=u.avatar_id) # 1 query to fast-delete the user # 1 query to delete the avatar self.assertNumQueries(2, a.delete) self.assertFalse(User.objects.exists()) self.assertFalse(Avatar.objects.exists()) def test_fast_delete_m2m(self): t = M2MTo.objects.create() f = M2MFrom.objects.create() f.m2m.add(t) # 1 to delete f, 1 to fast-delete m2m for f self.assertNumQueries(2, f.delete) def test_fast_delete_revm2m(self): t = M2MTo.objects.create() f = M2MFrom.objects.create() f.m2m.add(t) # 1 to delete t, 1 to fast-delete t's m_set self.assertNumQueries(2, f.delete) def test_fast_delete_qs(self): u1 = User.objects.create() u2 = User.objects.create() self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete) self.assertEqual(User.objects.count(), 1) self.assertTrue(User.objects.filter(pk=u2.pk).exists()) def test_fast_delete_instance_set_pk_none(self): u = User.objects.create() # User can be fast-deleted. 
        collector = Collector(using='default')
        self.assertTrue(collector.can_fast_delete(u))
        u.delete()
        self.assertIsNone(u.pk)

    def test_fast_delete_joined_qs(self):
        a = Avatar.objects.create(desc='a')
        User.objects.create(avatar=a)
        u2 = User.objects.create()
        self.assertNumQueries(1, User.objects.filter(avatar__desc='a').delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_inheritance(self):
        c = Child.objects.create()
        p = Parent.objects.create()
        # 1 for self, 1 for parent
        self.assertNumQueries(2, c.delete)
        self.assertFalse(Child.objects.exists())
        self.assertEqual(Parent.objects.count(), 1)
        self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        c = Child.objects.create()
        p = c.parent_ptr
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        self.assertFalse(Child.objects.exists())

    def test_fast_delete_large_batch(self):
        User.objects.bulk_create(User() for i in range(2000))
        # No problems here - we aren't going to cascade, so we will fast
        # delete the objects in a single query.
        self.assertNumQueries(1, User.objects.all().delete)
        a = Avatar.objects.create(desc='a')
        User.objects.bulk_create(User(avatar=a) for i in range(2000))
        # We don't hit parameter amount limits for a, so just one query for
        # that + fast delete of the related objs.
        self.assertNumQueries(2, a.delete)
        self.assertEqual(User.objects.count(), 0)

    def test_fast_delete_empty_no_update_can_self_select(self):
        """
        #25932 - Fast deleting on backends that don't have the
        `no_update_can_self_select` feature should work even if the specified
        filter doesn't match any row.
        """
        with self.assertNumQueries(1):
            self.assertEqual(
                User.objects.filter(avatar__desc='missing').delete(),
                (0, {'delete.User': 0})
            )

    def test_fast_delete_combined_relationships(self):
        # The cascading fast-delete of SecondReferrer should be combined
        # in a single DELETE WHERE referrer_id OR unique_field.
        origin = Origin.objects.create()
        referrer = Referrer.objects.create(origin=origin, unique_field=42)
        with self.assertNumQueries(2):
            referrer.delete()
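

# An illustrative sketch (not invoked by the suite): the RESTRICT behavior
# exercised by the restrict tests above, restated with this app's models. A
# restricted reference blocks deletion on its own, but is allowed when the
# same cascade also collects the referring row.
def _restrict_demo():
    delete_top = DeleteTop.objects.create()
    b1 = B1.objects.create(delete_top=delete_top)
    b2 = B2.objects.create(delete_top=delete_top)
    DeleteBottom.objects.create(b1=b1, b2=b2)
    try:
        # Blocked: DeleteBottom.b1 uses on_delete=RESTRICT.
        b1.delete()
    except RestrictedError:
        pass
    # Allowed: deleting delete_top cascades to b1, b2 *and* (via b2) to the
    # DeleteBottom row, so the restricted reference is removed by the same
    # collector run.
    delete_top.delete()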
from django.contrib.contenttypes.fields import (
    GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models


class P(models.Model):
    pass


class R(models.Model):
    is_default = models.BooleanField(default=False)
    p = models.ForeignKey(P, models.CASCADE, null=True)

    def __str__(self):
        return "%s" % self.pk


def get_default_r():
    return R.objects.get_or_create(is_default=True)[0].pk


class S(models.Model):
    r = models.ForeignKey(R, models.CASCADE)


class T(models.Model):
    s = models.ForeignKey(S, models.CASCADE)


class U(models.Model):
    t = models.ForeignKey(T, models.CASCADE)


class RChild(R):
    pass


class RChildChild(RChild):
    pass


class A(models.Model):
    name = models.CharField(max_length=30)

    auto = models.ForeignKey(R, models.CASCADE, related_name="auto_set")
    auto_nullable = models.ForeignKey(R, models.CASCADE, null=True, related_name='auto_nullable_set')
    setvalue = models.ForeignKey(R, models.SET(get_default_r), related_name='setvalue')
    setnull = models.ForeignKey(R, models.SET_NULL, null=True, related_name='setnull_set')
    setdefault = models.ForeignKey(R, models.SET_DEFAULT, default=get_default_r, related_name='setdefault_set')
    setdefault_none = models.ForeignKey(
        R, models.SET_DEFAULT, default=None, null=True, related_name='setnull_nullable_set',
    )
    cascade = models.ForeignKey(R, models.CASCADE, related_name='cascade_set')
    cascade_nullable = models.ForeignKey(R, models.CASCADE, null=True, related_name='cascade_nullable_set')
    protect = models.ForeignKey(R, models.PROTECT, null=True, related_name='protect_set')
    restrict = models.ForeignKey(R, models.RESTRICT, null=True, related_name='restrict_set')
    donothing = models.ForeignKey(R, models.DO_NOTHING, null=True, related_name='donothing_set')
    child = models.ForeignKey(RChild, models.CASCADE, related_name="child")
    child_setnull = models.ForeignKey(RChild, models.SET_NULL, null=True, related_name="child_setnull")
    cascade_p = models.ForeignKey(P, models.CASCADE, related_name='cascade_p_set', null=True)

    # A OneToOneField is just a ForeignKey unique=True, so we don't duplicate
    # all the tests; just one smoke test to ensure on_delete works for it as
    # well.
    o2o_setnull = models.ForeignKey(R, models.SET_NULL, null=True, related_name="o2o_nullable_set")


class B(models.Model):
    protect = models.ForeignKey(R, models.PROTECT)


def create_a(name):
    a = A(name=name)
    for fk_field in ('auto', 'auto_nullable', 'setvalue', 'setnull', 'setdefault',
                     'setdefault_none', 'cascade', 'cascade_nullable', 'protect',
                     'restrict', 'donothing', 'o2o_setnull'):
        r = R.objects.create()
        setattr(a, fk_field, r)
    a.child = RChild.objects.create()
    a.child_setnull = RChild.objects.create()
    a.save()
    return a


class M(models.Model):
    m2m = models.ManyToManyField(R, related_name="m_set")
    m2m_through = models.ManyToManyField(R, through="MR", related_name="m_through_set")
    m2m_through_null = models.ManyToManyField(R, through="MRNull", related_name="m_through_null_set")


class MR(models.Model):
    m = models.ForeignKey(M, models.CASCADE)
    r = models.ForeignKey(R, models.CASCADE)


class MRNull(models.Model):
    m = models.ForeignKey(M, models.CASCADE)
    r = models.ForeignKey(R, models.SET_NULL, null=True)


class Avatar(models.Model):
    desc = models.TextField(null=True)


# This model is used to test a duplicate query regression (#25685)
class AvatarProxy(Avatar):
    class Meta:
        proxy = True


class User(models.Model):
    avatar = models.ForeignKey(Avatar, models.CASCADE, null=True)


class HiddenUser(models.Model):
    r = models.ForeignKey(R, models.CASCADE, related_name="+")


class HiddenUserProfile(models.Model):
    user = models.ForeignKey(HiddenUser, models.CASCADE)


class M2MTo(models.Model):
    pass


class M2MFrom(models.Model):
    m2m = models.ManyToManyField(M2MTo)


class Parent(models.Model):
    pass


class Child(Parent):
    pass


class Base(models.Model):
    pass


class RelToBase(models.Model):
    base = models.ForeignKey(Base, models.DO_NOTHING)


class Origin(models.Model):
    pass


class Referrer(models.Model):
    origin = models.ForeignKey(Origin, models.CASCADE)
    unique_field = models.IntegerField(unique=True)
    large_field = models.TextField()


class SecondReferrer(models.Model):
    referrer = models.ForeignKey(Referrer, models.CASCADE)
    other_referrer = models.ForeignKey(
        Referrer, models.CASCADE, to_field='unique_field', related_name='+'
    )


class DeleteTop(models.Model):
    b1 = GenericRelation('GenericB1')
    b2 = GenericRelation('GenericB2')


class B1(models.Model):
    delete_top = models.ForeignKey(DeleteTop, models.CASCADE)


class B2(models.Model):
    delete_top = models.ForeignKey(DeleteTop, models.CASCADE)


class B3(models.Model):
    restrict = models.ForeignKey(R, models.RESTRICT)


class DeleteBottom(models.Model):
    b1 = models.ForeignKey(B1, models.RESTRICT)
    b2 = models.ForeignKey(B2, models.CASCADE)


class GenericB1(models.Model):
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    generic_delete_top = GenericForeignKey('content_type', 'object_id')


class GenericB2(models.Model):
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    generic_delete_top = GenericForeignKey('content_type', 'object_id')
    generic_delete_bottom = GenericRelation('GenericDeleteBottom')


class GenericDeleteBottom(models.Model):
    generic_b1 = models.ForeignKey(GenericB1, models.RESTRICT)
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    generic_b2 = GenericForeignKey()


class GenericDeleteBottomParent(models.Model):
    generic_delete_bottom = models.ForeignKey(GenericDeleteBottom, on_delete=models.CASCADE)
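

# Quick reference (a summary of documented behavior; see the A model above):
# what each on_delete option does to a referring row when its target R is
# deleted.
#
#   CASCADE      - the referring row is deleted as well
#   PROTECT      - the delete raises ProtectedError up front
#   RESTRICT     - the delete raises RestrictedError, unless the referring row
#                  is itself deleted by the same cascade
#   SET_NULL     - the foreign key is set to NULL (requires null=True)
#   SET_DEFAULT  - the foreign key is reset to the field default
#   SET(value)   - the foreign key is set to value (or value() if callable)
#   DO_NOTHING   - Django does nothing; the database may raise an
#                  IntegrityError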
from unittest import mock from django.core.checks import Error, Warning as DjangoWarning from django.db import connection, models from django.test.testcases import SimpleTestCase from django.test.utils import isolate_apps, override_settings @isolate_apps('invalid_models_tests') class RelativeFieldTests(SimpleTestCase): def test_valid_foreign_key_without_accessor(self): class Target(models.Model): # There would be a clash if Model.field installed an accessor. model = models.IntegerField() class Model(models.Model): field = models.ForeignKey(Target, models.CASCADE, related_name='+') field = Model._meta.get_field('field') self.assertEqual(field.check(), []) def test_foreign_key_to_missing_model(self): # Model names are resolved when a model is being created, so we cannot # test relative fields in isolation and we need to attach them to a # model. class Model(models.Model): foreign_key = models.ForeignKey('Rel1', models.CASCADE) field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(), [ Error( "Field defines a relation with model 'Rel1', " "which is either not installed, or is abstract.", obj=field, id='fields.E300', ), ]) @isolate_apps('invalid_models_tests') def test_foreign_key_to_isolate_apps_model(self): """ #25723 - Referenced model registration lookup should be run against the field's model registry. """ class OtherModel(models.Model): pass class Model(models.Model): foreign_key = models.ForeignKey('OtherModel', models.CASCADE) field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(from_model=Model), []) def test_many_to_many_to_missing_model(self): class Model(models.Model): m2m = models.ManyToManyField("Rel2") field = Model._meta.get_field('m2m') self.assertEqual(field.check(from_model=Model), [ Error( "Field defines a relation with model 'Rel2', " "which is either not installed, or is abstract.", obj=field, id='fields.E300', ), ]) @isolate_apps('invalid_models_tests') def test_many_to_many_to_isolate_apps_model(self): """ #25723 - Referenced model registration lookup should be run against the field's model registry. 
""" class OtherModel(models.Model): pass class Model(models.Model): m2m = models.ManyToManyField('OtherModel') field = Model._meta.get_field('m2m') self.assertEqual(field.check(from_model=Model), []) def test_many_to_many_with_limit_choices_auto_created_no_warning(self): class Model(models.Model): name = models.CharField(max_length=20) class ModelM2M(models.Model): m2m = models.ManyToManyField(Model, limit_choices_to={'name': 'test_name'}) self.assertEqual(ModelM2M.check(), []) def test_many_to_many_with_useless_options(self): class Model(models.Model): name = models.CharField(max_length=20) class ModelM2M(models.Model): m2m = models.ManyToManyField( Model, null=True, validators=[lambda x: x], limit_choices_to={'name': 'test_name'}, through='ThroughModel', through_fields=('modelm2m', 'model'), ) class ThroughModel(models.Model): model = models.ForeignKey('Model', models.CASCADE) modelm2m = models.ForeignKey('ModelM2M', models.CASCADE) field = ModelM2M._meta.get_field('m2m') self.assertEqual(ModelM2M.check(), [ DjangoWarning( 'null has no effect on ManyToManyField.', obj=field, id='fields.W340', ), DjangoWarning( 'ManyToManyField does not support validators.', obj=field, id='fields.W341', ), DjangoWarning( 'limit_choices_to has no effect on ManyToManyField ' 'with a through model.', obj=field, id='fields.W343', ), ]) def test_ambiguous_relationship_model(self): class Person(models.Model): pass class Group(models.Model): field = models.ManyToManyField('Person', through="AmbiguousRelationship", related_name='tertiary') class AmbiguousRelationship(models.Model): # Too much foreign keys to Person. first_person = models.ForeignKey(Person, models.CASCADE, related_name="first") second_person = models.ForeignKey(Person, models.CASCADE, related_name="second") second_model = models.ForeignKey(Group, models.CASCADE) field = Group._meta.get_field('field') self.assertEqual(field.check(from_model=Group), [ Error( "The model is used as an intermediate model by " "'invalid_models_tests.Group.field', but it has more than one " "foreign key to 'Person', which is ambiguous. You must specify " "which foreign key Django should use via the through_fields " "keyword argument.", hint=( 'If you want to create a recursive relationship, use ' 'ForeignKey("self", symmetrical=False, through="AmbiguousRelationship").' ), obj=field, id='fields.E335', ), ]) def test_relationship_model_with_foreign_key_to_wrong_model(self): class WrongModel(models.Model): pass class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through="InvalidRelationship") class InvalidRelationship(models.Model): person = models.ForeignKey(Person, models.CASCADE) wrong_foreign_key = models.ForeignKey(WrongModel, models.CASCADE) # The last foreign key should point to Group model. 
field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), [ Error( "The model is used as an intermediate model by " "'invalid_models_tests.Group.members', but it does not " "have a foreign key to 'Group' or 'Person'.", obj=InvalidRelationship, id='fields.E336', ), ]) def test_relationship_model_missing_foreign_key(self): class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through="InvalidRelationship") class InvalidRelationship(models.Model): group = models.ForeignKey(Group, models.CASCADE) # No foreign key to Person field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), [ Error( "The model is used as an intermediate model by " "'invalid_models_tests.Group.members', but it does not have " "a foreign key to 'Group' or 'Person'.", obj=InvalidRelationship, id='fields.E336', ), ]) def test_missing_relationship_model(self): class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through="MissingM2MModel") field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), [ Error( "Field specifies a many-to-many relation through model " "'MissingM2MModel', which has not been installed.", obj=field, id='fields.E331', ), ]) def test_missing_relationship_model_on_model_check(self): class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through='MissingM2MModel') self.assertEqual(Group.check(), [ Error( "Field specifies a many-to-many relation through model " "'MissingM2MModel', which has not been installed.", obj=Group._meta.get_field('members'), id='fields.E331', ), ]) @isolate_apps('invalid_models_tests') def test_many_to_many_through_isolate_apps_model(self): """ #25723 - Through model registration lookup should be run against the field's model registry. """ class GroupMember(models.Model): person = models.ForeignKey('Person', models.CASCADE) group = models.ForeignKey('Group', models.CASCADE) class Person(models.Model): pass class Group(models.Model): members = models.ManyToManyField('Person', through='GroupMember') field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), []) def test_too_many_foreign_keys_in_self_referential_model(self): class Person(models.Model): friends = models.ManyToManyField('self', through="InvalidRelationship", symmetrical=False) class InvalidRelationship(models.Model): first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set_2") second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set_2") third = models.ForeignKey(Person, models.CASCADE, related_name="too_many_by_far") field = Person._meta.get_field('friends') self.assertEqual(field.check(from_model=Person), [ Error( "The model is used as an intermediate model by " "'invalid_models_tests.Person.friends', but it has more than two " "foreign keys to 'Person', which is ambiguous. 
You must specify " "which two foreign keys Django should use via the through_fields " "keyword argument.", hint='Use through_fields to specify which two foreign keys Django should use.', obj=InvalidRelationship, id='fields.E333', ), ]) def test_foreign_key_to_abstract_model(self): class AbstractModel(models.Model): class Meta: abstract = True class Model(models.Model): rel_string_foreign_key = models.ForeignKey('AbstractModel', models.CASCADE) rel_class_foreign_key = models.ForeignKey(AbstractModel, models.CASCADE) fields = [ Model._meta.get_field('rel_string_foreign_key'), Model._meta.get_field('rel_class_foreign_key'), ] expected_error = Error( "Field defines a relation with model 'AbstractModel', " "which is either not installed, or is abstract.", id='fields.E300', ) for field in fields: expected_error.obj = field self.assertEqual(field.check(), [expected_error]) def test_m2m_to_abstract_model(self): class AbstractModel(models.Model): class Meta: abstract = True class Model(models.Model): rel_string_m2m = models.ManyToManyField('AbstractModel') rel_class_m2m = models.ManyToManyField(AbstractModel) fields = [ Model._meta.get_field('rel_string_m2m'), Model._meta.get_field('rel_class_m2m'), ] expected_error = Error( "Field defines a relation with model 'AbstractModel', " "which is either not installed, or is abstract.", id='fields.E300', ) for field in fields: expected_error.obj = field self.assertEqual(field.check(from_model=Model), [expected_error]) def test_unique_m2m(self): class Person(models.Model): name = models.CharField(max_length=5) class Group(models.Model): members = models.ManyToManyField('Person', unique=True) field = Group._meta.get_field('members') self.assertEqual(field.check(from_model=Group), [ Error( 'ManyToManyFields cannot be unique.', obj=field, id='fields.E330', ), ]) def test_foreign_key_to_non_unique_field(self): class Target(models.Model): bad = models.IntegerField() # No unique=True class Model(models.Model): foreign_key = models.ForeignKey('Target', models.CASCADE, to_field='bad') field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(), [ Error( "'Target.bad' must set unique=True because it is referenced by a foreign key.", obj=field, id='fields.E311', ), ]) def test_foreign_key_to_non_unique_field_under_explicit_model(self): class Target(models.Model): bad = models.IntegerField() class Model(models.Model): field = models.ForeignKey(Target, models.CASCADE, to_field='bad') field = Model._meta.get_field('field') self.assertEqual(field.check(), [ Error( "'Target.bad' must set unique=True because it is referenced by a foreign key.", obj=field, id='fields.E311', ), ]) def test_foreign_object_to_non_unique_fields(self): class Person(models.Model): # Note that both fields are not unique. country_id = models.IntegerField() city_id = models.IntegerField() class MMembership(models.Model): person_country_id = models.IntegerField() person_city_id = models.IntegerField() person = models.ForeignObject( Person, on_delete=models.CASCADE, from_fields=['person_country_id', 'person_city_id'], to_fields=['country_id', 'city_id'], ) field = MMembership._meta.get_field('person') self.assertEqual(field.check(), [ Error( "No subset of the fields 'country_id', 'city_id' on model 'Person' is unique.", hint=( "Add unique=True on any of those fields or add at least " "a subset of them to a unique_together constraint." 
), obj=field, id='fields.E310', ) ]) def test_on_delete_set_null_on_non_nullable_field(self): class Person(models.Model): pass class Model(models.Model): foreign_key = models.ForeignKey('Person', models.SET_NULL) field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(), [ Error( 'Field specifies on_delete=SET_NULL, but cannot be null.', hint='Set null=True argument on the field, or change the on_delete rule.', obj=field, id='fields.E320', ), ]) def test_on_delete_set_default_without_default_value(self): class Person(models.Model): pass class Model(models.Model): foreign_key = models.ForeignKey('Person', models.SET_DEFAULT) field = Model._meta.get_field('foreign_key') self.assertEqual(field.check(), [ Error( 'Field specifies on_delete=SET_DEFAULT, but has no default value.', hint='Set a default value, or change the on_delete rule.', obj=field, id='fields.E321', ), ]) def test_nullable_primary_key(self): class Model(models.Model): field = models.IntegerField(primary_key=True, null=True) field = Model._meta.get_field('field') with mock.patch.object(connection.features, 'interprets_empty_strings_as_nulls', False): results = field.check() self.assertEqual(results, [ Error( 'Primary keys must not have null=True.', hint='Set null=False on the field, or remove primary_key=True argument.', obj=field, id='fields.E007', ), ]) def test_not_swapped_model(self): class SwappableModel(models.Model): # A model that can be, but isn't swapped out. References to this # model should *not* raise any validation error. class Meta: swappable = 'TEST_SWAPPABLE_MODEL' class Model(models.Model): explicit_fk = models.ForeignKey( SwappableModel, models.CASCADE, related_name='explicit_fk', ) implicit_fk = models.ForeignKey( 'invalid_models_tests.SwappableModel', models.CASCADE, related_name='implicit_fk', ) explicit_m2m = models.ManyToManyField(SwappableModel, related_name='explicit_m2m') implicit_m2m = models.ManyToManyField( 'invalid_models_tests.SwappableModel', related_name='implicit_m2m', ) explicit_fk = Model._meta.get_field('explicit_fk') self.assertEqual(explicit_fk.check(), []) implicit_fk = Model._meta.get_field('implicit_fk') self.assertEqual(implicit_fk.check(), []) explicit_m2m = Model._meta.get_field('explicit_m2m') self.assertEqual(explicit_m2m.check(from_model=Model), []) implicit_m2m = Model._meta.get_field('implicit_m2m') self.assertEqual(implicit_m2m.check(from_model=Model), []) @override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement') def test_referencing_to_swapped_model(self): class Replacement(models.Model): pass class SwappedModel(models.Model): class Meta: swappable = 'TEST_SWAPPED_MODEL' class Model(models.Model): explicit_fk = models.ForeignKey( SwappedModel, models.CASCADE, related_name='explicit_fk', ) implicit_fk = models.ForeignKey( 'invalid_models_tests.SwappedModel', models.CASCADE, related_name='implicit_fk', ) explicit_m2m = models.ManyToManyField(SwappedModel, related_name='explicit_m2m') implicit_m2m = models.ManyToManyField( 'invalid_models_tests.SwappedModel', related_name='implicit_m2m', ) fields = [ Model._meta.get_field('explicit_fk'), Model._meta.get_field('implicit_fk'), Model._meta.get_field('explicit_m2m'), Model._meta.get_field('implicit_m2m'), ] expected_error = Error( ("Field defines a relation with the model " "'invalid_models_tests.SwappedModel', which has been swapped out."), hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.", id='fields.E301', ) for field in fields: expected_error.obj = field 
self.assertEqual(field.check(from_model=Model), [expected_error]) def test_related_field_has_invalid_related_name(self): digit = 0 illegal_non_alphanumeric = '!' whitespace = '\t' invalid_related_names = [ '%s_begins_with_digit' % digit, '%s_begins_with_illegal_non_alphanumeric' % illegal_non_alphanumeric, '%s_begins_with_whitespace' % whitespace, 'contains_%s_illegal_non_alphanumeric' % illegal_non_alphanumeric, 'contains_%s_whitespace' % whitespace, 'ends_with_with_illegal_non_alphanumeric_%s' % illegal_non_alphanumeric, 'ends_with_whitespace_%s' % whitespace, 'with', # a Python keyword 'related_name\n', '', ',', # non-ASCII ] class Parent(models.Model): pass for invalid_related_name in invalid_related_names: Child = type('Child%s' % invalid_related_name, (models.Model,), { 'parent': models.ForeignKey('Parent', models.CASCADE, related_name=invalid_related_name), '__module__': Parent.__module__, }) field = Child._meta.get_field('parent') self.assertEqual(Child.check(), [ Error( "The name '%s' is invalid related_name for field Child%s.parent" % (invalid_related_name, invalid_related_name), hint="Related name must be a valid Python identifier or end with a '+'", obj=field, id='fields.E306', ), ]) def test_related_field_has_valid_related_name(self): lowercase = 'a' uppercase = 'A' digit = 0 related_names = [ '%s_starts_with_lowercase' % lowercase, '%s_tarts_with_uppercase' % uppercase, '_starts_with_underscore', 'contains_%s_digit' % digit, 'ends_with_plus+', '_+', '+', '試', '試驗+', ] class Parent(models.Model): pass for related_name in related_names: Child = type('Child%s' % related_name, (models.Model,), { 'parent': models.ForeignKey('Parent', models.CASCADE, related_name=related_name), '__module__': Parent.__module__, }) self.assertEqual(Child.check(), []) def test_to_fields_exist(self): class Parent(models.Model): pass class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() parent = models.ForeignObject( Parent, on_delete=models.SET_NULL, from_fields=('a', 'b'), to_fields=('a', 'b'), ) field = Child._meta.get_field('parent') self.assertEqual(field.check(), [ Error( "The to_field 'a' doesn't exist on the related model 'invalid_models_tests.Parent'.", obj=field, id='fields.E312', ), Error( "The to_field 'b' doesn't exist on the related model 'invalid_models_tests.Parent'.", obj=field, id='fields.E312', ), ]) def test_to_fields_not_checked_if_related_model_doesnt_exist(self): class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() parent = models.ForeignObject( 'invalid_models_tests.Parent', on_delete=models.SET_NULL, from_fields=('a', 'b'), to_fields=('a', 'b'), ) field = Child._meta.get_field('parent') self.assertEqual(field.check(), [ Error( "Field defines a relation with model 'invalid_models_tests.Parent', " "which is either not installed, or is abstract.", id='fields.E300', obj=field, ), ]) def test_invalid_related_query_name(self): class Target(models.Model): pass class Model(models.Model): first = models.ForeignKey(Target, models.CASCADE, related_name='contains__double') second = models.ForeignKey(Target, models.CASCADE, related_query_name='ends_underscore_') self.assertEqual(Model.check(), [ Error( "Reverse query name 'contains__double' must not contain '__'.", hint=("Add or change a related_name or related_query_name " "argument for this field."), obj=Model._meta.get_field('first'), id='fields.E309', ), Error( "Reverse query name 'ends_underscore_' must not end with an " "underscore.", hint=("Add or 
change a related_name or related_query_name " "argument for this field."), obj=Model._meta.get_field('second'), id='fields.E308', ), ]) @isolate_apps('invalid_models_tests') class AccessorClashTests(SimpleTestCase): def test_fk_to_integer(self): self._test_accessor_clash( target=models.IntegerField(), relative=models.ForeignKey('Target', models.CASCADE)) def test_fk_to_fk(self): self._test_accessor_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ForeignKey('Target', models.CASCADE)) def test_fk_to_m2m(self): self._test_accessor_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target', models.CASCADE)) def test_m2m_to_integer(self): self._test_accessor_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target')) def test_m2m_to_fk(self): self._test_accessor_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ManyToManyField('Target')) def test_m2m_to_m2m(self): self._test_accessor_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target')) def _test_accessor_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): model_set = target class Model(models.Model): rel = relative self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.", hint=("Rename field 'Target.model_set', or add/change " "a related_name argument to the definition " "for field 'Model.rel'."), obj=Model._meta.get_field('rel'), id='fields.E302', ), ]) def test_clash_between_accessors(self): class Target(models.Model): pass class Model(models.Model): foreign = models.ForeignKey(Target, models.CASCADE) m2m = models.ManyToManyField(Target) self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.", hint=( "Add or change a related_name argument to the definition " "for 'Model.foreign' or 'Model.m2m'." ), obj=Model._meta.get_field('foreign'), id='fields.E304', ), Error( "Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.", hint=( "Add or change a related_name argument to the definition " "for 'Model.m2m' or 'Model.foreign'." ), obj=Model._meta.get_field('m2m'), id='fields.E304', ), ]) def test_m2m_to_m2m_with_inheritance(self): """ Ref #22047. """ class Target(models.Model): pass class Model(models.Model): children = models.ManyToManyField('Child', related_name="m2m_clash", related_query_name="no_clash") class Parent(models.Model): m2m_clash = models.ManyToManyField('Target') class Child(Parent): pass self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.", hint=( "Rename field 'Child.m2m_clash', or add/change a related_name " "argument to the definition for field 'Model.children'." 
), obj=Model._meta.get_field('children'), id='fields.E302', ) ]) def test_no_clash_for_hidden_related_name(self): class Stub(models.Model): pass class ManyToManyRel(models.Model): thing1 = models.ManyToManyField(Stub, related_name='+') thing2 = models.ManyToManyField(Stub, related_name='+') class FKRel(models.Model): thing1 = models.ForeignKey(Stub, models.CASCADE, related_name='+') thing2 = models.ForeignKey(Stub, models.CASCADE, related_name='+') self.assertEqual(ManyToManyRel.check(), []) self.assertEqual(FKRel.check(), []) @isolate_apps('invalid_models_tests') class ReverseQueryNameClashTests(SimpleTestCase): def test_fk_to_integer(self): self._test_reverse_query_name_clash( target=models.IntegerField(), relative=models.ForeignKey('Target', models.CASCADE)) def test_fk_to_fk(self): self._test_reverse_query_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ForeignKey('Target', models.CASCADE)) def test_fk_to_m2m(self): self._test_reverse_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target', models.CASCADE)) def test_m2m_to_integer(self): self._test_reverse_query_name_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target')) def test_m2m_to_fk(self): self._test_reverse_query_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ManyToManyField('Target')) def test_m2m_to_m2m(self): self._test_reverse_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target')) def _test_reverse_query_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): model = target class Model(models.Model): rel = relative self.assertEqual(Model.check(), [ Error( "Reverse query name for 'Model.rel' clashes with field name 'Target.model'.", hint=( "Rename field 'Target.model', or add/change a related_name " "argument to the definition for field 'Model.rel'." 
), obj=Model._meta.get_field('rel'), id='fields.E303', ), ]) @isolate_apps('invalid_models_tests') class ExplicitRelatedNameClashTests(SimpleTestCase): def test_fk_to_integer(self): self._test_explicit_related_name_clash( target=models.IntegerField(), relative=models.ForeignKey('Target', models.CASCADE, related_name='clash')) def test_fk_to_fk(self): self._test_explicit_related_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ForeignKey('Target', models.CASCADE, related_name='clash')) def test_fk_to_m2m(self): self._test_explicit_related_name_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey('Target', models.CASCADE, related_name='clash')) def test_m2m_to_integer(self): self._test_explicit_related_name_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target', related_name='clash')) def test_m2m_to_fk(self): self._test_explicit_related_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ManyToManyField('Target', related_name='clash')) def test_m2m_to_m2m(self): self._test_explicit_related_name_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField('Target', related_name='clash')) def _test_explicit_related_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): clash = target class Model(models.Model): rel = relative self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.rel' clashes with field name 'Target.clash'.", hint=( "Rename field 'Target.clash', or add/change a related_name " "argument to the definition for field 'Model.rel'." ), obj=Model._meta.get_field('rel'), id='fields.E302', ), Error( "Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.", hint=( "Rename field 'Target.clash', or add/change a related_name " "argument to the definition for field 'Model.rel'." 
), obj=Model._meta.get_field('rel'), id='fields.E303', ), ]) @isolate_apps('invalid_models_tests') class ExplicitRelatedQueryNameClashTests(SimpleTestCase): def test_fk_to_integer(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.IntegerField(), relative=models.ForeignKey( 'Target', models.CASCADE, related_name=related_name, related_query_name='clash', ) ) def test_hidden_fk_to_integer(self, related_name=None): self.test_fk_to_integer(related_name='+') def test_fk_to_fk(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ForeignKey( 'Target', models.CASCADE, related_name=related_name, related_query_name='clash', ) ) def test_hidden_fk_to_fk(self): self.test_fk_to_fk(related_name='+') def test_fk_to_m2m(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ForeignKey( 'Target', models.CASCADE, related_name=related_name, related_query_name='clash', ) ) def test_hidden_fk_to_m2m(self): self.test_fk_to_m2m(related_name='+') def test_m2m_to_integer(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.IntegerField(), relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash')) def test_hidden_m2m_to_integer(self): self.test_m2m_to_integer(related_name='+') def test_m2m_to_fk(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.ForeignKey('Another', models.CASCADE), relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash')) def test_hidden_m2m_to_fk(self): self.test_m2m_to_fk(related_name='+') def test_m2m_to_m2m(self, related_name=None): self._test_explicit_related_query_name_clash( target=models.ManyToManyField('Another'), relative=models.ManyToManyField( 'Target', related_name=related_name, related_query_name='clash', ) ) def test_hidden_m2m_to_m2m(self): self.test_m2m_to_m2m(related_name='+') def _test_explicit_related_query_name_clash(self, target, relative): class Another(models.Model): pass class Target(models.Model): clash = target class Model(models.Model): rel = relative self.assertEqual(Model.check(), [ Error( "Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.", hint=( "Rename field 'Target.clash', or add/change a related_name " "argument to the definition for field 'Model.rel'." ), obj=Model._meta.get_field('rel'), id='fields.E303', ), ]) @isolate_apps('invalid_models_tests') class SelfReferentialM2MClashTests(SimpleTestCase): def test_clash_between_accessors(self): class Model(models.Model): first_m2m = models.ManyToManyField('self', symmetrical=False) second_m2m = models.ManyToManyField('self', symmetrical=False) self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.", hint=( "Add or change a related_name argument to the definition " "for 'Model.first_m2m' or 'Model.second_m2m'." ), obj=Model._meta.get_field('first_m2m'), id='fields.E304', ), Error( "Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.", hint=( "Add or change a related_name argument to the definition " "for 'Model.second_m2m' or 'Model.first_m2m'." 
), obj=Model._meta.get_field('second_m2m'), id='fields.E304', ), ]) def test_accessor_clash(self): class Model(models.Model): model_set = models.ManyToManyField("self", symmetrical=False) self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.", hint=( "Rename field 'Model.model_set', or add/change a related_name " "argument to the definition for field 'Model.model_set'." ), obj=Model._meta.get_field('model_set'), id='fields.E302', ), ]) def test_reverse_query_name_clash(self): class Model(models.Model): model = models.ManyToManyField("self", symmetrical=False) self.assertEqual(Model.check(), [ Error( "Reverse query name for 'Model.model' clashes with field name 'Model.model'.", hint=( "Rename field 'Model.model', or add/change a related_name " "argument to the definition for field 'Model.model'." ), obj=Model._meta.get_field('model'), id='fields.E303', ), ]) def test_clash_under_explicit_related_name(self): class Model(models.Model): clash = models.IntegerField() m2m = models.ManyToManyField("self", symmetrical=False, related_name='clash') self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.", hint=( "Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.m2m'." ), obj=Model._meta.get_field('m2m'), id='fields.E302', ), Error( "Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.", hint=( "Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.m2m'." ), obj=Model._meta.get_field('m2m'), id='fields.E303', ), ]) def test_valid_model(self): class Model(models.Model): first = models.ManyToManyField("self", symmetrical=False, related_name='first_accessor') second = models.ManyToManyField("self", symmetrical=False, related_name='second_accessor') self.assertEqual(Model.check(), []) @isolate_apps('invalid_models_tests') class SelfReferentialFKClashTests(SimpleTestCase): def test_accessor_clash(self): class Model(models.Model): model_set = models.ForeignKey("Model", models.CASCADE) self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.", hint=( "Rename field 'Model.model_set', or add/change " "a related_name argument to the definition " "for field 'Model.model_set'." ), obj=Model._meta.get_field('model_set'), id='fields.E302', ), ]) def test_reverse_query_name_clash(self): class Model(models.Model): model = models.ForeignKey("Model", models.CASCADE) self.assertEqual(Model.check(), [ Error( "Reverse query name for 'Model.model' clashes with field name 'Model.model'.", hint=( "Rename field 'Model.model', or add/change a related_name " "argument to the definition for field 'Model.model'." ), obj=Model._meta.get_field('model'), id='fields.E303', ), ]) def test_clash_under_explicit_related_name(self): class Model(models.Model): clash = models.CharField(max_length=10) foreign = models.ForeignKey("Model", models.CASCADE, related_name='clash') self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.", hint=( "Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.foreign'." 
), obj=Model._meta.get_field('foreign'), id='fields.E302', ), Error( "Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.", hint=( "Rename field 'Model.clash', or add/change a related_name " "argument to the definition for field 'Model.foreign'." ), obj=Model._meta.get_field('foreign'), id='fields.E303', ), ]) @isolate_apps('invalid_models_tests') class ComplexClashTests(SimpleTestCase): # New tests should not be included here, because this is a single, # self-contained sanity check, not a test of everything. def test_complex_clash(self): class Target(models.Model): tgt_safe = models.CharField(max_length=10) clash = models.CharField(max_length=10) model = models.CharField(max_length=10) clash1_set = models.CharField(max_length=10) class Model(models.Model): src_safe = models.CharField(max_length=10) foreign_1 = models.ForeignKey(Target, models.CASCADE, related_name='id') foreign_2 = models.ForeignKey(Target, models.CASCADE, related_name='src_safe') m2m_1 = models.ManyToManyField(Target, related_name='id') m2m_2 = models.ManyToManyField(Target, related_name='src_safe') self.assertEqual(Model.check(), [ Error( "Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.foreign_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E302', ), Error( "Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.foreign_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E303', ), Error( "Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.", hint=("Add or change a related_name argument to " "the definition for 'Model.foreign_1' or 'Model.m2m_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E304', ), Error( "Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.", hint=("Add or change a related_name argument to " "the definition for 'Model.foreign_1' or 'Model.m2m_1'."), obj=Model._meta.get_field('foreign_1'), id='fields.E305', ), Error( "Reverse accessor for 'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.", hint=("Add or change a related_name argument " "to the definition for 'Model.foreign_2' or 'Model.m2m_2'."), obj=Model._meta.get_field('foreign_2'), id='fields.E304', ), Error( "Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.", hint=("Add or change a related_name argument to " "the definition for 'Model.foreign_2' or 'Model.m2m_2'."), obj=Model._meta.get_field('foreign_2'), id='fields.E305', ), Error( "Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.m2m_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E302', ), Error( "Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.", hint=("Rename field 'Target.id', or add/change a related_name " "argument to the definition for field 'Model.m2m_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E303', ), Error( "Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.", hint=("Add or change a related_name argument to the definition " "for 'Model.m2m_1' or 'Model.foreign_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E304', ), 
Error( "Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.", hint=("Add or change a related_name argument to " "the definition for 'Model.m2m_1' or 'Model.foreign_1'."), obj=Model._meta.get_field('m2m_1'), id='fields.E305', ), Error( "Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.", hint=("Add or change a related_name argument to the definition " "for 'Model.m2m_2' or 'Model.foreign_2'."), obj=Model._meta.get_field('m2m_2'), id='fields.E304', ), Error( "Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.", hint=("Add or change a related_name argument to the definition " "for 'Model.m2m_2' or 'Model.foreign_2'."), obj=Model._meta.get_field('m2m_2'), id='fields.E305', ), ]) def test_clash_parent_link(self): class Parent(models.Model): pass class Child(Parent): other_parent = models.OneToOneField(Parent, models.CASCADE) errors = [ ('fields.E304', 'accessor', 'parent_ptr', 'other_parent'), ('fields.E305', 'query name', 'parent_ptr', 'other_parent'), ('fields.E304', 'accessor', 'other_parent', 'parent_ptr'), ('fields.E305', 'query name', 'other_parent', 'parent_ptr'), ] self.assertEqual(Child.check(), [ Error( "Reverse %s for 'Child.%s' clashes with reverse %s for " "'Child.%s'." % (attr, field_name, attr, clash_name), hint=( "Add or change a related_name argument to the definition " "for 'Child.%s' or 'Child.%s'." % (field_name, clash_name) ), obj=Child._meta.get_field(field_name), id=error_id, ) for error_id, attr, field_name, clash_name in errors ]) @isolate_apps('invalid_models_tests') class M2mThroughFieldsTests(SimpleTestCase): def test_m2m_field_argument_validation(self): """ ManyToManyField accepts the ``through_fields`` kwarg only if an intermediary table is specified. """ class Fan(models.Model): pass with self.assertRaisesMessage(ValueError, 'Cannot specify through_fields without a through model'): models.ManyToManyField(Fan, through_fields=('f1', 'f2')) def test_invalid_order(self): """ Mixing up the order of link fields to ManyToManyField.through_fields triggers validation errors. """ class Fan(models.Model): pass class Event(models.Model): invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event')) class Invitation(models.Model): event = models.ForeignKey(Event, models.CASCADE) invitee = models.ForeignKey(Fan, models.CASCADE) inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+') field = Event._meta.get_field('invitees') self.assertEqual(field.check(from_model=Event), [ Error( "'Invitation.invitee' is not a foreign key to 'Event'.", hint="Did you mean one of the following foreign keys to 'Event': event?", obj=field, id='fields.E339', ), Error( "'Invitation.event' is not a foreign key to 'Fan'.", hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?", obj=field, id='fields.E339', ), ]) def test_invalid_field(self): """ Providing invalid field names to ManyToManyField.through_fields triggers validation errors. 
""" class Fan(models.Model): pass class Event(models.Model): invitees = models.ManyToManyField( Fan, through='Invitation', through_fields=('invalid_field_1', 'invalid_field_2'), ) class Invitation(models.Model): event = models.ForeignKey(Event, models.CASCADE) invitee = models.ForeignKey(Fan, models.CASCADE) inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+') field = Event._meta.get_field('invitees') self.assertEqual(field.check(from_model=Event), [ Error( "The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'.", hint="Did you mean one of the following foreign keys to 'Event': event?", obj=field, id='fields.E338', ), Error( "The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'.", hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?", obj=field, id='fields.E338', ), ]) def test_explicit_field_names(self): """ If ``through_fields`` kwarg is given, it must specify both link fields of the intermediary table. """ class Fan(models.Model): pass class Event(models.Model): invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee')) class Invitation(models.Model): event = models.ForeignKey(Event, models.CASCADE) invitee = models.ForeignKey(Fan, models.CASCADE) inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+') field = Event._meta.get_field('invitees') self.assertEqual(field.check(from_model=Event), [ Error( "Field specifies 'through_fields' but does not provide the names " "of the two link fields that should be used for the relation " "through model 'invalid_models_tests.Invitation'.", hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')", obj=field, id='fields.E337', ), ]) def test_superset_foreign_object(self): class Parent(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() c = models.PositiveIntegerField() class Meta: unique_together = (('a', 'b', 'c'),) class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() value = models.CharField(max_length=255) parent = models.ForeignObject( Parent, on_delete=models.SET_NULL, from_fields=('a', 'b'), to_fields=('a', 'b'), related_name='children', ) field = Child._meta.get_field('parent') self.assertEqual(field.check(from_model=Child), [ Error( "No subset of the fields 'a', 'b' on model 'Parent' is unique.", hint=( "Add unique=True on any of those fields or add at least " "a subset of them to a unique_together constraint." ), obj=field, id='fields.E310', ), ]) def test_intersection_foreign_object(self): class Parent(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() c = models.PositiveIntegerField() d = models.PositiveIntegerField() class Meta: unique_together = (('a', 'b', 'c'),) class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() d = models.PositiveIntegerField() value = models.CharField(max_length=255) parent = models.ForeignObject( Parent, on_delete=models.SET_NULL, from_fields=('a', 'b', 'd'), to_fields=('a', 'b', 'd'), related_name='children', ) field = Child._meta.get_field('parent') self.assertEqual(field.check(from_model=Child), [ Error( "No subset of the fields 'a', 'b', 'd' on model 'Parent' is unique.", hint=( "Add unique=True on any of those fields or add at least " "a subset of them to a unique_together constraint." ), obj=field, id='fields.E310', ), ])
import unittest from django.conf import settings from django.core.checks import Error, Warning from django.core.checks.model_checks import _check_lazy_references from django.db import connection, connections, models from django.db.models.functions import Lower from django.db.models.signals import post_init from django.test import SimpleTestCase from django.test.utils import isolate_apps, override_settings, register_lookup def get_max_column_name_length(): allowed_len = None db_alias = None for db in settings.DATABASES: connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is not None and not connection.features.truncates_names: if allowed_len is None or max_name_length < allowed_len: allowed_len = max_name_length db_alias = db return (allowed_len, db_alias) @isolate_apps('invalid_models_tests') class IndexTogetherTests(SimpleTestCase): def test_non_iterable(self): class Model(models.Model): class Meta: index_together = 42 self.assertEqual(Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id='models.E008', ), ]) def test_non_list(self): class Model(models.Model): class Meta: index_together = 'not-a-list' self.assertEqual(Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id='models.E008', ), ]) def test_list_containing_non_iterable(self): class Model(models.Model): class Meta: index_together = [('a', 'b'), 42] self.assertEqual(Model.check(), [ Error( "All 'index_together' elements must be lists or tuples.", obj=Model, id='models.E009', ), ]) def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: index_together = [['missing_field']] self.assertEqual(Model.check(), [ Error( "'index_together' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.IntegerField() class Bar(Foo): field2 = models.IntegerField() class Meta: index_together = [['field2', 'field1']] self.assertEqual(Bar.check(), [ Error( "'index_together' refers to field 'field1' which is not " "local to model 'Bar'.", hint='This issue may be caused by multi-table inheritance.', obj=Bar, id='models.E016', ), ]) def test_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: index_together = [['m2m']] self.assertEqual(Model.check(), [ Error( "'index_together' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'index_together'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: index_together = [['foo_1_id', 'foo_2']] self.assertEqual(Bar.check(), []) # unique_together tests are very similar to index_together tests. 
@isolate_apps('invalid_models_tests')
class UniqueTogetherTests(SimpleTestCase):
    def test_non_iterable(self):
        class Model(models.Model):
            class Meta:
                unique_together = 42

        self.assertEqual(Model.check(), [
            Error(
                "'unique_together' must be a list or tuple.",
                obj=Model,
                id='models.E010',
            ),
        ])

    def test_list_containing_non_iterable(self):
        class Model(models.Model):
            one = models.IntegerField()
            two = models.IntegerField()

            class Meta:
                unique_together = [('a', 'b'), 42]

        self.assertEqual(Model.check(), [
            Error(
                "All 'unique_together' elements must be lists or tuples.",
                obj=Model,
                id='models.E011',
            ),
        ])

    def test_non_list(self):
        class Model(models.Model):
            class Meta:
                unique_together = 'not-a-list'

        self.assertEqual(Model.check(), [
            Error(
                "'unique_together' must be a list or tuple.",
                obj=Model,
                id='models.E010',
            ),
        ])

    def test_valid_model(self):
        class Model(models.Model):
            one = models.IntegerField()
            two = models.IntegerField()

            class Meta:
                # unique_together can be a simple tuple
                unique_together = ('one', 'two')

        self.assertEqual(Model.check(), [])

    def test_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                unique_together = [['missing_field']]

        self.assertEqual(Model.check(), [
            Error(
                "'unique_together' refers to the nonexistent field 'missing_field'.",
                obj=Model,
                id='models.E012',
            ),
        ])

    def test_pointing_to_m2m(self):
        class Model(models.Model):
            m2m = models.ManyToManyField('self')

            class Meta:
                unique_together = [['m2m']]

        self.assertEqual(Model.check(), [
            Error(
                "'unique_together' refers to a ManyToManyField 'm2m', but "
                "ManyToManyFields are not permitted in 'unique_together'.",
                obj=Model,
                id='models.E013',
            ),
        ])

    def test_pointing_to_fk(self):
        class Foo(models.Model):
            pass

        class Bar(models.Model):
            foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1')
            foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2')

            class Meta:
                unique_together = [['foo_1_id', 'foo_2']]

        self.assertEqual(Bar.check(), [])


@isolate_apps('invalid_models_tests')
class IndexesTests(SimpleTestCase):
    def test_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                indexes = [models.Index(fields=['missing_field'], name='name')]

        self.assertEqual(Model.check(), [
            Error(
                "'indexes' refers to the nonexistent field 'missing_field'.",
                obj=Model,
                id='models.E012',
            ),
        ])

    def test_pointing_to_m2m_field(self):
        class Model(models.Model):
            m2m = models.ManyToManyField('self')

            class Meta:
                indexes = [models.Index(fields=['m2m'], name='name')]

        self.assertEqual(Model.check(), [
            Error(
                "'indexes' refers to a ManyToManyField 'm2m', but "
                "ManyToManyFields are not permitted in 'indexes'.",
                obj=Model,
                id='models.E013',
            ),
        ])

    def test_pointing_to_non_local_field(self):
        class Foo(models.Model):
            field1 = models.IntegerField()

        class Bar(Foo):
            field2 = models.IntegerField()

            class Meta:
                indexes = [models.Index(fields=['field2', 'field1'], name='name')]

        self.assertEqual(Bar.check(), [
            Error(
                "'indexes' refers to field 'field1' which is not local to "
                "model 'Bar'.",
                hint='This issue may be caused by multi-table inheritance.',
                obj=Bar,
                id='models.E016',
            ),
        ])

    def test_pointing_to_fk(self):
        class Foo(models.Model):
            pass

        class Bar(models.Model):
            foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1')
            foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2')

            class Meta:
                indexes = [models.Index(fields=['foo_1_id', 'foo_2'], name='index_name')]

        self.assertEqual(Bar.check(), [])

    def test_name_constraints(self):
        class Model(models.Model):
            class Meta:
                indexes = [
                    models.Index(fields=['id'], name='_index_name'),
                    models.Index(fields=['id'], name='5index_name'),
                ]

        self.assertEqual(Model.check(), [
            Error(
                "The index name '%sindex_name' cannot start with an "
                "underscore or a number." % prefix,
                obj=Model,
                id='models.E033',
            ) for prefix in ('_', '5')
        ])

    def test_max_name_length(self):
        index_name = 'x' * 31

        class Model(models.Model):
            class Meta:
                indexes = [models.Index(fields=['id'], name=index_name)]

        self.assertEqual(Model.check(), [
            Error(
                "The index name '%s' cannot be longer than 30 characters." % index_name,
                obj=Model,
                id='models.E034',
            ),
        ])


@isolate_apps('invalid_models_tests')
class FieldNamesTests(SimpleTestCase):
    def test_ending_with_underscore(self):
        class Model(models.Model):
            field_ = models.CharField(max_length=10)
            m2m_ = models.ManyToManyField('self')

        self.assertEqual(Model.check(), [
            Error(
                'Field names must not end with an underscore.',
                obj=Model._meta.get_field('field_'),
                id='fields.E001',
            ),
            Error(
                'Field names must not end with an underscore.',
                obj=Model._meta.get_field('m2m_'),
                id='fields.E001',
            ),
        ])

    max_column_name_length, column_limit_db_alias = get_max_column_name_length()

    @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.")
    def test_M2M_long_column_name(self):
        """
        #13711 -- Model check for long M2M column names when database has
        column name length limits.
        """
        # A model with a very long name which will be used to set relations to.
        class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model):
            title = models.CharField(max_length=11)

        # Main model for which checks will be performed.
        class ModelWithLongField(models.Model):
            m2m_field = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name='rn1',
            )
            m2m_field2 = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name='rn2',
                through='m2msimple',
            )
            m2m_field3 = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name='rn3',
                through='m2mcomplex',
            )
            fk = models.ForeignKey(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                models.CASCADE,
                related_name='rn4',
            )

        # Models used for setting `through` in M2M field.
        class m2msimple(models.Model):
            id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)

        class m2mcomplex(models.Model):
            id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)

        long_field_name = 'a' * (self.max_column_name_length + 1)
        models.ForeignKey(
            VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
            models.CASCADE,
        ).contribute_to_class(m2msimple, long_field_name)

        models.ForeignKey(
            VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
            models.CASCADE,
            db_column=long_field_name,
        ).contribute_to_class(m2mcomplex, long_field_name)

        errors = ModelWithLongField.check()

        # First error because of the M2M field set on the model with a long name.
        m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id"
        if self.max_column_name_length > len(m2m_long_name):
            # Some databases support names longer than the test name.
            expected = []
        else:
            expected = [
                Error(
                    'Autogenerated column name too long for M2M field "%s". '
                    'Maximum length is "%s" for database "%s".'
                    % (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias),
                    hint="Use 'through' to create a separate model for "
                         "M2M and then set column_name using 'db_column'.",
                    obj=ModelWithLongField,
                    id='models.E019',
                )
            ]

        # Second error because the FK specified in the `through` model
        # `m2msimple` has an auto-generated name longer than allowed.
        # There will be no check errors in the other M2M because it
        # specifies db_column for the FK in the `through` model, even if the
        # actual name is longer than the limits of the database.
        expected.append(
            Error(
                'Autogenerated column name too long for M2M field "%s_id". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint="Use 'through' to create a separate model for "
                     "M2M and then set column_name using 'db_column'.",
                obj=ModelWithLongField,
                id='models.E019',
            )
        )

        self.assertEqual(errors, expected)

    @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.")
    def test_local_field_long_column_name(self):
        """
        #13711 -- Model check for long column names when database does not
        support long names.
        """
        class ModelWithLongField(models.Model):
            title = models.CharField(max_length=11)

        long_field_name = 'a' * (self.max_column_name_length + 1)
        long_field_name2 = 'b' * (self.max_column_name_length + 1)
        models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name)
        models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2)

        self.assertEqual(ModelWithLongField.check(), [
            Error(
                'Autogenerated column name too long for field "%s". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint="Set the column name manually using 'db_column'.",
                obj=ModelWithLongField,
                id='models.E018',
            )
        ])

    def test_including_separator(self):
        class Model(models.Model):
            some__field = models.IntegerField()

        self.assertEqual(Model.check(), [
            Error(
                'Field names must not contain "__".',
                obj=Model._meta.get_field('some__field'),
                id='fields.E002',
            )
        ])

    def test_pk(self):
        class Model(models.Model):
            pk = models.IntegerField()

        self.assertEqual(Model.check(), [
            Error(
                "'pk' is a reserved word that cannot be used as a field name.",
                obj=Model._meta.get_field('pk'),
                id='fields.E003',
            )
        ])

    def test_db_column_clash(self):
        class Model(models.Model):
            foo = models.IntegerField()
            bar = models.IntegerField(db_column='foo')

        self.assertEqual(Model.check(), [
            Error(
                "Field 'bar' has column name 'foo' that is used by "
                "another field.",
                hint="Specify a 'db_column' for the field.",
                obj=Model,
                id='models.E007',
            )
        ])


@isolate_apps('invalid_models_tests')
class ShadowingFieldsTests(SimpleTestCase):
    def test_field_name_clash_with_child_accessor(self):
        class Parent(models.Model):
            pass

        class Child(Parent):
            child = models.CharField(max_length=100)

        self.assertEqual(Child.check(), [
            Error(
                "The field 'child' clashes with the field "
                "'child' from model 'invalid_models_tests.parent'.",
                obj=Child._meta.get_field('child'),
                id='models.E006',
            )
        ])

    def test_multiinheritance_clash(self):
        class Mother(models.Model):
            clash = models.IntegerField()

        class Father(models.Model):
            clash = models.IntegerField()

        class Child(Mother, Father):
            # Here we have two clashes: 'id' (the automatic field) and
            # 'clash', because both parents define these fields.
            pass

        self.assertEqual(Child.check(), [
            Error(
                "The field 'id' from parent model "
                "'invalid_models_tests.mother' clashes with the field 'id' "
                "from parent model 'invalid_models_tests.father'.",
                obj=Child,
                id='models.E005',
            ),
            Error(
                "The field 'clash' from parent model "
                "'invalid_models_tests.mother' clashes with the field 'clash' "
                "from parent model 'invalid_models_tests.father'.",
                obj=Child,
                id='models.E005',
            )
        ])

    def test_inheritance_clash(self):
        class Parent(models.Model):
            f_id = models.IntegerField()

        class Target(models.Model):
            # This field doesn't result in a clash.
            f_id = models.IntegerField()

        class Child(Parent):
            # This field clashes with the parent "f_id" field.
            f = models.ForeignKey(Target, models.CASCADE)

        self.assertEqual(Child.check(), [
            Error(
                "The field 'f' clashes with the field 'f_id' "
                "from model 'invalid_models_tests.parent'.",
                obj=Child._meta.get_field('f'),
                id='models.E006',
            )
        ])

    def test_multigeneration_inheritance(self):
        class GrandParent(models.Model):
            clash = models.IntegerField()

        class Parent(GrandParent):
            pass

        class Child(Parent):
            pass

        class GrandChild(Child):
            clash = models.IntegerField()

        self.assertEqual(GrandChild.check(), [
            Error(
                "The field 'clash' clashes with the field 'clash' "
                "from model 'invalid_models_tests.grandparent'.",
                obj=GrandChild._meta.get_field('clash'),
                id='models.E006',
            )
        ])

    def test_id_clash(self):
        class Target(models.Model):
            pass

        class Model(models.Model):
            fk = models.ForeignKey(Target, models.CASCADE)
            fk_id = models.IntegerField()

        self.assertEqual(Model.check(), [
            Error(
                "The field 'fk_id' clashes with the field 'fk' from model "
                "'invalid_models_tests.model'.",
                obj=Model._meta.get_field('fk_id'),
                id='models.E006',
            )
        ])


@isolate_apps('invalid_models_tests')
class OtherModelTests(SimpleTestCase):
    def test_unique_primary_key(self):
        invalid_id = models.IntegerField(primary_key=False)

        class Model(models.Model):
            id = invalid_id

        self.assertEqual(Model.check(), [
            Error(
                "'id' can only be used as a field name if the field also sets "
                "'primary_key=True'.",
                obj=Model,
                id='models.E004',
            ),
        ])

    def test_ordering_non_iterable(self):
        class Model(models.Model):
            class Meta:
                ordering = 'missing_field'

        self.assertEqual(Model.check(), [
            Error(
                "'ordering' must be a tuple or list "
                "(even if you want to order by only one field).",
                obj=Model,
                id='models.E014',
            ),
        ])

    def test_just_ordering_no_errors(self):
        class Model(models.Model):
            order = models.PositiveIntegerField()

            class Meta:
                ordering = ['order']

        self.assertEqual(Model.check(), [])

    def test_just_order_with_respect_to_no_errors(self):
        class Question(models.Model):
            pass

        class Answer(models.Model):
            question = models.ForeignKey(Question, models.CASCADE)

            class Meta:
                order_with_respect_to = 'question'

        self.assertEqual(Answer.check(), [])

    def test_ordering_with_order_with_respect_to(self):
        class Question(models.Model):
            pass

        class Answer(models.Model):
            question = models.ForeignKey(Question, models.CASCADE)
            order = models.IntegerField()

            class Meta:
                order_with_respect_to = 'question'
                ordering = ['order']

        self.assertEqual(Answer.check(), [
            Error(
                "'ordering' and 'order_with_respect_to' cannot be used together.",
                obj=Answer,
                id='models.E021',
            ),
        ])

    def test_non_valid(self):
        class RelationModel(models.Model):
            pass

        class Model(models.Model):
            relation = models.ManyToManyField(RelationModel)

            class Meta:
                ordering = ['relation']

        self.assertEqual(Model.check(), [
            Error(
                "'ordering' refers to the nonexistent field, related field, "
                "or lookup 'relation'.",
                obj=Model,
                id='models.E015',
            ),
        ])

    def test_ordering_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                ordering = ('missing_field',)

        self.assertEqual(Model.check(), [
            Error(
                "'ordering' refers to the nonexistent field, related field, "
                "or lookup 'missing_field'.",
                obj=Model,
                id='models.E015',
            )
        ])

    def test_ordering_pointing_to_missing_foreignkey_field(self):
        class Model(models.Model):
            missing_fk_field = models.IntegerField()

            class Meta:
                ordering = ('missing_fk_field_id',)

        self.assertEqual(Model.check(), [
            Error(
                "'ordering' refers to the nonexistent field, related field, "
                "or lookup 'missing_fk_field_id'.",
                obj=Model,
                id='models.E015',
            )
        ])

    def test_ordering_pointing_to_missing_related_field(self):
        class Model(models.Model):
            test = models.IntegerField()

            class Meta:
                ordering = ('missing_related__id',)

        self.assertEqual(Model.check(), [
            Error(
                "'ordering' refers to the nonexistent field, related field, "
                "or lookup 'missing_related__id'.",
                obj=Model,
                id='models.E015',
            )
        ])

    def test_ordering_pointing_to_missing_related_model_field(self):
        class Parent(models.Model):
            pass

        class Child(models.Model):
            parent = models.ForeignKey(Parent, models.CASCADE)

            class Meta:
                ordering = ('parent__missing_field',)

        self.assertEqual(Child.check(), [
            Error(
                "'ordering' refers to the nonexistent field, related field, "
                "or lookup 'parent__missing_field'.",
                obj=Child,
                id='models.E015',
            )
        ])

    def test_ordering_pointing_to_non_related_field(self):
        class Child(models.Model):
            parent = models.IntegerField()

            class Meta:
                ordering = ('parent__missing_field',)

        self.assertEqual(Child.check(), [
            Error(
                "'ordering' refers to the nonexistent field, related field, "
                "or lookup 'parent__missing_field'.",
                obj=Child,
                id='models.E015',
            )
        ])

    def test_ordering_pointing_to_two_related_model_field(self):
        class Parent2(models.Model):
            pass

        class Parent1(models.Model):
            parent2 = models.ForeignKey(Parent2, models.CASCADE)

        class Child(models.Model):
            parent1 = models.ForeignKey(Parent1, models.CASCADE)

            class Meta:
                ordering = ('parent1__parent2__missing_field',)

        self.assertEqual(Child.check(), [
            Error(
                "'ordering' refers to the nonexistent field, related field, "
                "or lookup 'parent1__parent2__missing_field'.",
                obj=Child,
                id='models.E015',
            )
        ])

    def test_ordering_pointing_multiple_times_to_model_fields(self):
        class Parent(models.Model):
            field1 = models.CharField(max_length=100)
            field2 = models.CharField(max_length=100)

        class Child(models.Model):
            parent = models.ForeignKey(Parent, models.CASCADE)

            class Meta:
                ordering = ('parent__field1__field2',)

        self.assertEqual(Child.check(), [
            Error(
                "'ordering' refers to the nonexistent field, related field, "
                "or lookup 'parent__field1__field2'.",
                obj=Child,
                id='models.E015',
            )
        ])

    def test_ordering_allows_registered_lookups(self):
        class Model(models.Model):
            test = models.CharField(max_length=100)

            class Meta:
                ordering = ('test__lower',)

        with register_lookup(models.CharField, Lower):
            self.assertEqual(Model.check(), [])

    def test_ordering_pointing_to_related_model_pk(self):
        class Parent(models.Model):
            pass

        class Child(models.Model):
            parent = models.ForeignKey(Parent, models.CASCADE)

            class Meta:
                ordering = ('parent__pk',)

        self.assertEqual(Child.check(), [])

    def test_ordering_pointing_to_foreignkey_field(self):
        class Parent(models.Model):
            pass

        class Child(models.Model):
            parent = models.ForeignKey(Parent, models.CASCADE)

            class Meta:
                ordering = ('parent_id',)

        self.assertFalse(Child.check())

    def test_name_beginning_with_underscore(self):
        class _Model(models.Model):
            pass

        self.assertEqual(_Model.check(), [
            Error(
                "The model name '_Model' cannot start or end with an underscore "
                "as it collides with the query lookup syntax.",
                obj=_Model,
                id='models.E023',
            )
        ])

    def test_name_ending_with_underscore(self):
        class Model_(models.Model):
            pass

        self.assertEqual(Model_.check(), [
            Error(
                "The model name 'Model_' cannot start or end with an underscore "
                "as it collides with the query lookup syntax.",
                obj=Model_,
                id='models.E023',
            )
        ])

    def test_name_contains_double_underscores(self):
        class Test__Model(models.Model):
            pass

        self.assertEqual(Test__Model.check(), [
            Error(
                "The model name 'Test__Model' cannot contain double underscores "
                "as it collides with the query lookup syntax.",
                obj=Test__Model,
                id='models.E024',
            )
        ])

    def test_property_and_related_field_accessor_clash(self):
        class Model(models.Model):
            fk = models.ForeignKey('self', models.CASCADE)

            @property
            def fk_id(self):
                pass

        self.assertEqual(Model.check(), [
            Error(
                "The property 'fk_id' clashes with a related field accessor.",
                obj=Model,
                id='models.E025',
            )
        ])

    def test_single_primary_key(self):
        class Model(models.Model):
            foo = models.IntegerField(primary_key=True)
            bar = models.IntegerField(primary_key=True)

        self.assertEqual(Model.check(), [
            Error(
                "The model cannot have more than one field with 'primary_key=True'.",
                obj=Model,
                id='models.E026',
            )
        ])

    @override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model')
    def test_swappable_missing_app_name(self):
        class Model(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE'

        self.assertEqual(Model.check(), [
            Error(
                "'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.",
                id='models.E001',
            ),
        ])

    @override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target')
    def test_swappable_missing_app(self):
        class Model(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL'

        self.assertEqual(Model.check(), [
            Error(
                "'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', "
                'which has not been installed, or is abstract.',
                id='models.E002',
            ),
        ])

    def test_two_m2m_through_same_relationship(self):
        class Person(models.Model):
            pass

        class Group(models.Model):
            primary = models.ManyToManyField(Person, through='Membership', related_name='primary')
            secondary = models.ManyToManyField(Person, through='Membership', related_name='secondary')

        class Membership(models.Model):
            person = models.ForeignKey(Person, models.CASCADE)
            group = models.ForeignKey(Group, models.CASCADE)

        self.assertEqual(Group.check(), [
            Error(
                "The model has two identical many-to-many relations through "
                "the intermediate model 'invalid_models_tests.Membership'.",
                obj=Group,
                id='models.E003',
            )
        ])

    def test_two_m2m_through_same_model_with_different_through_fields(self):
        class Country(models.Model):
            pass

        class ShippingMethod(models.Model):
            to_countries = models.ManyToManyField(
                Country, through='ShippingMethodPrice',
                through_fields=('method', 'to_country'),
            )
            from_countries = models.ManyToManyField(
                Country, through='ShippingMethodPrice',
                through_fields=('method', 'from_country'),
                related_name='+',
            )

        class ShippingMethodPrice(models.Model):
            method = models.ForeignKey(ShippingMethod, models.CASCADE)
            to_country = models.ForeignKey(Country, models.CASCADE)
            from_country = models.ForeignKey(Country, models.CASCADE)

        self.assertEqual(ShippingMethod.check(), [])

    def test_onetoone_with_parent_model(self):
        class Place(models.Model):
            pass

        class ParkingLot(Place):
            other_place = models.OneToOneField(Place, models.CASCADE, related_name='other_parking')

        self.assertEqual(ParkingLot.check(), [])

    def test_onetoone_with_explicit_parent_link_parent_model(self):
        class Place(models.Model):
            pass

        class ParkingLot(Place):
            place = models.OneToOneField(Place, models.CASCADE, parent_link=True, primary_key=True)
            other_place = models.OneToOneField(Place, models.CASCADE, related_name='other_parking')

        self.assertEqual(ParkingLot.check(), [])

    def test_m2m_table_name_clash(self):
        class Foo(models.Model):
            bar = models.ManyToManyField('Bar', db_table='myapp_bar')

            class Meta:
                db_table = 'myapp_foo'

        class Bar(models.Model):
            class Meta:
                db_table = 'myapp_bar'

        self.assertEqual(Foo.check(), [
            Error(
                "The field's intermediary table 'myapp_bar' clashes with the "
                "table name of 'invalid_models_tests.Bar'.",
                obj=Foo._meta.get_field('bar'),
                id='fields.E340',
            )
        ])

    def test_m2m_field_table_name_clash(self):
        class Foo(models.Model):
            pass

        class Bar(models.Model):
            foos = models.ManyToManyField(Foo, db_table='clash')

        class Baz(models.Model):
            foos = models.ManyToManyField(Foo, db_table='clash')

        self.assertEqual(Bar.check() + Baz.check(), [
            Error(
                "The field's intermediary table 'clash' clashes with the "
                "table name of 'invalid_models_tests.Baz.foos'.",
                obj=Bar._meta.get_field('foos'),
                id='fields.E340',
            ),
            Error(
                "The field's intermediary table 'clash' clashes with the "
                "table name of 'invalid_models_tests.Bar.foos'.",
                obj=Baz._meta.get_field('foos'),
                id='fields.E340',
            )
        ])

    def test_m2m_autogenerated_table_name_clash(self):
        class Foo(models.Model):
            class Meta:
                db_table = 'bar_foos'

        class Bar(models.Model):
            # The autogenerated `db_table` will be bar_foos.
            foos = models.ManyToManyField(Foo)

            class Meta:
                db_table = 'bar'

        self.assertEqual(Bar.check(), [
            Error(
                "The field's intermediary table 'bar_foos' clashes with the "
                "table name of 'invalid_models_tests.Foo'.",
                obj=Bar._meta.get_field('foos'),
                id='fields.E340',
            )
        ])

    def test_m2m_unmanaged_shadow_models_not_checked(self):
        class A1(models.Model):
            pass

        class C1(models.Model):
            mm_a = models.ManyToManyField(A1, db_table='d1')

        # Unmanaged models that shadow the above models. Reused table names
        # shouldn't be flagged by any checks.
        class A2(models.Model):
            class Meta:
                managed = False

        class C2(models.Model):
            mm_a = models.ManyToManyField(A2, through='Intermediate')

            class Meta:
                managed = False

        class Intermediate(models.Model):
            a2 = models.ForeignKey(A2, models.CASCADE, db_column='a1_id')
            c2 = models.ForeignKey(C2, models.CASCADE, db_column='c1_id')

            class Meta:
                db_table = 'd1'
                managed = False

        self.assertEqual(C1.check(), [])
        self.assertEqual(C2.check(), [])

    def test_m2m_to_concrete_and_proxy_allowed(self):
        class A(models.Model):
            pass

        class Through(models.Model):
            a = models.ForeignKey('A', models.CASCADE)
            c = models.ForeignKey('C', models.CASCADE)

        class ThroughProxy(Through):
            class Meta:
                proxy = True

        class C(models.Model):
            mm_a = models.ManyToManyField(A, through=Through)
            mm_aproxy = models.ManyToManyField(A, through=ThroughProxy, related_name='proxied_m2m')

        self.assertEqual(C.check(), [])

    @isolate_apps('django.contrib.auth', kwarg_name='apps')
    def test_lazy_reference_checks(self, apps):
        class DummyModel(models.Model):
            author = models.ForeignKey('Author', models.CASCADE)

            class Meta:
                app_label = 'invalid_models_tests'

        class DummyClass:
            def __call__(self, **kwargs):
                pass

            def dummy_method(self):
                pass

        def dummy_function(*args, **kwargs):
            pass

        apps.lazy_model_operation(dummy_function, ('auth', 'imaginarymodel'))
        apps.lazy_model_operation(dummy_function, ('fanciful_app', 'imaginarymodel'))

        post_init.connect(dummy_function, sender='missing-app.Model', apps=apps)
        post_init.connect(DummyClass(), sender='missing-app.Model', apps=apps)
        post_init.connect(DummyClass().dummy_method, sender='missing-app.Model', apps=apps)

        self.assertEqual(_check_lazy_references(apps), [
            Error(
                "%r contains a lazy reference to auth.imaginarymodel, "
                "but app 'auth' doesn't provide model 'imaginarymodel'." % dummy_function,
                obj=dummy_function,
                id='models.E022',
            ),
            Error(
                "%r contains a lazy reference to fanciful_app.imaginarymodel, "
                "but app 'fanciful_app' isn't installed." % dummy_function,
                obj=dummy_function,
                id='models.E022',
            ),
            Error(
                "An instance of class 'DummyClass' was connected to "
                "the 'post_init' signal with a lazy reference to the sender "
                "'missing-app.model', but app 'missing-app' isn't installed.",
                hint=None,
                obj='invalid_models_tests.test_models',
                id='signals.E001',
            ),
            Error(
                "Bound method 'DummyClass.dummy_method' was connected to the "
                "'post_init' signal with a lazy reference to the sender "
                "'missing-app.model', but app 'missing-app' isn't installed.",
                hint=None,
                obj='invalid_models_tests.test_models',
                id='signals.E001',
            ),
            Error(
                "The field invalid_models_tests.DummyModel.author was declared "
                "with a lazy reference to 'invalid_models_tests.author', but app "
                "'invalid_models_tests' isn't installed.",
                hint=None,
                obj=DummyModel.author.field,
                id='fields.E307',
            ),
            Error(
                "The function 'dummy_function' was connected to the 'post_init' "
                "signal with a lazy reference to the sender "
                "'missing-app.model', but app 'missing-app' isn't installed.",
                hint=None,
                obj='invalid_models_tests.test_models',
                id='signals.E001',
            ),
        ])


@isolate_apps('invalid_models_tests')
class ConstraintsTests(SimpleTestCase):
    def test_check_constraints(self):
        class Model(models.Model):
            age = models.IntegerField()

            class Meta:
                constraints = [models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult')]

        errors = Model.check()
        warn = Warning(
            '%s does not support check constraints.' % connection.display_name,
            hint=(
                "A constraint won't be created. Silence this warning if you "
                "don't care about it."
            ),
            obj=Model,
            id='models.W027',
        )
        expected = [] if connection.features.supports_table_check_constraints else [warn, warn]
        self.assertCountEqual(errors, expected)

    def test_check_constraints_required_db_features(self):
        class Model(models.Model):
            age = models.IntegerField()

            class Meta:
                required_db_features = {'supports_table_check_constraints'}
                constraints = [models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult')]

        self.assertEqual(Model.check(), [])
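
# Illustrative note (not part of the original suite): every test in this
# module follows the same pattern -- declare a deliberately broken model under
# @isolate_apps and compare Model.check() against the expected Error list.
# A minimal standalone variant, assuming a concrete model SomeModel exists:
#
#     from django.core import checks
#     all_errors = checks.run_checks()   # same machinery as `manage.py check`
#     model_errors = SomeModel.check()   # check a single model directly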
import os
import tempfile
import uuid

from django.contrib.contenttypes.fields import (
    GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageFieldFile
from django.utils.translation import gettext_lazy as _

try:
    from PIL import Image
except ImportError:
    Image = None


class Foo(models.Model):
    a = models.CharField(max_length=10)
    d = models.DecimalField(max_digits=5, decimal_places=3)


def get_foo():
    return Foo.objects.get(id=1).pk


class Bar(models.Model):
    b = models.CharField(max_length=10)
    a = models.ForeignKey(Foo, models.CASCADE, default=get_foo, related_name='bars')


class Whiz(models.Model):
    CHOICES = (
        ('Group 1', (
            (1, 'First'),
            (2, 'Second'),
        )),
        ('Group 2', (
            (3, 'Third'),
            (4, 'Fourth'),
        )),
        (0, 'Other'),
        (5, _('translated')),
    )
    c = models.IntegerField(choices=CHOICES, null=True)


class WhizDelayed(models.Model):
    c = models.IntegerField(choices=(), null=True)


# Contrived way of adding choices later.
WhizDelayed._meta.get_field('c').choices = Whiz.CHOICES


class WhizIter(models.Model):
    c = models.IntegerField(choices=iter(Whiz.CHOICES), null=True)


class WhizIterEmpty(models.Model):
    c = models.CharField(choices=iter(()), blank=True, max_length=1)


class Choiceful(models.Model):
    no_choices = models.IntegerField(null=True)
    empty_choices = models.IntegerField(choices=(), null=True)
    with_choices = models.IntegerField(choices=[(1, 'A')], null=True)
    empty_choices_bool = models.BooleanField(choices=())
    empty_choices_text = models.TextField(choices=())


class BigD(models.Model):
    d = models.DecimalField(max_digits=32, decimal_places=30)


class FloatModel(models.Model):
    size = models.FloatField()


class BigS(models.Model):
    s = models.SlugField(max_length=255)


class UnicodeSlugField(models.Model):
    s = models.SlugField(max_length=255, allow_unicode=True)


class AutoModel(models.Model):
    value = models.AutoField(primary_key=True)


class BigAutoModel(models.Model):
    value = models.BigAutoField(primary_key=True)


class SmallAutoModel(models.Model):
    value = models.SmallAutoField(primary_key=True)


class SmallIntegerModel(models.Model):
    value = models.SmallIntegerField()


class IntegerModel(models.Model):
    value = models.IntegerField()


class BigIntegerModel(models.Model):
    value = models.BigIntegerField()
    null_value = models.BigIntegerField(null=True, blank=True)


class PositiveBigIntegerModel(models.Model):
    value = models.PositiveBigIntegerField()


class PositiveSmallIntegerModel(models.Model):
    value = models.PositiveSmallIntegerField()


class PositiveIntegerModel(models.Model):
    value = models.PositiveIntegerField()


class Post(models.Model):
    title = models.CharField(max_length=100)
    body = models.TextField()


class NullBooleanModel(models.Model):
    nbfield = models.BooleanField(null=True, blank=True)
    nbfield_old = models.NullBooleanField()


class BooleanModel(models.Model):
    bfield = models.BooleanField()
    string = models.CharField(max_length=10, default='abc')


class DateTimeModel(models.Model):
    d = models.DateField()
    dt = models.DateTimeField()
    t = models.TimeField()


class DurationModel(models.Model):
    field = models.DurationField()


class NullDurationModel(models.Model):
    field = models.DurationField(null=True)


class PrimaryKeyCharModel(models.Model):
    string = models.CharField(max_length=10, primary_key=True)
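
# Illustrative note (not part of the original module): the grouped CHOICES on
# Whiz above mix named groups with flat options; Django renders the groups as
# <optgroup> sections in form widgets, and get_FOO_display() flattens them
# when resolving labels:
#
#     Whiz(c=1).get_c_display()  # 'First'
#     Whiz(c=0).get_c_display()  # 'Other'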
class FksToBooleans(models.Model):
    """Model with FKs to models with {Null,}BooleanFields, #15040."""
    bf = models.ForeignKey(BooleanModel, models.CASCADE)
    nbf = models.ForeignKey(NullBooleanModel, models.CASCADE)


class FkToChar(models.Model):
    """Model with FK to a model with a CharField primary key, #19299."""
    out = models.ForeignKey(PrimaryKeyCharModel, models.CASCADE)


class RenamedField(models.Model):
    modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),))


class VerboseNameField(models.Model):
    id = models.AutoField("verbose pk", primary_key=True)
    field1 = models.BigIntegerField("verbose field1")
    field2 = models.BooleanField("verbose field2", default=False)
    field3 = models.CharField("verbose field3", max_length=10)
    field4 = models.DateField("verbose field4")
    field5 = models.DateTimeField("verbose field5")
    field6 = models.DecimalField("verbose field6", max_digits=6, decimal_places=1)
    field7 = models.EmailField("verbose field7")
    field8 = models.FileField("verbose field8", upload_to="unused")
    field9 = models.FilePathField("verbose field9")
    field10 = models.FloatField("verbose field10")
    # Don't want to depend on Pillow in this test.
    # field_image = models.ImageField("verbose field")
    field11 = models.IntegerField("verbose field11")
    field12 = models.GenericIPAddressField("verbose field12", protocol="ipv4")
    field13 = models.NullBooleanField("verbose field13")
    field14 = models.PositiveIntegerField("verbose field14")
    field15 = models.PositiveSmallIntegerField("verbose field15")
    field16 = models.SlugField("verbose field16")
    field17 = models.SmallIntegerField("verbose field17")
    field18 = models.TextField("verbose field18")
    field19 = models.TimeField("verbose field19")
    field20 = models.URLField("verbose field20")
    field21 = models.UUIDField("verbose field21")
    field22 = models.DurationField("verbose field22")


class GenericIPAddress(models.Model):
    ip = models.GenericIPAddressField(null=True, protocol='ipv4')


###############################################################################
# These models aren't used in any test, just here to ensure they validate
# successfully.

# See ticket #16570.
class DecimalLessThanOne(models.Model):
    d = models.DecimalField(max_digits=3, decimal_places=3)


# See ticket #18389.
class FieldClassAttributeModel(models.Model):
    field_class = models.CharField


###############################################################################


class DataModel(models.Model):
    short_data = models.BinaryField(max_length=10, default=b'\x08')
    data = models.BinaryField()


###############################################################################
# FileField


class Document(models.Model):
    myfile = models.FileField(upload_to='unused', unique=True)


###############################################################################
# ImageField

# If Pillow is available, do these tests.
if Image:
    class TestImageFieldFile(ImageFieldFile):
        """
        Custom Field File class that records whether or not the underlying
        file was opened.
        """
        def __init__(self, *args, **kwargs):
            self.was_opened = False
            super().__init__(*args, **kwargs)

        def open(self):
            self.was_opened = True
            super().open()

    class TestImageField(models.ImageField):
        attr_class = TestImageFieldFile

    # Set up a temp directory for file storage.
    temp_storage_dir = tempfile.mkdtemp()
    temp_storage = FileSystemStorage(temp_storage_dir)
    temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')

    class Person(models.Model):
        """
        Model that defines an ImageField with no dimension fields.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests')

    class AbstractPersonWithHeight(models.Model):
        """
        Abstract model that defines an ImageField with only one dimension
        field to make sure the dimension update is correctly run on concrete
        subclass instances post-initialization.
        """
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height')
        mugshot_height = models.PositiveSmallIntegerField()

        class Meta:
            abstract = True

    class PersonWithHeight(AbstractPersonWithHeight):
        """
        Concrete model that subclasses an abstract one with only one
        dimension field.
        """
        name = models.CharField(max_length=50)

    class PersonWithHeightAndWidth(models.Model):
        """
        Model that defines height and width fields after the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()

    class PersonDimensionsFirst(models.Model):
        """
        Model that defines height and width fields before the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')

    class PersonTwoImages(models.Model):
        """
        Model that:
        * Defines two ImageFields
        * Defines the height/width fields before the ImageFields
        * Has a nullable ImageField
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        headshot_height = models.PositiveSmallIntegerField(blank=True, null=True)
        headshot_width = models.PositiveSmallIntegerField(blank=True, null=True)
        headshot = TestImageField(blank=True, null=True,
                                  storage=temp_storage, upload_to='tests',
                                  height_field='headshot_height',
                                  width_field='headshot_width')


class AllFieldsModel(models.Model):
    big_integer = models.BigIntegerField()
    binary = models.BinaryField()
    boolean = models.BooleanField(default=False)
    char = models.CharField(max_length=10)
    date = models.DateField()
    datetime = models.DateTimeField()
    decimal = models.DecimalField(decimal_places=2, max_digits=2)
    duration = models.DurationField()
    email = models.EmailField()
    file_path = models.FilePathField()
    floatf = models.FloatField()
    integer = models.IntegerField()
    generic_ip = models.GenericIPAddressField()
    null_boolean = models.NullBooleanField()
    positive_integer = models.PositiveIntegerField()
    positive_small_integer = models.PositiveSmallIntegerField()
    slug = models.SlugField()
    small_integer = models.SmallIntegerField()
    text = models.TextField()
    time = models.TimeField()
    url = models.URLField()
    uuid = models.UUIDField()

    fo = models.ForeignObject(
        'self',
        on_delete=models.CASCADE,
        from_fields=['positive_integer'],
        to_fields=['id'],
        related_name='reverse',
    )
    fk = models.ForeignKey(
        'self',
        models.CASCADE,
        related_name='reverse2',
    )
    m2m = models.ManyToManyField('self')
    oto = models.OneToOneField('self', models.CASCADE)

    object_id = models.PositiveIntegerField()
    content_type = models.ForeignKey(ContentType, models.CASCADE)
    gfk = GenericForeignKey()
    gr = GenericRelation(DataModel)


class ManyToMany(models.Model):
    m2m = models.ManyToManyField('self')


###############################################################################


class UUIDModel(models.Model):
    field = models.UUIDField()


class NullableUUIDModel(models.Model):
    field = models.UUIDField(blank=True, null=True)


class PrimaryKeyUUIDModel(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4)


class RelatedToUUIDModel(models.Model):
    uuid_fk = models.ForeignKey('PrimaryKeyUUIDModel', models.CASCADE)


class UUIDChild(PrimaryKeyUUIDModel):
    pass


class UUIDGrandchild(UUIDChild):
    pass
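
# Illustrative note (not part of the original module): because
# PrimaryKeyUUIDModel sets default=uuid.uuid4, Django fills the primary key
# in __init__, before the row is ever saved:
#
#     obj = PrimaryKeyUUIDModel()
#     assert isinstance(obj.pk, uuid.UUID)  # already populated, pre-save
#     obj.save()
#
# Related rows such as RelatedToUUIDModel store the full UUID in uuid_fk_id.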
import datetime
import pickle
import unittest
import uuid
from copy import deepcopy
from unittest import mock

from django.core.exceptions import FieldError
from django.db import DatabaseError, connection
from django.db.models import (
    Avg, BooleanField, Case, CharField, Count, DateField, DateTimeField,
    DurationField, Exists, Expression, ExpressionList, ExpressionWrapper, F,
    Func, IntegerField, Max, Min, Model, OrderBy, OuterRef, Q, StdDev,
    Subquery, Sum, TimeField, UUIDField, Value, Variance, When,
)
from django.db.models.expressions import Col, Combinable, Random, RawSQL, Ref
from django.db.models.functions import (
    Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.db.models.sql import constants
from django.db.models.sql.datastructures import Join
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import Approximate, isolate_apps

from .models import (
    UUID, UUIDPK, Company, Employee, Experiment, Number, RemoteEmployee,
    Result, SimulationRun, Time,
)


class BasicExpressionsTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.example_inc = Company.objects.create(
            name="Example Inc.", num_employees=2300, num_chairs=5,
            ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
        )
        cls.foobar_ltd = Company.objects.create(
            name="Foobar Ltd.", num_employees=3, num_chairs=4, based_in_eu=True,
            ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
        )
        cls.max = Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30)
        cls.gmbh = Company.objects.create(name='Test GmbH', num_employees=32, num_chairs=1, ceo=cls.max)

    def setUp(self):
        self.company_query = Company.objects.values(
            "name", "num_employees", "num_chairs"
        ).order_by(
            "name", "num_employees", "num_chairs"
        )

    def test_annotate_values_aggregate(self):
        companies = Company.objects.annotate(
            salaries=F('ceo__salary'),
        ).values('num_employees', 'salaries').aggregate(
            result=Sum(
                F('salaries') + F('num_employees'),
                output_field=IntegerField()
            ),
        )
        self.assertEqual(companies['result'], 2395)

    def test_annotate_values_filter(self):
        companies = Company.objects.annotate(
            foo=RawSQL('%s', ['value']),
        ).filter(foo='value').order_by('name')
        self.assertQuerysetEqual(
            companies,
            ['<Company: Example Inc.>', '<Company: Foobar Ltd.>', '<Company: Test GmbH>'],
        )

    def test_annotate_values_count(self):
        companies = Company.objects.annotate(foo=RawSQL('%s', ['value']))
        self.assertEqual(companies.count(), 3)

    @skipUnlessDBFeature('supports_boolean_expr_in_select_clause')
    def test_filtering_on_annotate_that_uses_q(self):
        self.assertEqual(
            Company.objects.annotate(
                num_employees_check=ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField())
            ).filter(num_employees_check=True).count(),
            2,
        )

    def test_filtering_on_q_that_is_boolean(self):
        self.assertEqual(
            Company.objects.filter(
                ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField())
            ).count(),
            2,
        )

    def test_filtering_on_rawsql_that_is_boolean(self):
        self.assertEqual(
            Company.objects.filter(
                RawSQL('num_employees > %s', (3,), output_field=BooleanField()),
            ).count(),
            2,
        )

    def test_filter_inter_attribute(self):
        # We can filter on attribute relationships on the same model obj,
        # e.g. find companies where the number of employees is greater than
        # the number of chairs.
        self.assertSequenceEqual(
            self.company_query.filter(num_employees__gt=F("num_chairs")), [
                {
                    "num_chairs": 5,
                    "name": "Example Inc.",
                    "num_employees": 2300,
                },
                {
                    "num_chairs": 1,
                    "name": "Test GmbH",
                    "num_employees": 32,
                },
            ],
        )

    def test_update(self):
        # We can set one field to have the value of another field.
        # Make sure we have enough chairs.
        self.company_query.update(num_chairs=F("num_employees"))
        self.assertSequenceEqual(
            self.company_query, [
                {"num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300},
                {"num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3},
                {"num_chairs": 32, "name": "Test GmbH", "num_employees": 32},
            ],
        )

    def test_arithmetic(self):
        # We can perform arithmetic operations in expressions.
        # Make sure we have 2 spare chairs.
        self.company_query.update(num_chairs=F("num_employees") + 2)
        self.assertSequenceEqual(
            self.company_query, [
                {'num_chairs': 2302, 'name': 'Example Inc.', 'num_employees': 2300},
                {'num_chairs': 5, 'name': 'Foobar Ltd.', 'num_employees': 3},
                {'num_chairs': 34, 'name': 'Test GmbH', 'num_employees': 32},
            ],
        )

    def test_order_of_operations(self):
        # The standard order of operations is followed.
        self.company_query.update(num_chairs=F('num_employees') + 2 * F('num_employees'))
        self.assertSequenceEqual(
            self.company_query, [
                {'num_chairs': 6900, 'name': 'Example Inc.', 'num_employees': 2300},
                {'num_chairs': 9, 'name': 'Foobar Ltd.', 'num_employees': 3},
                {'num_chairs': 96, 'name': 'Test GmbH', 'num_employees': 32},
            ],
        )

    def test_parenthesis_priority(self):
        # The order of operations can be overridden by parentheses.
        self.company_query.update(num_chairs=(F('num_employees') + 2) * F('num_employees'))
        self.assertSequenceEqual(
            self.company_query, [
                {'num_chairs': 5294600, 'name': 'Example Inc.', 'num_employees': 2300},
                {'num_chairs': 15, 'name': 'Foobar Ltd.', 'num_employees': 3},
                {'num_chairs': 1088, 'name': 'Test GmbH', 'num_employees': 32},
            ],
        )
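
    # Illustrative note (not part of the original suite): F() arithmetic is
    # compiled into SQL operators, so the updates above run entirely in the
    # database without loading model instances into Python. Roughly, assuming
    # the default table name:
    #
    #     Company.objects.update(num_chairs=F('num_employees') + 2)
    #     # UPDATE "expressions_company"
    #     # SET "num_chairs" = ("expressions_company"."num_employees" + 2)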
    def test_update_with_fk(self):
        # A ForeignKey can be updated with the value of another ForeignKey.
        self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3)
        self.assertQuerysetEqual(
            Company.objects.all(),
            ['Joe Smith', 'Frank Meyer', 'Max Mustermann'],
            lambda c: str(c.point_of_contact),
            ordered=False
        )

    def test_update_with_none(self):
        Number.objects.create(integer=1, float=1.0)
        Number.objects.create(integer=2)
        Number.objects.filter(float__isnull=False).update(float=Value(None))
        self.assertQuerysetEqual(
            Number.objects.all(),
            [None, None],
            lambda n: n.float,
            ordered=False
        )

    def test_filter_with_join(self):
        # F() expressions can also span joins.
        Company.objects.update(point_of_contact=F('ceo'))
        c = Company.objects.first()
        c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
        c.save()

        self.assertQuerysetEqual(
            Company.objects.filter(ceo__firstname=F('point_of_contact__firstname')),
            ['Foobar Ltd.', 'Test GmbH'],
            lambda c: c.name,
            ordered=False
        )

        Company.objects.exclude(
            ceo__firstname=F("point_of_contact__firstname")
        ).update(name="foo")
        self.assertEqual(
            Company.objects.exclude(
                ceo__firstname=F('point_of_contact__firstname')
            ).get().name,
            "foo",
        )

        msg = "Joined field references are not permitted in this query"
        with self.assertRaisesMessage(FieldError, msg):
            Company.objects.exclude(
                ceo__firstname=F('point_of_contact__firstname')
            ).update(name=F('point_of_contact__lastname'))

    def test_object_update(self):
        # F() expressions can be used to update attributes on single objects.
        self.gmbh.num_employees = F('num_employees') + 4
        self.gmbh.save()
        self.gmbh.refresh_from_db()
        self.assertEqual(self.gmbh.num_employees, 36)

    def test_new_object_save(self):
        # We should be able to use Funcs when inserting new data.
        test_co = Company(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max)
        test_co.save()
        test_co.refresh_from_db()
        self.assertEqual(test_co.name, "upper")

    def test_new_object_create(self):
        test_co = Company.objects.create(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max)
        test_co.refresh_from_db()
        self.assertEqual(test_co.name, "upper")

    def test_object_create_with_aggregate(self):
        # Aggregates are not allowed when inserting new data.
        msg = 'Aggregate functions are not allowed in this query (num_employees=Max(Value(1))).'
        with self.assertRaisesMessage(FieldError, msg):
            Company.objects.create(
                name='Company', num_employees=Max(Value(1)), num_chairs=1,
                ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
            )

    def test_object_update_fk(self):
        # F() expressions cannot be used to update attributes which are
        # foreign keys, or attributes which involve joins.
        test_gmbh = Company.objects.get(pk=self.gmbh.pk)
        msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.'
        with self.assertRaisesMessage(ValueError, msg):
            test_gmbh.point_of_contact = F('ceo')

        test_gmbh.point_of_contact = self.gmbh.ceo
        test_gmbh.save()
        test_gmbh.name = F('ceo__lastname')
        msg = 'Joined field references are not permitted in this query'
        with self.assertRaisesMessage(FieldError, msg):
            test_gmbh.save()

    def test_update_inherited_field_value(self):
        msg = 'Joined field references are not permitted in this query'
        with self.assertRaisesMessage(FieldError, msg):
            RemoteEmployee.objects.update(adjusted_salary=F('salary') * 5)

    def test_object_update_unsaved_objects(self):
        # F() expressions cannot be used to update attributes on objects which
        # do not yet exist in the database.
        acme = Company(name='The Acme Widget Co.', num_employees=12, num_chairs=5, ceo=self.max)
        acme.num_employees = F("num_employees") + 16
        msg = (
            'Failed to insert expression "Col(expressions_company, '
            'expressions.Company.num_employees) + Value(16)" on '
            'expressions.Company.num_employees. F() expressions can only be '
            'used to update, not to insert.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            acme.save()

        acme.num_employees = 12
        acme.name = Lower(F('name'))
        msg = (
            'Failed to insert expression "Lower(Col(expressions_company, '
            'expressions.Company.name))" on expressions.Company.name. F() '
            'expressions can only be used to update, not to insert.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            acme.save()

    def test_ticket_11722_iexact_lookup(self):
        Employee.objects.create(firstname="John", lastname="Doe")
        Employee.objects.create(firstname="Test", lastname="test")

        queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
        self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])

    def test_ticket_16731_startswith_lookup(self):
        Employee.objects.create(firstname="John", lastname="Doe")
        e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
        e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
        self.assertSequenceEqual(
            Employee.objects.filter(lastname__startswith=F('firstname')),
            [e2, e3] if connection.features.has_case_insensitive_like else [e2]
        )
        qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk')
        self.assertSequenceEqual(qs, [e2, e3])

    def test_ticket_18375_join_reuse(self):
        # Reverse multijoin F() references and the lookup target the same
        # join. Pre #18375, the F() join was generated first and the lookup
        # couldn't reuse that join.
        qs = Employee.objects.filter(company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_ticket_18375_kwarg_ordering(self):
        # The next query was dict-randomization dependent: if the "gte=1"
        # lookup was seen first, the F() would reuse the join generated by
        # the gte lookup; if the F() was seen first, it generated a join the
        # other lookups could not reuse.
        qs = Employee.objects.filter(
            company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
            company_ceo_set__num_chairs__gte=1,
        )
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_ticket_18375_kwarg_ordering_2(self):
        # Another case similar to the above for F(). Here the same join
        # appears in two filter kwargs, one in the lhs lookup, one in the F().
        # Pre #18375, the number of joins generated was random if dict
        # randomization was enabled, that is, the generated query depended on
        # which clause was seen first.
        qs = Employee.objects.filter(
            company_ceo_set__num_employees=F('pk'),
            pk=F('company_ceo_set__num_employees')
        )
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_ticket_18375_chained_filters(self):
        # F() expressions do not reuse joins from previous filter() calls.
        qs = Employee.objects.filter(
            company_ceo_set__num_employees=F('pk')
        ).filter(
            company_ceo_set__num_employees=F('company_ceo_set__num_employees')
        )
        self.assertEqual(str(qs.query).count('JOIN'), 2)

    def test_order_by_exists(self):
        mary = Employee.objects.create(firstname='Mary', lastname='Mustermann', salary=20)
        mustermanns_by_seniority = Employee.objects.filter(lastname='Mustermann').order_by(
            # Order by whether the employee is the CEO of a company.
            Exists(Company.objects.filter(ceo=OuterRef('pk'))).desc()
        )
        self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary])

    def test_order_by_multiline_sql(self):
        raw_order_by = (
            RawSQL('''
                CASE WHEN num_employees > 1000
                     THEN num_chairs
                     ELSE 0 END
            ''', []).desc(),
            RawSQL('''
                CASE WHEN num_chairs > 1
                     THEN 1
                     ELSE 0 END
            ''', []).asc()
        )
        for qs in (
            Company.objects.all(),
            Company.objects.distinct(),
        ):
            with self.subTest(qs=qs):
                self.assertSequenceEqual(
                    qs.order_by(*raw_order_by),
                    [self.example_inc, self.gmbh, self.foobar_ltd],
                )

    def test_outerref(self):
        inner = Company.objects.filter(point_of_contact=OuterRef('pk'))
        msg = (
            'This queryset contains a reference to an outer query and may only '
            'be used in a subquery.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            inner.exists()

        outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
        self.assertIs(outer.exists(), True)

    def test_exist_single_field_output_field(self):
        queryset = Company.objects.values('pk')
        self.assertIsInstance(Exists(queryset).output_field, BooleanField)
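
    # Illustrative note (not part of the original suite): OuterRef('pk') is a
    # placeholder that resolves only when its queryset is wrapped in
    # Subquery()/Exists() inside an outer query, which is why evaluating
    # `inner` on its own raises ValueError in test_outerref above:
    #
    #     inner = Company.objects.filter(point_of_contact=OuterRef('pk'))
    #     Employee.objects.annotate(is_poc=Exists(inner))  # resolved here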
    def test_subquery(self):
        Company.objects.filter(name='Example Inc.').update(
            point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'),
            ceo=self.max,
        )
        Employee.objects.create(firstname='Bob', lastname='Brown', salary=40)
        qs = Employee.objects.annotate(
            is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))),
            is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))),
            is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))),
            is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))),
            largest_company=Subquery(Company.objects.order_by('-num_employees').filter(
                Q(ceo=OuterRef('pk')) | Q(point_of_contact=OuterRef('pk'))
            ).values('name')[:1], output_field=CharField())
        ).values(
            'firstname',
            'is_point_of_contact',
            'is_not_point_of_contact',
            'is_ceo_of_small_company',
            'is_ceo_small_2',
            'largest_company',
        ).order_by('firstname')

        results = list(qs)
        # Could use Coalesce(subq, Value('')) instead except for the bug in
        # cx_Oracle mentioned in #23843.
        bob = results[0]
        if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls:
            bob['largest_company'] = None

        self.assertEqual(results, [
            {
                'firstname': 'Bob',
                'is_point_of_contact': False,
                'is_not_point_of_contact': True,
                'is_ceo_of_small_company': False,
                'is_ceo_small_2': False,
                'largest_company': None,
            },
            {
                'firstname': 'Frank',
                'is_point_of_contact': False,
                'is_not_point_of_contact': True,
                'is_ceo_of_small_company': True,
                'is_ceo_small_2': True,
                'largest_company': 'Foobar Ltd.',
            },
            {
                'firstname': 'Joe',
                'is_point_of_contact': True,
                'is_not_point_of_contact': False,
                'is_ceo_of_small_company': False,
                'is_ceo_small_2': False,
                'largest_company': 'Example Inc.',
            },
            {
                'firstname': 'Max',
                'is_point_of_contact': False,
                'is_not_point_of_contact': True,
                'is_ceo_of_small_company': True,
                'is_ceo_small_2': True,
                'largest_company': 'Example Inc.',
            },
        ])

        # A less elegant way to write the same query: this uses a LEFT OUTER
        # JOIN and an IS NULL, inside a WHERE NOT IN, which is probably less
        # efficient than EXISTS.
        self.assertCountEqual(
            qs.filter(is_point_of_contact=True).values('pk'),
            Employee.objects.exclude(company_point_of_contact_set=None).values('pk')
        )

    def test_in_subquery(self):
        # This is a contrived test (and you really wouldn't write this query),
        # but it is a succinct way to test the __in=Subquery() construct.
        small_companies = Company.objects.filter(num_employees__lt=200).values('pk')
        subquery_test = Company.objects.filter(pk__in=Subquery(small_companies))
        self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh])
        subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3)))
        self.assertCountEqual(subquery_test2, [self.foobar_ltd])

    def test_uuid_pk_subquery(self):
        u = UUIDPK.objects.create()
        UUID.objects.create(uuid_fk=u)
        qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id')))
        self.assertCountEqual(qs, [u])

    def test_nested_subquery(self):
        inner = Company.objects.filter(point_of_contact=OuterRef('pk'))
        outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
        contrived = Employee.objects.annotate(
            is_point_of_contact=Subquery(
                outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'),
                output_field=BooleanField(),
            ),
        )
        self.assertCountEqual(contrived.values_list(), outer.values_list())

    def test_nested_subquery_join_outer_ref(self):
        inner = Employee.objects.filter(pk=OuterRef('ceo__pk')).values('pk')
        qs = Employee.objects.annotate(
            ceo_company=Subquery(
                Company.objects.filter(
                    ceo__in=inner,
                    ceo__pk=OuterRef('pk'),
                ).values('pk'),
            ),
        )
        self.assertSequenceEqual(
            qs.values_list('ceo_company', flat=True),
            [self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk],
        )

    def test_nested_subquery_outer_ref_2(self):
        first = Time.objects.create(time='09:00')
        second = Time.objects.create(time='17:00')
        third = Time.objects.create(time='21:00')
        SimulationRun.objects.bulk_create([
            SimulationRun(start=first, end=second, midpoint='12:00'),
            SimulationRun(start=first, end=third, midpoint='15:00'),
            SimulationRun(start=second, end=first, midpoint='00:00'),
        ])
        inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time')
        middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1]
        outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField()))

        # This is a contrived example. It exercises the double OuterRef form.
        self.assertCountEqual(outer, [first, second, third])

    def test_nested_subquery_outer_ref_with_autofield(self):
        first = Time.objects.create(time='09:00')
        second = Time.objects.create(time='17:00')
        SimulationRun.objects.create(start=first, end=second, midpoint='12:00')
        inner = SimulationRun.objects.filter(start=OuterRef(OuterRef('pk'))).values('start')
        middle = Time.objects.annotate(other=Subquery(inner)).values('other')[:1]
        outer = Time.objects.annotate(other=Subquery(middle, output_field=IntegerField()))

        # This exercises the double OuterRef form with an AutoField as pk.
        self.assertCountEqual(outer, [first, second])

    def test_annotations_within_subquery(self):
        Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank'))
        inner = Company.objects.filter(
            ceo=OuterRef('pk')
        ).values('ceo').annotate(total_employees=Sum('num_employees')).values('total_employees')
        outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner))
        self.assertSequenceEqual(
            outer.order_by('-total_employees').values('salary', 'total_employees'),
            [{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}],
        )

    def test_subquery_references_joined_table_twice(self):
        inner = Company.objects.filter(
            num_chairs__gte=OuterRef('ceo__salary'),
            num_employees__gte=OuterRef('point_of_contact__salary'),
        )
        # Another contrived example (there is no need to have a subquery here).
        outer = Company.objects.filter(pk__in=Subquery(inner.values('pk')))
        self.assertFalse(outer.exists())

    def test_subquery_filter_by_aggregate(self):
        Number.objects.create(integer=1000, float=1.2)
        Employee.objects.create(salary=1000)
        qs = Number.objects.annotate(
            min_valuable_count=Subquery(
                Employee.objects.filter(
                    salary=OuterRef('integer'),
                ).annotate(cnt=Count('salary')).filter(cnt__gt=0).values('cnt')[:1]
            ),
        )
        self.assertEqual(qs.get().float, 1.2)

    def test_aggregate_subquery_annotation(self):
        with self.assertNumQueries(1) as ctx:
            aggregate = Company.objects.annotate(
                ceo_salary=Subquery(
                    Employee.objects.filter(
                        id=OuterRef('ceo_id'),
                    ).values('salary')
                ),
            ).aggregate(
                ceo_salary_gt_20=Count('pk', filter=Q(ceo_salary__gt=20)),
            )
        self.assertEqual(aggregate, {'ceo_salary_gt_20': 1})
        # Aggregation over a subquery annotation doesn't annotate the
        # subquery twice in the inner query.
        sql = ctx.captured_queries[0]['sql']
        self.assertLessEqual(sql.count('SELECT'), 3)
        # GROUP BY isn't required to aggregate over a query that doesn't
        # contain nested aggregates.
        self.assertNotIn('GROUP BY', sql)

    def test_explicit_output_field(self):
        class FuncA(Func):
            output_field = CharField()

        class FuncB(Func):
            pass

        expr = FuncB(FuncA())
        self.assertEqual(expr.output_field, FuncA.output_field)

    def test_outerref_mixed_case_table_name(self):
        inner = Result.objects.filter(result_time__gte=OuterRef('experiment__assigned'))
        outer = Result.objects.filter(pk__in=Subquery(inner.values('pk')))
        self.assertFalse(outer.exists())

    def test_outerref_with_operator(self):
        inner = Company.objects.filter(num_employees=OuterRef('ceo__salary') + 2)
        outer = Company.objects.filter(pk__in=Subquery(inner.values('pk')))
        self.assertEqual(outer.get().name, 'Test GmbH')

    def test_annotation_with_outerref(self):
        gmbh_salary = Company.objects.annotate(
            max_ceo_salary_raise=Subquery(
                Company.objects.annotate(
                    salary_raise=OuterRef('num_employees') + F('num_employees'),
                ).order_by('-salary_raise').values('salary_raise')[:1],
                output_field=IntegerField(),
            ),
        ).get(pk=self.gmbh.pk)
        self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332)

    def test_pickle_expression(self):
        expr = Value(1, output_field=IntegerField())
        expr.convert_value  # populate cached property
        self.assertEqual(pickle.loads(pickle.dumps(expr)), expr)

    def test_incorrect_field_in_F_expression(self):
        with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
            list(Employee.objects.filter(firstname=F('nope')))

    def test_incorrect_joined_field_in_F_expression(self):
        with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
            list(Company.objects.filter(ceo__pk=F('point_of_contact__nope')))

    def test_exists_in_filter(self):
        inner = Company.objects.filter(ceo=OuterRef('pk')).values('pk')
        qs1 = Employee.objects.filter(Exists(inner))
        qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True)
        self.assertCountEqual(qs1, qs2)
        self.assertFalse(Employee.objects.exclude(Exists(inner)).exists())
        self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner)))

    def test_subquery_in_filter(self):
        inner = Company.objects.filter(ceo=OuterRef('pk')).values('based_in_eu')
        self.assertSequenceEqual(
            Employee.objects.filter(Subquery(inner)),
            [self.foobar_ltd.ceo],
        )

    def test_case_in_filter_if_boolean_output_field(self):
        is_ceo = Company.objects.filter(ceo=OuterRef('pk'))
        is_poc = Company.objects.filter(point_of_contact=OuterRef('pk'))
        qs = Employee.objects.filter(
            Case(
                When(Exists(is_ceo), then=True),
                When(Exists(is_poc), then=True),
                default=False,
                output_field=BooleanField(),
            ),
        )
        self.assertSequenceEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max])

    def test_boolean_expression_combined(self):
        is_ceo = Company.objects.filter(ceo=OuterRef('pk'))
        is_poc = Company.objects.filter(point_of_contact=OuterRef('pk'))
        self.gmbh.point_of_contact = self.max
        self.gmbh.save()
        self.assertSequenceEqual(
            Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)),
            [self.example_inc.ceo, self.foobar_ltd.ceo, self.max],
        )
        self.assertSequenceEqual(
            Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)),
            [self.max],
        )
        self.assertSequenceEqual(
            Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)),
            [self.max],
        )
        self.assertSequenceEqual(
            Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)),
            [self.example_inc.ceo, self.max],
        )


class IterableLookupInnerExpressionsTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30)
        # MySQL requires that the values calculated for expressions don't
        # pass outside of the field's range, so it's inconvenient to use the
        # values in the more general tests.
        Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo)
        Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo)
        Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo)
        Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo)
        Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo)

    def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):
        # __in lookups can use F() expressions for integers.
        queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10]))
        self.assertQuerysetEqual(queryset, ['<Company: 5060 Ltd>'], ordered=False)
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])),
            ['<Company: 5040 Ltd>', '<Company: 5060 Ltd>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Company.objects.filter(
                num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10])
            ),
            ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'],
            ordered=False
        )

    def test_expressions_in_lookups_join_choice(self):
        midpoint = datetime.time(13, 0)
        t1 = Time.objects.create(time=datetime.time(12, 0))
        t2 = Time.objects.create(time=datetime.time(14, 0))
        SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint)
        SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint)
        SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint)
        SimulationRun.objects.create(start=None, end=None, midpoint=midpoint)

        queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')])
        self.assertQuerysetEqual(
            queryset,
            ['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'],
            ordered=False
        )
        for alias in queryset.query.alias_map.values():
            if isinstance(alias, Join):
                self.assertEqual(alias.join_type, constants.INNER)

        queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')])
        self.assertQuerysetEqual(queryset, [], ordered=False)
        for alias in queryset.query.alias_map.values():
            if isinstance(alias, Join):
                self.assertEqual(alias.join_type, constants.LOUTER)

    def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):
        # Range lookups can use F() expressions for integers.
Company.objects.filter(num_employees__exact=F("num_chairs")) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs'), 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(1, 100)), [ '<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>', '<Company: 99300 Ltd>', ], ordered=False ) @unittest.skipUnless(connection.vendor == 'sqlite', "This defensive test only works on databases that don't validate parameter types") def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self): """ This tests that SQL injection isn't possible using compilation of expressions in iterable filters, as their compilation happens before the main query compilation. It's limited to SQLite, as PostgreSQL, Oracle and other vendors have defense in depth against this by type checking. Testing against SQLite (the most permissive of the built-in databases) demonstrates that the problem doesn't exist while keeping the test simple. """ queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1']) self.assertQuerysetEqual(queryset, [], ordered=False) def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self): start = datetime.datetime(2016, 2, 3, 15, 0, 0) end = datetime.datetime(2016, 2, 5, 15, 0, 0) experiment_1 = Experiment.objects.create( name='Integrity testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) experiment_2 = Experiment.objects.create( name='Taste testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 2, 4, 15, 0, 0), ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 3, 10, 2, 0, 0), ) Result.objects.create( experiment=experiment_2, result_time=datetime.datetime(2016, 1, 8, 5, 0, 0), ) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) class FTests(SimpleTestCase): def test_deepcopy(self): f = F("foo") g = deepcopy(f) self.assertEqual(f.name, g.name) def test_deconstruct(self): f = F('name') path, args, kwargs = f.deconstruct() self.assertEqual(path, 'django.db.models.expressions.F') self.assertEqual(args, (f.name,)) self.assertEqual(kwargs, {}) def test_equal(self): f = F('name') same_f = F('name') other_f = F('username') self.assertEqual(f, same_f) self.assertNotEqual(f, other_f) def test_hash(self): d = {F('name'): 'Bob'} self.assertIn(F('name'), d) self.assertEqual(d[F('name')], 'Bob') def test_not_equal_Value(self): f = F('name') value
= Value('name') self.assertNotEqual(f, value) self.assertNotEqual(value, f) class ExpressionsTests(TestCase): def test_F_reuse(self): f = F('id') n = Number.objects.create(integer=-1) c = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith") ) c_qs = Company.objects.filter(id=f) self.assertEqual(c_qs.get(), c) # Reuse the same F-object for another queryset n_qs = Number.objects.filter(id=f) self.assertEqual(n_qs.get(), n) # The original query still works correctly self.assertEqual(c_qs.get(), c) def test_patterns_escape(self): r""" Special characters (e.g. %, _ and \) stored in the database are properly escaped when using a pattern lookup with an expression -- refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%Joh\\n"), Employee(firstname="Johnny", lastname="%John"), Employee(firstname="Jean-Claude", lastname="Claud_"), Employee(firstname="Jean-Claude", lastname="Claude"), Employee(firstname="Jean-Claude", lastname="Claude%"), Employee(firstname="Johnny", lastname="Joh\\n"), Employee(firstname="Johnny", lastname="John"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__contains=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__startswith=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__endswith=F('lastname')), ["<Employee: Jean-Claude Claude>"], ordered=False, ) def test_insensitive_patterns_escape(self): r""" Special characters (e.g.
%, _ and \) stored in the database are properly escaped when using a case-insensitive pattern lookup with an expression -- refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%joh\\n"), Employee(firstname="Johnny", lastname="%john"), Employee(firstname="Jean-Claude", lastname="claud_"), Employee(firstname="Jean-Claude", lastname="claude"), Employee(firstname="Jean-Claude", lastname="claude%"), Employee(firstname="Johnny", lastname="joh\\n"), Employee(firstname="Johnny", lastname="john"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__icontains=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__istartswith=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__iendswith=F('lastname')), ["<Employee: Jean-Claude claude>"], ordered=False, ) @isolate_apps('expressions') class SimpleExpressionTests(SimpleTestCase): def test_equal(self): self.assertEqual(Expression(), Expression()) self.assertEqual( Expression(IntegerField()), Expression(output_field=IntegerField()) ) self.assertEqual(Expression(IntegerField()), mock.ANY) self.assertNotEqual( Expression(IntegerField()), Expression(CharField()) ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( Expression(TestModel._meta.get_field('field')), Expression(TestModel._meta.get_field('other_field')), ) def test_hash(self): self.assertEqual(hash(Expression()), hash(Expression())) self.assertEqual( hash(Expression(IntegerField())), hash(Expression(output_field=IntegerField())) ) self.assertNotEqual( hash(Expression(IntegerField())), hash(Expression(CharField())), ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( hash(Expression(TestModel._meta.get_field('field'))), hash(Expression(TestModel._meta.get_field('other_field'))), ) class ExpressionsNumericTests(TestCase): @classmethod def setUpTestData(cls): Number(integer=-1).save() Number(integer=42).save() Number(integer=1337).save() Number.objects.update(float=F('integer')) def test_fill_with_value_from_same_object(self): """ We can fill a value in all objects with another value of the same object. """ self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 42, 42.000>', '<Number: 1337, 1337.000>'], ordered=False ) def test_increment_value(self): """ We can increment a value of all objects in a query set. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_filter_not_equals_other_field(self): """ We can filter for objects where a value is not equal to the value of another field. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.exclude(float=F('integer')), ['<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_complex_expressions(self): """ Complex expressions of different connection types are possible.
""" n = Number.objects.create(integer=10, float=123.45) self.assertEqual(Number.objects.filter(pk=n.pk).update( float=F('integer') + F('float') * 2), 1) self.assertEqual(Number.objects.get(pk=n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)) class ExpressionOperatorTests(TestCase): @classmethod def setUpTestData(cls): cls.n = Number.objects.create(integer=42, float=15.5) cls.n1 = Number.objects.create(integer=-42, float=-15.5) def test_lefthand_addition(self): # LH Addition of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F('integer') + 15, float=F('float') + 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_lefthand_subtraction(self): # LH Subtraction of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)) def test_lefthand_multiplication(self): # Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_lefthand_division(self): # LH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)) def test_lefthand_modulo(self): # LH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3)) def test_lefthand_bitwise_and(self): # LH Bitwise ands on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56)) Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3)) def test_lefthand_bitwise_left_shift_operator(self): Number.objects.update(integer=F('integer').bitleftshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168) def test_lefthand_bitwise_right_shift_operator(self): Number.objects.update(integer=F('integer').bitrightshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11) def test_lefthand_bitwise_or(self): # LH Bitwise or on integers Number.objects.update(integer=F('integer').bitor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3)) def test_lefthand_power(self): # LH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 
1764) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)) def test_right_hand_addition(self): # Right hand operators Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float')) # RH Addition of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_right_hand_subtraction(self): Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float')) # RH Subtraction of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)) def test_right_hand_multiplication(self): # RH Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_right_hand_division(self): # RH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)) def test_right_hand_modulo(self): # RH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3)) def test_righthand_power(self): # RH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)) class FTimeDeltaTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) midnight = datetime.time(0) delta0 = datetime.timedelta(0) delta1 = datetime.timedelta(microseconds=253000) delta2 = datetime.timedelta(seconds=44) delta3 = datetime.timedelta(hours=21, minutes=8) delta4 = datetime.timedelta(days=10) delta5 = datetime.timedelta(days=90) # Test data is set so that deltas and delays will be # strictly increasing. cls.deltas = [] cls.delays = [] cls.days_long = [] # e0: started same day as assigned, zero duration end = stime + delta0 e0 = Experiment.objects.create( name='e0', assigned=sday, start=stime, end=end, completed=end.date(), estimated_time=delta0, ) cls.deltas.append(delta0) cls.delays.append(e0.start - datetime.datetime.combine(e0.assigned, midnight)) cls.days_long.append(e0.completed - e0.assigned) # e1: started one day after assigned, tiny duration, data # set so that end time has no fractional seconds, which # tests an edge case on sqlite. 
delay = datetime.timedelta(1) end = stime + delay + delta1 e1 = Experiment.objects.create( name='e1', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta1, ) cls.deltas.append(delta1) cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight)) cls.days_long.append(e1.completed - e1.assigned) # e2: started three days after assigned, small duration end = stime + delta2 e2 = Experiment.objects.create( name='e2', assigned=sday - datetime.timedelta(3), start=stime, end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1), ) cls.deltas.append(delta2) cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight)) cls.days_long.append(e2.completed - e2.assigned) # e3: started four days after assigned, medium duration delay = datetime.timedelta(4) end = stime + delay + delta3 e3 = Experiment.objects.create( name='e3', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3, ) cls.deltas.append(delta3) cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight)) cls.days_long.append(e3.completed - e3.assigned) # e4: started 10 days after assignment, long duration end = stime + delta4 e4 = Experiment.objects.create( name='e4', assigned=sday - datetime.timedelta(10), start=stime, end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1), ) cls.deltas.append(delta4) cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight)) cls.days_long.append(e4.completed - e4.assigned) # e5: started a month after assignment, very long duration delay = datetime.timedelta(30) end = stime + delay + delta5 e5 = Experiment.objects.create( name='e5', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta5, ) cls.deltas.append(delta5) cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight)) cls.days_long.append(e5.completed - e5.assigned) cls.expnames = [e.name for e in Experiment.objects.all()] def test_multiple_query_compilation(self): # Ticket #21643 queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) q1 = str(queryset.query) q2 = str(queryset.query) self.assertEqual(q1, q2) def test_query_clone(self): # Ticket #21643 - Crash when compiling query more than once qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) qs2 = qs.all() list(qs) list(qs2) # Intentionally no assert def test_delta_add(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_subtract(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_exclude(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[i:]) test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)] 
self.assertEqual(test_set, self.expnames[i + 1:]) def test_date_comparison(self): for i, days in enumerate(self.days_long): test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i + 1]) @skipUnlessDBFeature("supports_mixed_date_datetime_comparisons") def test_mixed_comparisons1(self): for i, delay in enumerate(self.delays): test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_mixed_comparisons2(self): for i, delay in enumerate(self.delays): delay = datetime.timedelta(delay.days) test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1)) ] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_update(self): for delta in self.deltas: exps = Experiment.objects.all() expected_durations = [e.duration() for e in exps] expected_starts = [e.start + delta for e in exps] expected_ends = [e.end + delta for e in exps] Experiment.objects.update(start=F('start') + delta, end=F('end') + delta) exps = Experiment.objects.all() new_starts = [e.start for e in exps] new_ends = [e.end for e in exps] new_durations = [e.duration() for e in exps] self.assertEqual(expected_starts, new_starts) self.assertEqual(expected_ends, new_ends) self.assertEqual(expected_durations, new_durations) def test_invalid_operator(self): with self.assertRaises(DatabaseError): list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0))) def test_durationfield_add(self): zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))] self.assertEqual(zeros, ['e0']) end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))] self.assertEqual(end_less, ['e2']) delta_math = [ e.name for e in Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1)) ] self.assertEqual(delta_math, ['e4']) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') + Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subtraction(self): queryset = Experiment.objects.annotate( completion_duration=ExpressionWrapper( F('completed') - F('assigned'), output_field=DurationField() ) ) at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))} self.assertEqual(at_least_5_days, {'e3', 'e4', 'e5'}) at_least_120_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=120))} self.assertEqual(at_least_120_days, {'e5'}) less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))} self.assertEqual(less_than_5_days, {'e0', 'e1', 'e2'}) queryset = Experiment.objects.annotate(difference=ExpressionWrapper( F('completed') - Value(None, output_field=DateField()), output_field=DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = 
Experiment.objects.annotate(shifted=ExpressionWrapper( F('completed') - Value(None, output_field=DurationField()), output_field=DateField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('completed') queryset = Experiment.objects.annotate( difference=ExpressionWrapper( subquery - F('completed'), output_field=DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) queryset = Time.objects.annotate( difference=ExpressionWrapper( F('time') - Value(datetime.time(11, 15, 0), output_field=TimeField()), output_field=DurationField(), ) ) self.assertEqual( queryset.get().difference, datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345) ) queryset = Time.objects.annotate(difference=ExpressionWrapper( F('time') - Value(None, output_field=TimeField()), output_field=DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Time.objects.annotate(shifted=ExpressionWrapper( F('time') - Value(None, output_field=DurationField()), output_field=TimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subquery_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) subquery = Time.objects.filter(pk=OuterRef('pk')).values('time') queryset = Time.objects.annotate( difference=ExpressionWrapper( subquery - F('time'), output_field=DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction(self): under_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start')) ] self.assertEqual(under_estimate, ['e2']) over_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start')) ] self.assertEqual(over_estimate, ['e4']) queryset = Experiment.objects.annotate(difference=ExpressionWrapper( F('start') - Value(None, output_field=DateTimeField()), output_field=DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') - Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('start') queryset = Experiment.objects.annotate( difference=ExpressionWrapper( subquery - F('start'), output_field=DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) Experiment.objects.update(end=F('start') + delta) qs = Experiment.objects.annotate( delta=ExpressionWrapper(F('end') - F('start'), output_field=DurationField()) ) for e in qs: self.assertEqual(e.delta, delta) def test_duration_with_datetime(self): # Exclude e1 which has very high precision so we can test this on all # backends regardless of whether or not it supports # microsecond_precision. 
over_estimate = Experiment.objects.exclude(name='e1').filter( completed__gt=self.stime + F('estimated_time'), ).order_by('name') self.assertQuerysetEqual(over_estimate, ['e3', 'e4', 'e5'], lambda e: e.name) def test_duration_with_datetime_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) qs = Experiment.objects.annotate(dt=ExpressionWrapper( F('start') + delta, output_field=DateTimeField(), )) for e in qs: self.assertEqual(e.dt, e.start + delta) def test_date_minus_duration(self): more_than_4_days = Experiment.objects.filter( assigned__lt=F('completed') - Value(datetime.timedelta(days=4), output_field=DurationField()) ) self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name) def test_negative_timedelta_update(self): # subtract 30 seconds, 30 minutes, 2 hours and 2 days experiments = Experiment.objects.filter(name='e0').annotate( start_sub_seconds=F('start') + datetime.timedelta(seconds=-30), ).annotate( start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30), ).annotate( start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2), ).annotate( new_start=F('start_sub_hours') + datetime.timedelta(days=-2), ) expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0) # subtract 30 microseconds experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30)) expected_start += datetime.timedelta(microseconds=+746970) experiments.update(start=F('new_start')) e0 = Experiment.objects.get(name='e0') self.assertEqual(e0.start, expected_start) class ValueTests(TestCase): def test_update_TimeField_using_Value(self): Time.objects.create() Time.objects.update(time=Value(datetime.time(1), output_field=TimeField())) self.assertEqual(Time.objects.get().time, datetime.time(1)) def test_update_UUIDField_using_Value(self): UUID.objects.create() UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField())) self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012')) def test_deconstruct(self): value = Value('name') path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(kwargs, {}) def test_deconstruct_output_field(self): value = Value('name', output_field=CharField()) path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(len(kwargs), 1) self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct()) def test_equal(self): value = Value('name') self.assertEqual(value, Value('name')) self.assertNotEqual(value, Value('username')) def test_hash(self): d = {Value('name'): 'Bob'} self.assertIn(Value('name'), d) self.assertEqual(d[Value('name')], 'Bob') def test_equal_output_field(self): value = Value('name', output_field=CharField()) same_value = Value('name', output_field=CharField()) other_value = Value('name', output_field=TimeField()) no_output_field = Value('name') self.assertEqual(value, same_value) self.assertNotEqual(value, other_value) self.assertNotEqual(value, no_output_field) def test_raise_empty_expressionlist(self): msg = 'ExpressionList requires at least one expression' with self.assertRaisesMessage(ValueError, msg): ExpressionList() class FieldTransformTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 
25, 12, 15, 30, 747000) cls.ex1 = Experiment.objects.create( name='Experiment 1', assigned=sday, completed=sday + datetime.timedelta(2), estimated_time=datetime.timedelta(2), start=stime, end=stime + datetime.timedelta(2), ) def test_month_aggregation(self): self.assertEqual( Experiment.objects.aggregate(month_count=Count('assigned__month')), {'month_count': 1} ) def test_transform_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('assigned__month'), ["{'assigned__month': 6}"] ) def test_multiple_transforms_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('end__date__month'), ["{'end__date__month': 6}"] ) class ReprTests(SimpleTestCase): def test_expressions(self): self.assertEqual( repr(Case(When(a=1))), "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>" ) self.assertEqual( repr(When(Q(age__gte=18), then=Value('legal'))), "<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value(legal)>" ) self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)") self.assertEqual(repr(F('published')), "F(published)") self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>") self.assertEqual( repr(ExpressionWrapper(F('cost') + F('tax'), IntegerField())), "ExpressionWrapper(F(cost) + F(tax))" ) self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)") self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)') self.assertEqual(repr(Random()), "Random()") self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])") self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))") self.assertEqual(repr(Value(1)), "Value(1)") self.assertEqual( repr(ExpressionList(F('col'), F('anothercol'))), 'ExpressionList(F(col), F(anothercol))' ) self.assertEqual( repr(ExpressionList(OrderBy(F('col'), descending=False))), 'ExpressionList(OrderBy(F(col), descending=False))' ) def test_functions(self): self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))") self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))") self.assertEqual(repr(Length('a')), "Length(F(a))") self.assertEqual(repr(Lower('a')), "Lower(F(a))") self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))") self.assertEqual(repr(Upper('a')), "Upper(F(a))") def test_aggregates(self): self.assertEqual(repr(Avg('a')), "Avg(F(a))") self.assertEqual(repr(Count('a')), "Count(F(a))") self.assertEqual(repr(Count('*')), "Count('*')") self.assertEqual(repr(Max('a')), "Max(F(a))") self.assertEqual(repr(Min('a')), "Min(F(a))") self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)") self.assertEqual(repr(Sum('a')), "Sum(F(a))") self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)") def test_distinct_aggregates(self): self.assertEqual(repr(Count('a', distinct=True)), "Count(F(a), distinct=True)") self.assertEqual(repr(Count('*', distinct=True)), "Count('*', distinct=True)") def test_filtered_aggregates(self): filter = Q(a=1) self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)") self.assertEqual(repr(Sum('a', 
filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))") self.assertEqual( repr(Variance('a', sample=True, filter=filter)), "Variance(F(a), filter=(AND: ('a', 1)), sample=True)" ) self.assertEqual( repr(Count('a', filter=filter, distinct=True)), "Count(F(a), distinct=True, filter=(AND: ('a', 1)))" ) class CombinableTests(SimpleTestCase): bitwise_msg = 'Use .bitand() and .bitor() for bitwise logical operations.' def test_negation(self): c = Combinable() self.assertEqual(-c, c * -1) def test_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() & Combinable() def test_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() | Combinable() def test_reversed_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() & Combinable() def test_reversed_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() | Combinable()
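# Editorial sketch (not part of the upstream suite): the correlated-subquery
# pattern that the Subquery/Exists tests above exercise, reduced to a minimal
# form. It reuses the Company/Employee models this module already imports;
# the helper name below is hypothetical.
def _sketch_correlated_subquery():
    from django.db.models import Exists, OuterRef, Subquery
    # For each outer Employee row, the inner queryset is re-evaluated with
    # 'pk' bound to that row (OuterRef defers resolution to the outer query).
    is_ceo = Company.objects.filter(ceo=OuterRef('pk'))
    return Employee.objects.annotate(
        # Compiles to EXISTS(SELECT ... WHERE company.ceo_id = employee.id);
        # usable as an annotation or, as test_exists_in_filter shows above,
        # directly inside filter().
        runs_a_company=Exists(is_ceo),
        # A scalar correlated subquery: values(...)[:1] guarantees a single
        # column and at most one row, the same shape the tests above use.
        company_name=Subquery(is_ceo.values('name')[:1]),
    )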
import datetime from operator import attrgetter from django.core.exceptions import FieldError from django.db import models from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import isolate_apps from django.utils import translation from .models import ( Article, ArticleIdea, ArticleTag, ArticleTranslation, Country, Friendship, Group, Membership, NewsArticle, Person, ) # Note that these tests are testing internal implementation details. # ForeignObject is not part of the public API. class MultiColumnFKTests(TestCase): @classmethod def setUpTestData(cls): # Creating countries cls.usa = Country.objects.create(name="United States of America") cls.soviet_union = Country.objects.create(name="Soviet Union") # Creating People cls.bob = Person.objects.create(name='Bob', person_country=cls.usa) cls.jim = Person.objects.create(name='Jim', person_country=cls.usa) cls.george = Person.objects.create(name='George', person_country=cls.usa) cls.jane = Person.objects.create(name='Jane', person_country=cls.soviet_union) cls.mark = Person.objects.create(name='Mark', person_country=cls.soviet_union) cls.sam = Person.objects.create(name='Sam', person_country=cls.soviet_union) # Creating Groups cls.kgb = Group.objects.create(name='KGB', group_country=cls.soviet_union) cls.cia = Group.objects.create(name='CIA', group_country=cls.usa) cls.republican = Group.objects.create(name='Republican', group_country=cls.usa) cls.democrat = Group.objects.create(name='Democrat', group_country=cls.usa) def test_get_succeeds_on_multicolumn_match(self): # Membership objects have access to their related Person if both # country_ids match between them membership = Membership.objects.create( membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id) person = membership.person self.assertEqual((person.id, person.name), (self.bob.id, "Bob")) def test_get_fails_on_multicolumn_mismatch(self): # Membership objects raise a DoesNotExist error when there is no # Person with the same id and country_id membership = Membership.objects.create( membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id) with self.assertRaises(Person.DoesNotExist): getattr(membership, 'person') def test_reverse_query_returns_correct_result(self): # Creating a valid membership because it has the same country as the person Membership.objects.create( membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id) # Creating an invalid membership because it has a different country than the person Membership.objects.create( membership_country_id=self.soviet_union.id, person_id=self.bob.id, group_id=self.republican.id) with self.assertNumQueries(1): membership = self.bob.membership_set.get() self.assertEqual(membership.group_id, self.cia.id) self.assertIs(membership.person, self.bob) def test_query_filters_correctly(self): # Creating two valid memberships Membership.objects.create( membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id) Membership.objects.create( membership_country_id=self.usa.id, person_id=self.jim.id, group_id=self.cia.id) # Creating an invalid membership Membership.objects.create(membership_country_id=self.soviet_union.id, person_id=self.george.id, group_id=self.cia.id) self.assertQuerysetEqual( Membership.objects.filter(person__name__contains='o'), [ self.bob.id ], attrgetter("person_id") ) def test_reverse_query_filters_correctly(self): timemark = datetime.datetime.utcnow() timedelta = datetime.timedelta(days=1) # Creating
two valid memberships Membership.objects.create( membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id, date_joined=timemark - timedelta) Membership.objects.create( membership_country_id=self.usa.id, person_id=self.jim.id, group_id=self.cia.id, date_joined=timemark + timedelta) # Creating an invalid membership Membership.objects.create( membership_country_id=self.soviet_union.id, person_id=self.george.id, group_id=self.cia.id, date_joined=timemark + timedelta) self.assertQuerysetEqual( Person.objects.filter(membership__date_joined__gte=timemark), [ 'Jim' ], attrgetter('name') ) def test_forward_in_lookup_filters_correctly(self): Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id) Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id, group_id=self.cia.id) # Creating an invalid membership Membership.objects.create( membership_country_id=self.soviet_union.id, person_id=self.george.id, group_id=self.cia.id) self.assertQuerysetEqual( Membership.objects.filter(person__in=[self.george, self.jim]), [ self.jim.id, ], attrgetter('person_id') ) self.assertQuerysetEqual( Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [ self.jim.id, ], attrgetter('person_id') ) def test_double_nested_query(self): m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id) m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id, group_id=self.cia.id) Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id, to_friend_country_id=self.usa.id, to_friend_id=self.jim.id) self.assertSequenceEqual( Membership.objects.filter( person__in=Person.objects.filter( from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all()) ) ), [m1] ) self.assertSequenceEqual( Membership.objects.exclude( person__in=Person.objects.filter( from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all()) ) ), [m2] ) def test_select_related_foreignkey_forward_works(self): Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia) Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat) with self.assertNumQueries(1): people = [m.person for m in Membership.objects.select_related('person').order_by('pk')] normal_people = [m.person for m in Membership.objects.all().order_by('pk')] self.assertEqual(people, normal_people) def test_prefetch_foreignkey_forward_works(self): Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia) Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat) with self.assertNumQueries(2): people = [ m.person for m in Membership.objects.prefetch_related('person').order_by('pk')] normal_people = [m.person for m in Membership.objects.order_by('pk')] self.assertEqual(people, normal_people) def test_prefetch_foreignkey_reverse_works(self): Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia) Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat) with self.assertNumQueries(2): membership_sets = [ list(p.membership_set.all()) for p in Person.objects.prefetch_related('membership_set').order_by('pk')] with self.assertNumQueries(7): normal_membership_sets = [ list(p.membership_set.all()) for p in Person.objects.order_by('pk') ] self.assertEqual(membership_sets,
normal_membership_sets) def test_m2m_through_forward_returns_valid_members(self): # We start out by making sure that the Group 'CIA' has no members. self.assertQuerysetEqual( self.cia.members.all(), [] ) Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia) Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia) # Let's check to make sure that it worked. Bob and Jim should be members of the CIA. self.assertQuerysetEqual( self.cia.members.all(), [ 'Bob', 'Jim' ], attrgetter("name") ) def test_m2m_through_reverse_returns_valid_members(self): # We start out by making sure that Bob is in no groups. self.assertQuerysetEqual( self.bob.groups.all(), [] ) Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia) Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.republican) # Bob should be in the CIA and a Republican self.assertQuerysetEqual( self.bob.groups.all(), [ 'CIA', 'Republican' ], attrgetter("name") ) def test_m2m_through_forward_ignores_invalid_members(self): # We start out by making sure that the Group 'CIA' has no members. self.assertQuerysetEqual( self.cia.members.all(), [] ) # Something adds Jane to group CIA, but Jane is in the Soviet Union, which isn't the CIA's country Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia) # There should still be no members in CIA self.assertQuerysetEqual( self.cia.members.all(), [] ) def test_m2m_through_reverse_ignores_invalid_members(self): # We start out by making sure that Jane has no groups. self.assertQuerysetEqual( self.jane.groups.all(), [] ) # Something adds Jane to group CIA, but Jane is in the Soviet Union, which isn't the CIA's country Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia) # Jane should still not be in any groups self.assertQuerysetEqual( self.jane.groups.all(), [] ) def test_m2m_through_on_self_works(self): self.assertQuerysetEqual( self.jane.friends.all(), [] ) Friendship.objects.create( from_friend_country=self.jane.person_country, from_friend=self.jane, to_friend_country=self.george.person_country, to_friend=self.george) self.assertQuerysetEqual( self.jane.friends.all(), ['George'], attrgetter("name") ) def test_m2m_through_on_self_ignores_mismatch_columns(self): self.assertQuerysetEqual(self.jane.friends.all(), []) # Note that we use ids instead of instances.
This is because instances on ForeignObject # properties will set all related fields from the given instance Friendship.objects.create( from_friend_id=self.jane.id, to_friend_id=self.george.id, to_friend_country_id=self.jane.person_country_id, from_friend_country_id=self.george.person_country_id) self.assertQuerysetEqual(self.jane.friends.all(), []) def test_prefetch_related_m2m_forward_works(self): Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia) Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat) with self.assertNumQueries(2): members_lists = [list(g.members.all()) for g in Group.objects.prefetch_related('members')] normal_members_lists = [list(g.members.all()) for g in Group.objects.all()] self.assertEqual(members_lists, normal_members_lists) def test_prefetch_related_m2m_reverse_works(self): Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia) Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat) with self.assertNumQueries(2): groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')] normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()] self.assertEqual(groups_lists, normal_groups_lists) @translation.override('fi') def test_translations(self): a1 = Article.objects.create(pub_date=datetime.date.today()) at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa') at1_fi.save() at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala') at2_en.save() self.assertEqual(Article.objects.get(pk=a1.pk).active_translation, at1_fi) with self.assertNumQueries(1): fetched = Article.objects.select_related('active_translation').get( active_translation__title='Otsikko') self.assertEqual(fetched.active_translation.title, 'Otsikko') a2 = Article.objects.create(pub_date=datetime.date.today()) at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa', abstract='dipad') at2_fi.save() a3 = Article.objects.create(pub_date=datetime.date.today()) at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala', abstract='lala') at3_en.save() # Test model initialization with active_translation field. a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en) a3.save() self.assertEqual( list(Article.objects.filter(active_translation__abstract=None)), [a1, a3]) self.assertEqual( list(Article.objects.filter(active_translation__abstract=None, active_translation__pk__isnull=False)), [a1]) with translation.override('en'): self.assertEqual( list(Article.objects.filter(active_translation__abstract=None)), [a1, a2]) def test_foreign_key_raises_informative_does_not_exist(self): referrer = ArticleTranslation() with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'): referrer.article def test_foreign_key_related_query_name(self): a1 = Article.objects.create(pub_date=datetime.date.today()) ArticleTag.objects.create(article=a1, name="foo") self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1) self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0) msg = ( "Cannot resolve keyword 'tags' into field.
Choices are: " "active_translation, active_translation_q, articletranslation, " "id, idea_things, newsarticle, pub_date, tag" ) with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(tags__name="foo") def test_many_to_many_related_query_name(self): a1 = Article.objects.create(pub_date=datetime.date.today()) i1 = ArticleIdea.objects.create(name="idea1") a1.ideas.add(i1) self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1) self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0) msg = ( "Cannot resolve keyword 'ideas' into field. Choices are: " "active_translation, active_translation_q, articletranslation, " "id, idea_things, newsarticle, pub_date, tag" ) with self.assertRaisesMessage(FieldError, msg): Article.objects.filter(ideas__name="idea1") @translation.override('fi') def test_inheritance(self): na = NewsArticle.objects.create(pub_date=datetime.date.today()) ArticleTranslation.objects.create( article=na, lang="fi", title="foo", body="bar") self.assertSequenceEqual( NewsArticle.objects.select_related('active_translation'), [na] ) with self.assertNumQueries(1): self.assertEqual( NewsArticle.objects.select_related( 'active_translation')[0].active_translation.title, "foo") @skipUnlessDBFeature('has_bulk_insert') def test_batch_create_foreign_object(self): objs = [Person(name="abcd_%s" % i, person_country=self.usa) for i in range(0, 5)] Person.objects.bulk_create(objs, 10) def test_isnull_lookup(self): Membership.objects.create(membership_country=self.usa, person=self.bob, group_id=None) Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia) self.assertQuerysetEqual( Membership.objects.filter(group__isnull=True), ['<Membership: Bob is a member of NULL>'] ) self.assertQuerysetEqual( Membership.objects.filter(group__isnull=False), ['<Membership: Bob is a member of CIA>'] ) class TestModelCheckTests(SimpleTestCase): @isolate_apps('foreign_object') def test_check_composite_foreign_object(self): class Parent(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() class Meta: unique_together = (('a', 'b'),) class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() value = models.CharField(max_length=255) parent = models.ForeignObject( Parent, on_delete=models.SET_NULL, from_fields=('a', 'b'), to_fields=('a', 'b'), related_name='children', ) self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), []) @isolate_apps('foreign_object') def test_check_subset_composite_foreign_object(self): class Parent(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() c = models.PositiveIntegerField() class Meta: unique_together = (('a', 'b'),) class Child(models.Model): a = models.PositiveIntegerField() b = models.PositiveIntegerField() c = models.PositiveIntegerField() d = models.CharField(max_length=255) parent = models.ForeignObject( Parent, on_delete=models.SET_NULL, from_fields=('a', 'b', 'c'), to_fields=('a', 'b', 'c'), related_name='children', ) self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), []) class TestExtraJoinFilterQ(TestCase): @translation.override('fi') def test_extra_join_filter_q(self): a = Article.objects.create(pub_date=datetime.datetime.today()) ArticleTranslation.objects.create(article=a, lang='fi', title='title', body='body') qs = Article.objects.all() with self.assertNumQueries(2): self.assertEqual(qs[0].active_translation_q.title, 'title') qs = 
qs.select_related('active_translation_q') with self.assertNumQueries(1): self.assertEqual(qs[0].active_translation_q.title, 'title')
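# Editorial sketch (not upstream code): the shape of the composite key behind
# Membership.person in the tests above. The real declarations live in this
# app's models.py; the field names below are illustrative assumptions. The
# point is that ForeignObject joins on every from/to column pair at once, so
# membership.person only resolves when BOTH columns match -- which is exactly
# what test_get_fails_on_multicolumn_mismatch relies on.
def _sketch_composite_foreign_object():
    class SketchPerson(models.Model):
        person_id = models.IntegerField()
        country_id = models.IntegerField()

        class Meta:
            app_label = 'foreign_object'
            unique_together = (('person_id', 'country_id'),)

    class SketchMembership(models.Model):
        country_id = models.IntegerField()
        member_id = models.IntegerField()
        person = models.ForeignObject(
            SketchPerson,
            on_delete=models.CASCADE,
            from_fields=('country_id', 'member_id'),  # columns on this model
            to_fields=('country_id', 'person_id'),    # columns on SketchPerson
        )

        class Meta:
            app_label = 'foreign_object'

    return SketchMembership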
from django.db import models from django.utils import timezone class Article(models.Model): title = models.CharField(max_length=100) pub_date = models.DateField() pub_datetime = models.DateTimeField(default=timezone.now) categories = models.ManyToManyField("Category", related_name="articles") def __str__(self): return self.title class Comment(models.Model): article = models.ForeignKey(Article, models.CASCADE, related_name="comments") text = models.TextField() pub_date = models.DateField() approval_date = models.DateField(null=True) def __str__(self): return 'Comment to %s (%s)' % (self.article.title, self.pub_date) class Category(models.Model): name = models.CharField(max_length=255)
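# Editorial sketch (not upstream code): a typical column-to-column query
# against the models above -- e.g. comments approved on the day they were
# published. F('pub_date') makes the comparison happen in SQL rather than
# in Python; the helper name is hypothetical.
def approved_on_publication_day():
    from django.db.models import F
    return Comment.objects.filter(approval_date=F('pub_date'))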